From df687da06d9550236b67ca7c13071a0950aa99f8 Mon Sep 17 00:00:00 2001
From: seanmor5
Date: Fri, 10 May 2024 17:46:36 +0000
Subject: [PATCH] =?UTF-8?q?Deploying=20to=20gh-pages=20from=20@=20elixir-n?=
 =?UTF-8?q?x/axon@1ccbeba57395712e47d61b3912956fb2a09f58a8=20=F0=9F=9A=80?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .build                                 |   5 +-
 404.html                               |   2 +-
 Axon.Activations.html                  | 642 ++++++++++-----------
 Axon.CompileError.html                 |   2 +-
 Axon.Display.html                      |   6 +-
 Axon.Initializers.html                 | 352 ++++++------
 Axon.Layers.html                       | 558 +++++++++---------
 Axon.Loop.State.html                   |  22 +-
 Axon.Loop.html                         | 302 +++++-----
 Axon.LossScale.html                    |   4 +-
 Axon.Losses.html                       | 508 ++++++++---------
 Axon.Metrics.html                      | 136 ++---
 Axon.MixedPrecision.html               |  82 +--
 Axon.ModelState.html                   | 556 ++++++++++++++++++
 Axon.None.html                         |   2 +-
 Axon.StatefulOutput.html               |   2 +-
 Axon.epub                              | Bin 419383 -> 418394 bytes
 Axon.html                              | 783 +++++++++++---------------
 accelerating_axon.html                 | 140 ++---
 api-reference.html                     |  11 +-
 complex_models.html                    |  86 +--
 credit_card_fraud.html                 | 180 +++---
 custom_layers.html                     | 116 ++--
 custom_models_loss_optimizers.html     | 560 +++++++++---------
 dist/search_data-7F2B0842.js           |   1 -
 dist/search_data-BDD49AAD.js           |   1 +
 dist/sidebar_items-B66D7C0E.js         |   1 -
 dist/sidebar_items-D4AB84D3.js         |   1 +
 fashionmnist_autoencoder.html          | 112 ++--
 fashionmnist_vae.html                  | 522 ++++++++---------
 guides.html                            |   2 +-
 horses_or_humans.html                  | 184 +++---
 instrumenting_loops_with_metrics.html  | 386 ++++++-------
 lstm_generation.html                   | 158 +++---
 mnist.html                             |  54 +-
 mnist_autoencoder_using_kino.html      | 198 +++----
 model_hooks.html                       | 556 +++++++++---------
 multi_input_multi_output_models.html   | 200 +++----
 onnx_to_axon.html                      | 122 ++--
 search.html                            |   4 +-
 sequential_models.html                 | 164 +++---
 training_and_inference_mode.html       | 152 ++---
 using_loop_event_handlers.html         | 396 ++++++-------
 writing_custom_event_handlers.html     | 436 +++++++-------
 writing_custom_metrics.html            | 586 +++++++++----------
 xor.html                               |  78 +--
 your_first_axon_model.html             |  32 +-
 your_first_evaluation_loop.html        | 214 +++----
 your_first_training_loop.html          | 372 ++++++------
 49 files changed, 5203 insertions(+), 4786 deletions(-)
 create mode 100644 Axon.ModelState.html
 delete mode 100644 dist/search_data-7F2B0842.js
 create mode 100644 dist/search_data-BDD49AAD.js
 delete mode 100644 dist/sidebar_items-B66D7C0E.js
 create mode 100644 dist/sidebar_items-D4AB84D3.js

diff --git a/.build b/.build
index 9f147e29..a84c0c28 100644
--- a/.build
+++ b/.build
@@ -34,6 +34,7 @@ Axon.LossScale.html
 Axon.Losses.html
 Axon.Metrics.html
 Axon.MixedPrecision.html
+Axon.ModelState.html
 Axon.None.html
 Axon.StatefulOutput.html
 Axon.html
@@ -71,8 +72,8 @@ dist/merriweather-latin-ext-300-normal-K6L27CZ5.woff2
 dist/merriweather-vietnamese-300-italic-EHHNZPUO.woff2
 dist/merriweather-vietnamese-300-normal-U376L4Z4.woff2
 dist/remixicon-NKANDIL5.woff2
-dist/search_data-7F2B0842.js
-dist/sidebar_items-B66D7C0E.js
+dist/search_data-BDD49AAD.js
+dist/sidebar_items-D4AB84D3.js
 fashionmnist_autoencoder.html
 fashionmnist_vae.html
 guides.html
diff --git a/404.html b/404.html
index 307a333b..b05e638f 100644
--- a/404.html
+++ b/404.html
@@ -16,7 +16,7 @@
-
+
diff --git a/Axon.Activations.html b/Axon.Activations.html
index 48b38d30..ea24bc8d 100644
--- a/Axon.Activations.html
+++ b/Axon.Activations.html
@@ -14,7 +14,7 @@
-
+
@@ -136,19 +136,19 @@

Activation functions.

Activation functions are element-wise, (typically) non-linear functions called on the output of another layer, such as a dense layer:

x
-|> dense(weight, bias)
-|> relu()

Activation functions output the "activation" or how active +|> dense(weight, bias) +|> relu()

Activation functions output the "activation" or how active a given layer's neurons are in learning a representation of the data-generating distribution.

Some activations are commonly used as output activations. For example softmax is often used as the output in multiclass classification problems because it returns a categorical -probability distribution:

iex> Axon.Activations.softmax(Nx.tensor([[1, 2, 3]], type: {:f, 32}))
-#Nx.Tensor<
-  f32[1][3]
-  [
-    [0.09003057330846786, 0.2447284758090973, 0.6652409434318542]
-  ]
->

Other activations such as tanh or sigmoid are used because +probability distribution:

iex> Axon.Activations.softmax(Nx.tensor([[1, 2, 3]], type: {:f, 32}))
+#Nx.Tensor<
+  f32[1][3]
+  [
+    [0.09003057330846786, 0.2447284758090973, 0.6652409434318542]
+  ]
+>

Other activations such as tanh or sigmoid are used because they have desirable properties, such as keeping the output tensor constrained within a certain range.
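For instance, a quick bounding-behavior sketch (not taken from the original docs): tanh maps inputs of any magnitude into (-1, 1):

t = Axon.Activations.tanh(Nx.tensor([-100.0, -1.0, 0.0, 1.0, 100.0]))
# every element of t lies within [-1.0, 1.0], no matter how large the inputs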

Generally, the choice of activation function is arbitrary; although some activations work better than others in certain @@ -442,26 +442,26 @@

celu(x, opts \\ [])

Examples -
iex> Axon.Activations.celu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
-#Nx.Tensor<
-  f32[7]
-  [-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
->
-
-iex> Axon.Activations.celu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
-#Nx.Tensor<
-  bf16[2][3]
-  [
-    [-0.62890625, -0.86328125, -0.94921875],
-    [1.0, 2.0, 3.0]
-  ]
->

+
iex> Axon.Activations.celu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
+#Nx.Tensor<
+  f32[7]
+  [-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
+>
+
+iex> Axon.Activations.celu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
+#Nx.Tensor<
+  bf16[2][3]
+  [
+    [-0.62890625, -0.86328125, -0.94921875],
+    [1.0, 2.0, 3.0]
+  ]
+>

Error cases

-
iex> Axon.Activations.celu(Nx.tensor([0.0, 1.0, 2.0], type: {:f, 32}), alpha: 0.0)
+
iex> Axon.Activations.celu(Nx.tensor([0.0, 1.0, 2.0], type: {:f, 32}), alpha: 0.0)
 ** (ArgumentError) :alpha must be non-zero in CELU activation

@@ -506,20 +506,20 @@

elu(x, opts \\ [])

Examples

-
iex> Axon.Activations.elu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
-#Nx.Tensor<
-  f32[7]
-  [-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
->
-
-iex> Axon.Activations.elu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
-#Nx.Tensor<
-  bf16[2][3]
-  [
-    [-0.62890625, -0.86328125, -0.94921875],
-    [1.0, 2.0, 3.0]
-  ]
->

+
iex> Axon.Activations.elu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
+#Nx.Tensor<
+  f32[7]
+  [-0.9502129554748535, -0.8646647334098816, -0.6321205496788025, 0.0, 1.0, 2.0, 3.0]
+>
+
+iex> Axon.Activations.elu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))
+#Nx.Tensor<
+  bf16[2][3]
+  [
+    [-0.62890625, -0.86328125, -0.94921875],
+    [1.0, 2.0, 3.0]
+  ]
+>

@@ -555,20 +555,20 @@

exp(x)

Examples -
iex> Axon.Activations.exp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [0.049787066876888275, 0.1353352814912796, 0.3678794503211975, 1.0, 2.7182817459106445, 7.389056205749512, 20.08553695678711]
->
-
-iex> Axon.Activations.exp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [0.3671875, 0.134765625, 0.049560546875],
-    [2.703125, 7.375, 20.0]
-  ]
->
+
iex> Axon.Activations.exp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [0.049787066876888275, 0.1353352814912796, 0.3678794503211975, 1.0, 2.7182817459106445, 7.389056205749512, 20.08553695678711]
+>
+
+iex> Axon.Activations.exp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [0.3671875, 0.134765625, 0.049560546875],
+    [2.703125, 7.375, 20.0]
+  ]
+>
@@ -598,20 +598,20 @@

gelu(x)

Examples -
iex> Axon.Activations.gelu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-0.0040496885776519775, -0.04550027847290039, -0.15865525603294373, 0.0, 0.8413447141647339, 1.9544997215270996, 2.995950222015381]
->
-
-iex> Axon.Activations.gelu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-0.16015625, -0.046875, -0.005859375],
-    [0.83984375, 1.953125, 2.984375]
-  ]
->

+
iex> Axon.Activations.gelu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-0.0040496885776519775, -0.04550027847290039, -0.15865525603294373, 0.0, 0.8413447141647339, 1.9544997215270996, 2.995950222015381]
+>
+
+iex> Axon.Activations.gelu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-0.16015625, -0.046875, -0.005859375],
+    [0.83984375, 1.953125, 2.984375]
+  ]
+>

@@ -647,20 +647,20 @@

hard_sigmoid(x, opts \\ [])

Examples -
iex> Axon.Activations.hard_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [0.0, 0.0, 0.0, 0.20000000298023224, 0.4000000059604645, 0.6000000238418579, 0.800000011920929]
->
-
-iex> Axon.Activations.hard_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [7.781982421875e-4, 0.0, 0.0],
-    [0.3984375, 0.59765625, 0.796875]
-  ]
->
+
iex> Axon.Activations.hard_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [0.0, 0.0, 0.0, 0.20000000298023224, 0.4000000059604645, 0.6000000238418579, 0.800000011920929]
+>
+
+iex> Axon.Activations.hard_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [7.781982421875e-4, 0.0, 0.0],
+    [0.3984375, 0.59765625, 0.796875]
+  ]
+>
@@ -694,20 +694,20 @@

hard_silu(x, opts \\ [])

Examples -
iex> Axon.Activations.hard_silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-0.0, -0.0, -0.0, 0.0, 0.4000000059604645, 1.2000000476837158, 2.4000000953674316]
->
-
-iex> Axon.Activations.hard_silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-7.781982421875e-4, -0.0, -0.0],
-    [0.3984375, 1.1953125, 2.390625]
-  ]
->
+
iex> Axon.Activations.hard_silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-0.0, -0.0, -0.0, 0.0, 0.4000000059604645, 1.2000000476837158, 2.4000000953674316]
+>
+
+iex> Axon.Activations.hard_silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-7.781982421875e-4, -0.0, -0.0],
+    [0.3984375, 1.1953125, 2.390625]
+  ]
+>
@@ -737,20 +737,20 @@

hard_tanh(x)

Examples -
iex> Axon.Activations.hard_tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0]
->
-
-iex> Axon.Activations.hard_tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-1.0, -1.0, -1.0],
-    [1.0, 1.0, 1.0]
-  ]
->
+
iex> Axon.Activations.hard_tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-1.0, -1.0, -1.0, 0.0, 1.0, 1.0, 1.0]
+>
+
+iex> Axon.Activations.hard_tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-1.0, -1.0, -1.0],
+    [1.0, 1.0, 1.0]
+  ]
+>
@@ -788,20 +788,20 @@

leaky_relu(x, opts \\ [])

Examples -
iex> Axon.Activations.leaky_relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]), alpha: 0.5)
-#Nx.Tensor<
-  f32[data: 7]
-  [-1.5, -1.0, -0.5, 0.0, 1.0, 2.0, 3.0]
->
-
-iex> Axon.Activations.leaky_relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], names: [:batch, :data]), alpha: 0.5)
-#Nx.Tensor<
-  f32[batch: 2][data: 3]
-  [
-    [-0.5, -1.0, -1.5],
-    [1.0, 2.0, 3.0]
-  ]
->
+
iex> Axon.Activations.leaky_relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]), alpha: 0.5)
+#Nx.Tensor<
+  f32[data: 7]
+  [-1.5, -1.0, -0.5, 0.0, 1.0, 2.0, 3.0]
+>
+
+iex> Axon.Activations.leaky_relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], names: [:batch, :data]), alpha: 0.5)
+#Nx.Tensor<
+  f32[batch: 2][data: 3]
+  [
+    [-0.5, -1.0, -1.5],
+    [1.0, 2.0, 3.0]
+  ]
+>
@@ -831,20 +831,20 @@

linear(x)

Examples -
iex> Axon.Activations.linear(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]
->
-
-iex> Axon.Activations.linear(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-1.0, -2.0, -3.0],
-    [1.0, 2.0, 3.0]
-  ]
->
+
iex> Axon.Activations.linear(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]
+>
+
+iex> Axon.Activations.linear(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-1.0, -2.0, -3.0],
+    [1.0, 2.0, 3.0]
+  ]
+>
@@ -874,20 +874,20 @@

log_sigmoid(x)

Examples -
iex> Axon.Activations.log_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-3.0485873222351074, -2.1269280910491943, -1.3132617473602295, -0.6931471824645996, -0.3132616877555847, -0.12692801654338837, -0.04858734831213951]
->
-
-iex> Axon.Activations.log_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-1.3125, -2.125, -3.046875],
-    [-0.3125, -0.1259765625, -0.04833984375]
-  ]
->
+
iex> Axon.Activations.log_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-3.0485873222351074, -2.1269280910491943, -1.3132617473602295, -0.6931471824645996, -0.3132616877555847, -0.12692801654338837, -0.04858734831213951]
+>
+
+iex> Axon.Activations.log_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-1.3125, -2.125, -3.046875],
+    [-0.3125, -0.1259765625, -0.04833984375]
+  ]
+>
@@ -919,20 +919,20 @@

log_softmax(x, opts \\ [])

Examples -
iex> Axon.Activations.log_softmax(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-6.457762718200684, -5.457762718200684, -4.457762718200684, -3.4577627182006836, -2.4577627182006836, -1.4577628374099731, -0.45776283740997314]
->
-
-iex> Axon.Activations.log_softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-0.404296875, -1.3984375, -2.390625],
-    [-2.390625, -1.3984375, -0.404296875]
-  ]
->
+
iex> Axon.Activations.log_softmax(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-6.457762718200684, -5.457762718200684, -4.457762718200684, -3.4577627182006836, -2.4577627182006836, -1.4577628374099731, -0.45776283740997314]
+>
+
+iex> Axon.Activations.log_softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-0.404296875, -1.3984375, -2.390625],
+    [-2.390625, -1.3984375, -0.404296875]
+  ]
+>
@@ -964,20 +964,20 @@

log_sumexp(x, opts \\ [])

Examples -
iex> Axon.Activations.log_sumexp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 1]
-  [3.4577627182006836]
->
-
-iex> Axon.Activations.log_sumexp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 1]
-  [
-    [-0.59375],
-    [3.390625]
-  ]
->
+
iex> Axon.Activations.log_sumexp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 1]
+  [3.4577627182006836]
+>
+
+iex> Axon.Activations.log_sumexp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 1]
+  [
+    [-0.59375],
+    [3.390625]
+  ]
+>
@@ -1007,20 +1007,20 @@

mish(x)

Examples -
iex> Axon.Activations.mish(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-0.14564745128154755, -0.2525014877319336, -0.30340147018432617, 0.0, 0.8650984168052673, 1.9439589977264404, 2.98653507232666]
->
-
-iex> Axon.Activations.mish(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-0.30078125, -0.25, -0.1435546875],
-    [0.86328125, 1.9375, 2.96875]
-  ]
->
+
iex> Axon.Activations.mish(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-0.14564745128154755, -0.2525014877319336, -0.30340147018432617, 0.0, 0.8650984168052673, 1.9439589977264404, 2.98653507232666]
+>
+
+iex> Axon.Activations.mish(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-0.30078125, -0.25, -0.1435546875],
+    [0.86328125, 1.9375, 2.96875]
+  ]
+>
@@ -1050,20 +1050,20 @@

relu6(x)

Examples -
iex> Axon.Activations.relu6(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
-#Nx.Tensor<
-  f32[7]
-  [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
->
-
-iex> Axon.Activations.relu6(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [0.0, 0.0, 0.0],
-    [1.0, 2.0, 3.0]
-  ]
->

+
iex> Axon.Activations.relu6(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))
+#Nx.Tensor<
+  f32[7]
+  [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
+>
+
+iex> Axon.Activations.relu6(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [0.0, 0.0, 0.0],
+    [1.0, 2.0, 3.0]
+  ]
+>

@@ -1099,20 +1099,20 @@

relu(x)

Examples -
iex> Axon.Activations.relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
->
-
-iex> Axon.Activations.relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [0.0, 0.0, 0.0],
-    [1.0, 2.0, 3.0]
-  ]
->
+
iex> Axon.Activations.relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [0.0, 0.0, 0.0, 0.0, 1.0, 2.0, 3.0]
+>
+
+iex> Axon.Activations.relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [0.0, 0.0, 0.0],
+    [1.0, 2.0, 3.0]
+  ]
+>
@@ -1150,20 +1150,20 @@

selu(x, opts \\ [])

Examples -
iex> Axon.Activations.selu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-1.670568823814392, -1.5201665163040161, -1.1113307476043701, 0.0, 1.0507010221481323, 2.1014020442962646, 3.1521029472351074]
->
-
-iex> Axon.Activations.selu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-1.09375, -1.5078125, -1.6640625],
-    [1.046875, 2.09375, 3.140625]
-  ]
->

+
iex> Axon.Activations.selu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-1.670568823814392, -1.5201665163040161, -1.1113307476043701, 0.0, 1.0507010221481323, 2.1014020442962646, 3.1521029472351074]
+>
+
+iex> Axon.Activations.selu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-1.09375, -1.5078125, -1.6640625],
+    [1.046875, 2.09375, 3.140625]
+  ]
+>

@@ -1202,20 +1202,20 @@

sigmoid(x)

Examples -
iex> Axon.Activations.sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [0.04742587357759476, 0.11920291930437088, 0.2689414322376251, 0.5, 0.7310585975646973, 0.8807970881462097, 0.9525741338729858]
->
-
-iex> Axon.Activations.sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [0.267578125, 0.119140625, 0.04736328125],
-    [0.73046875, 0.87890625, 0.94921875]
-  ]
->
+
iex> Axon.Activations.sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [0.04742587357759476, 0.11920291930437088, 0.2689414322376251, 0.5, 0.7310585975646973, 0.8807970881462097, 0.9525741338729858]
+>
+
+iex> Axon.Activations.sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [0.267578125, 0.119140625, 0.04736328125],
+    [0.73046875, 0.87890625, 0.94921875]
+  ]
+>
@@ -1245,20 +1245,20 @@

silu(x)

Examples -
iex> Axon.Activations.silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-0.14227762818336487, -0.23840583860874176, -0.2689414322376251, 0.0, 0.7310585975646973, 1.7615941762924194, 2.857722282409668]
->
-
-iex> Axon.Activations.silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-0.267578125, -0.23828125, -0.1416015625],
-    [0.73046875, 1.7578125, 2.84375]
-  ]
->

+
iex> Axon.Activations.silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-0.14227762818336487, -0.23840583860874176, -0.2689414322376251, 0.0, 0.7310585975646973, 1.7615941762924194, 2.857722282409668]
+>
+
+iex> Axon.Activations.silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-0.267578125, -0.23828125, -0.1416015625],
+    [0.73046875, 1.7578125, 2.84375]
+  ]
+>

@@ -1306,22 +1306,22 @@

softmax(x, opts \\ [])

Examples -
iex> Axon.Activations.softmax(Nx.tensor([[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]], names: [:batch, :data]))
-#Nx.Tensor<
-  f32[batch: 1][data: 7]
-  [
-    [0.0015683004166930914, 0.004263082519173622, 0.011588259600102901, 0.03150015324354172, 0.08562629669904709, 0.23275642096996307, 0.6326975226402283]
-  ]
->
-
-iex> Axon.Activations.softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [0.6640625, 0.2431640625, 0.08935546875],
-    [0.08935546875, 0.2431640625, 0.6640625]
-  ]
->
+
iex> Axon.Activations.softmax(Nx.tensor([[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]], names: [:batch, :data]))
+#Nx.Tensor<
+  f32[batch: 1][data: 7]
+  [
+    [0.0015683004166930914, 0.004263082519173622, 0.011588259600102901, 0.03150015324354172, 0.08562629669904709, 0.23275642096996307, 0.6326975226402283]
+  ]
+>
+
+iex> Axon.Activations.softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [0.6640625, 0.2431640625, 0.08935546875],
+    [0.08935546875, 0.2431640625, 0.6640625]
+  ]
+>
@@ -1351,20 +1351,20 @@

softplus(x)

Examples -
iex> Axon.Activations.softplus(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [0.04858734831213951, 0.12692801654338837, 0.3132616877555847, 0.6931471824645996, 1.3132617473602295, 2.1269280910491943, 3.0485873222351074]
->
-
-iex> Axon.Activations.softplus(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [0.3125, 0.1259765625, 0.04833984375],
-    [1.3125, 2.125, 3.046875]
-  ]
->
+
iex> Axon.Activations.softplus(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [0.04858734831213951, 0.12692801654338837, 0.3132616877555847, 0.6931471824645996, 1.3132617473602295, 2.1269280910491943, 3.0485873222351074]
+>
+
+iex> Axon.Activations.softplus(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [0.3125, 0.1259765625, 0.04833984375],
+    [1.3125, 2.125, 3.046875]
+  ]
+>
@@ -1394,20 +1394,20 @@

softsign(x)

Examples -
iex> Axon.Activations.softsign(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-0.75, -0.6666666865348816, -0.5, 0.0, 0.5, 0.6666666865348816, 0.75]
->
-
-iex> Axon.Activations.softsign(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-0.5, -0.6640625, -0.75],
-    [0.5, 0.6640625, 0.75]
-  ]
->
+
iex> Axon.Activations.softsign(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-0.75, -0.6666666865348816, -0.5, 0.0, 0.5, 0.6666666865348816, 0.75]
+>
+
+iex> Axon.Activations.softsign(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-0.5, -0.6640625, -0.75],
+    [0.5, 0.6640625, 0.75]
+  ]
+>
@@ -1437,20 +1437,20 @@

tanh(x)

Examples -
iex> Axon.Activations.tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
-#Nx.Tensor<
-  f32[data: 7]
-  [-0.9950547814369202, -0.9640275835990906, -0.7615941762924194, 0.0, 0.7615941762924194, 0.9640275835990906, 0.9950547814369202]
->
-
-iex> Axon.Activations.tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
-#Nx.Tensor<
-  bf16[batch: 2][data: 3]
-  [
-    [-0.7578125, -0.9609375, -0.9921875],
-    [0.7578125, 0.9609375, 0.9921875]
-  ]
->
+
iex> Axon.Activations.tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))
+#Nx.Tensor<
+  f32[data: 7]
+  [-0.9950547814369202, -0.9640275835990906, -0.7615941762924194, 0.0, 0.7615941762924194, 0.9640275835990906, 0.9950547814369202]
+>
+
+iex> Axon.Activations.tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))
+#Nx.Tensor<
+  bf16[batch: 2][data: 3]
+  [
+    [-0.7578125, -0.9609375, -0.9921875],
+    [0.7578125, 0.9609375, 0.9921875]
+  ]
+>
diff --git a/Axon.CompileError.html b/Axon.CompileError.html
index 9cc731ec..71287577 100644
--- a/Axon.CompileError.html
+++ b/Axon.CompileError.html
@@ -14,7 +14,7 @@
-
+
diff --git a/Axon.Display.html b/Axon.Display.html
index 9bc6df8f..6f636321 100644
--- a/Axon.Display.html
+++ b/Axon.Display.html
@@ -14,7 +14,7 @@
-
+
@@ -220,7 +220,7 @@

as_graph(axon, input_templates, opts \\ [])

Examples

-

Given an Axon model:

model = Axon.input("input") |> Axon.dense(32)

You can define input templates for each input:

input = Nx.template({1, 16}, :f32)

And then display the execution flow of the model:

Axon.Display.as_graph(model, input, direction: :top_down)
+

Given an Axon model:

model = Axon.input("input") |> Axon.dense(32)

You can define input templates for each input:

input = Nx.template({1, 16}, :f32)

And then display the execution flow of the model:

Axon.Display.as_graph(model, input, direction: :top_down)
@@ -250,7 +250,7 @@

as_table(axon, input_templates)

Examples -

Given an Axon model:

model = Axon.input("input") |> Axon.dense(32)

You can define input templates for each input:

input = Nx.template({1, 16}, :f32)

And then display the execution flow of the model:

Axon.Display.as_table(model, input)
+

Given an Axon model:

model = Axon.input("input") |> Axon.dense(32)

You can define input templates for each input:

input = Nx.template({1, 16}, :f32)

And then display the execution flow of the model:

Axon.Display.as_table(model, input)
diff --git a/Axon.Initializers.html b/Axon.Initializers.html
index fcf388cd..16bfc1bc 100644
--- a/Axon.Initializers.html
+++ b/Axon.Initializers.html
@@ -14,7 +14,7 @@
-
+
@@ -153,8 +153,8 @@

small enough to avoid exploding values. The initializers in this module have a default scale known to work well with the initialization strategy.
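For example, mirroring the doctests below, the default scale can be overridden when a narrower initialization is desired (a sketch):

init_fn = Axon.Initializers.uniform(scale: 1.0e-3)
t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
# t is a {2, 2} f32 tensor drawn from a much narrower uniform range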

The functions in this module return initialization functions which -take shapes and types and return tensors:

init_fn = Axon.Initializers.zeros()
-init_fn.({1, 2}, {:f, 32})

You may use these functions from within defn or outside.

+take shapes and types and return tensors:

init_fn = Axon.Initializers.zeros()
+init_fn.({1, 2}, {:f, 32})

You may use these functions from within defn or outside.
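Note, as the doctests below show, that deterministic initializers such as zeros/0 are called with just a shape and type, while stochastic initializers additionally take an Nx.Random key:

init_fn = Axon.Initializers.normal()
t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(42))
# passing the same key yields the same (reproducible) random tensor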

@@ -349,16 +349,16 @@

full(value)

Examples -
iex> init_fn = Axon.Initializers.full(1.00)
-iex> out = init_fn.({2, 2}, {:f, 32})
+
iex> init_fn = Axon.Initializers.full(1.00)
+iex> out = init_fn.({2, 2}, {:f, 32})
 iex> out
-#Nx.Tensor<
-  f32[2][2]
-  [
-    [1.0, 1.0],
-    [1.0, 1.0]
-  ]
->
+
#Nx.Tensor< + f32[2][2] + [ + [1.0, 1.0], + [1.0, 1.0] + ] +>
@@ -397,19 +397,19 @@

glorot_normal(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.glorot_normal()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.glorot_normal(scale: 1.0e-3)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}

+
iex> init_fn = Axon.Initializers.glorot_normal()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.glorot_normal(scale: 1.0e-3)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}

@@ -454,19 +454,19 @@

glorot_uniform(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.glorot_uniform()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.glorot_uniform(scale: 1.0e-3)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}

+
iex> init_fn = Axon.Initializers.glorot_uniform()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.glorot_uniform(scale: 1.0e-3)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}

@@ -510,19 +510,19 @@

he_normal(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.he_normal()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.he_normal(scale: 1.0e-3)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}

+
iex> init_fn = Axon.Initializers.he_normal()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.he_normal(scale: 1.0e-3)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}

@@ -566,19 +566,19 @@

he_uniform(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.he_uniform()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.he_uniform(scale: 1.0e-3)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}

+
iex> init_fn = Axon.Initializers.he_uniform()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.he_uniform(scale: 1.0e-3)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}

@@ -612,16 +612,16 @@

identity()

Examples -
iex> init_fn = Axon.Initializers.identity()
-iex> out = init_fn.({2, 2}, {:f, 32})
+
iex> init_fn = Axon.Initializers.identity()
+iex> out = init_fn.({2, 2}, {:f, 32})
 iex> out
-#Nx.Tensor<
-  f32[2][2]
-  [
-    [1.0, 0.0],
-    [0.0, 1.0]
-  ]
->
+
#Nx.Tensor< + f32[2][2] + [ + [1.0, 0.0], + [0.0, 1.0] + ] +>
@@ -659,19 +659,19 @@

lecun_normal(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.lecun_normal()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.lecun_normal(scale: 1.0e-3)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}

+
iex> init_fn = Axon.Initializers.lecun_normal()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.lecun_normal(scale: 1.0e-3)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}

@@ -715,19 +715,19 @@

lecun_uniform(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.lecun_uniform()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.lecun_uniform(scale: 1.0e-3)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}

+
iex> init_fn = Axon.Initializers.lecun_uniform()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.lecun_uniform(scale: 1.0e-3)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}

@@ -769,19 +769,19 @@

normal(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.normal()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.normal(mean: 1.0, scale: 1.0)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}
+
iex> init_fn = Axon.Initializers.normal()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.normal(mean: 1.0, scale: 1.0)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}
@@ -809,16 +809,16 @@

ones()

Examples -
iex> init_fn = Axon.Initializers.ones()
-iex> out = init_fn.({2, 2}, {:f, 32})
+
iex> init_fn = Axon.Initializers.ones()
+iex> out = init_fn.({2, 2}, {:f, 32})
 iex> out
-#Nx.Tensor<
-  f32[2][2]
-  [
-    [1.0, 1.0],
-    [1.0, 1.0]
-  ]
->
+
#Nx.Tensor< + f32[2][2] + [ + [1.0, 1.0], + [1.0, 1.0] + ] +>
@@ -857,19 +857,19 @@

orthogonal(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.orthogonal()
-iex> t = init_fn.({3, 3}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.type(t)
-{:f, 32}
-iex> Nx.shape(t)
-{3, 3}
-
-iex> init_fn = Axon.Initializers.orthogonal()
-iex> t = init_fn.({1, 2, 3, 4}, {:f, 64}, Nx.Random.key(1))
-iex> Nx.type(t)
-{:f, 64}
-iex> Nx.shape(t)
-{1, 2, 3, 4}
+
iex> init_fn = Axon.Initializers.orthogonal()
+iex> t = init_fn.({3, 3}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.type(t)
+{:f, 32}
+iex> Nx.shape(t)
+{3, 3}
+
+iex> init_fn = Axon.Initializers.orthogonal()
+iex> t = init_fn.({1, 2, 3, 4}, {:f, 64}, Nx.Random.key(1))
+iex> Nx.type(t)
+{:f, 64}
+iex> Nx.shape(t)
+{1, 2, 3, 4}
@@ -905,19 +905,19 @@

uniform(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.uniform()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.uniform(scale: 1.0e-3)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}
+
iex> init_fn = Axon.Initializers.uniform()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.uniform(scale: 1.0e-3)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}
@@ -957,26 +957,26 @@

variance_scaling(opts \\ [])

Examples -
iex> init_fn = Axon.Initializers.variance_scaling()
-iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:f, 32}
-
-iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :truncated_normal)
-iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{2, 2}
-iex> Nx.type(t)
-{:bf, 16}
-
-iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :normal)
-iex> t = init_fn.({64, 3, 32, 32}, {:f, 32}, Nx.Random.key(1))
-iex> Nx.shape(t)
-{64, 3, 32, 32}
-iex> Nx.type(t)
-{:f, 32}
+
iex> init_fn = Axon.Initializers.variance_scaling()
+iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:f, 32}
+
+iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :truncated_normal)
+iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{2, 2}
+iex> Nx.type(t)
+{:bf, 16}
+
+iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :normal)
+iex> t = init_fn.({64, 3, 32, 32}, {:f, 32}, Nx.Random.key(1))
+iex> Nx.shape(t)
+{64, 3, 32, 32}
+iex> Nx.type(t)
+{:f, 32}
@@ -1004,16 +1004,16 @@

zeros()

Examples -
iex> init_fn = Axon.Initializers.zeros()
-iex> out = init_fn.({2, 2}, {:f, 32})
+
iex> init_fn = Axon.Initializers.zeros()
+iex> out = init_fn.({2, 2}, {:f, 32})
 iex> out
-#Nx.Tensor<
-  f32[2][2]
-  [
-    [0.0, 0.0],
-    [0.0, 0.0]
-  ]
->
+
#Nx.Tensor< + f32[2][2] + [ + [0.0, 0.0], + [0.0, 0.0] + ] +>
diff --git a/Axon.Layers.html b/Axon.Layers.html
index 726026f0..defaea03 100644
--- a/Axon.Layers.html
+++ b/Axon.Layers.html
@@ -14,7 +14,7 @@
-
+
@@ -141,16 +141,16 @@

These implementations do not assume the responsibility of managing state - instead opting to delegate this responsibility to the caller.

Basic neural networks can be seen as a composition of functions:

input
-|> dense(w1, b1)
-|> relu()
-|> dense(w2, b2)
-|> softmax()

These kinds of models are often referred to as deep feedforward networks +|> dense(w1, b1) +|> relu() +|> dense(w2, b2) +|> softmax()

These kinds of models are often referred to as deep feedforward networks or multilayer perceptrons (MLPs) because information flows forward through the network with no feedback connections. Mathematically, a feedforward network can be represented as:

$$ f(x) = f^{(3)}(f^{(2)}(f^{(1)}(x))) $$

You can see a similar pattern emerge if we condense the call stack -in the previous example:

softmax(dense(relu(dense(input, w1, b1)), w2, b2))

The chain structure shown here is the most common structure used +in the previous example:

softmax(dense(relu(dense(input, w1, b1)), w2, b2))

The chain structure shown here is the most common structure used in neural networks. You can consider each function $f^{(n)}$ as a layer in the neural network - for example $f^{(2)}$ is the 2nd layer in the network. The number of function calls in the structure is the depth of the network. This is where the term deep learning comes from.

Neural networks are often written as the mapping:

$$ y = f(x; \theta) $$

Where $x$ is the input to the neural network and $\theta$ are the -set of learned parameters. In Elixir, you would write this:

y = model(input, params)

From the previous example, params would represent the collection:

{w1, b1, w2, b2}

where w1 and w2 are layer kernels, and b1 and b2 are layer +set of learned parameters. In Elixir, you would write this:

y = model(input, params)

From the previous example, params would represent the collection:

{w1, b1, w2, b2}

where w1 and w2 are layer kernels, and b1 and b2 are layer biases.
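Putting those pieces together, a minimal sketch (with illustrative parameter shapes, not part of the original docs) of the MLP above written directly against Axon.Layers and Axon.Activations:

defmodule MLP do
  import Nx.Defn

  # y = model(input, params), with params = {w1, b1, w2, b2}
  defn model(input, {w1, b1, w2, b2}) do
    input
    |> Axon.Layers.dense(w1, b1)
    |> Axon.Activations.relu()
    |> Axon.Layers.dense(w2, b2)
    |> Axon.Activations.softmax()
  end
end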

@@ -735,19 +735,19 @@

bilinear(input1, input2, kernel, bias \\ 0, opts \\ [])

Examples

-
iex> inp1 = Nx.iota({3, 2}, type: {:f, 32})
-iex> inp2 = Nx.iota({3, 4}, type: {:f, 32})
-iex> kernel = Nx.iota({1, 2, 4}, type: {:f, 32})
-iex> bias = Nx.tensor(1.0)
-iex> Axon.Layers.bilinear(inp1, inp2, kernel, bias)
-#Nx.Tensor<
-  f32[3][1]
-  [
-    [39.0],
-    [455.0],
-    [1319.0]
-  ]
->
+
iex> inp1 = Nx.iota({3, 2}, type: {:f, 32})
+iex> inp2 = Nx.iota({3, 4}, type: {:f, 32})
+iex> kernel = Nx.iota({1, 2, 4}, type: {:f, 32})
+iex> bias = Nx.tensor(1.0)
+iex> Axon.Layers.bilinear(inp1, inp2, kernel, bias)
+#Nx.Tensor<
+  f32[3][1]
+  [
+    [39.0],
+    [455.0],
+    [1319.0]
+  ]
+>
@@ -777,7 +777,7 @@

dense(input, kernel, bias \\ 0, opts \\ [])

$$ y = xW^T + b $$

A dense layer or fully connected layer transforms the input using the given kernel matrix and bias -to compute:

Nx.dot(input, kernel) + bias

Typically, both kernel and bias are learnable +to compute:

Nx.dot(input, kernel) + bias

Typically, both kernel and bias are learnable parameters trained using gradient-based optimization.

@@ -796,17 +796,17 @@

dense(input, kernel, bias \\ 0, opts \\ []) Examples

-
iex> input = Nx.tensor([[1.0, 0.5, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0]], type: {:f, 32})
-iex> kernel = Nx.tensor([[0.2], [0.3], [0.5], [0.8]], type: {:f, 32})
-iex> bias = Nx.tensor([1.0], type: {:f, 32})
-iex> Axon.Layers.dense(input, kernel, bias)
-#Nx.Tensor<
-  f32[2][1]
-  [
-    [2.25],
-    [1.0]
-  ]
->
+
iex> input = Nx.tensor([[1.0, 0.5, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0]], type: {:f, 32})
+iex> kernel = Nx.tensor([[0.2], [0.3], [0.5], [0.8]], type: {:f, 32})
+iex> bias = Nx.tensor([1.0], type: {:f, 32})
+iex> Axon.Layers.dense(input, kernel, bias)
+#Nx.Tensor<
+  f32[2][1]
+  [
+    [2.25],
+    [1.0]
+  ]
+>

@@ -846,37 +846,37 @@

embedding(input, kernel, arg3 \\ [])

Examples -
iex> input = Nx.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
-iex> kernels = Nx.tensor([
-...>  [0.46299999952316284, 0.5562999844551086, 0.18170000612735748],
-...>  [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
-...>  [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
-...>  [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
-...>  [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
-...>  [0.1898999959230423, 0.9311000108718872, 0.8356000185012817],
-...>  [0.6383000016212463, 0.8794000148773193, 0.5282999873161316],
-...>  [0.9523000121116638, 0.7597000002861023, 0.08250000327825546],
-...>  [0.6622999906539917, 0.02329999953508377, 0.8205999732017517],
-...>  [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
-...> ])
-iex> Axon.Layers.embedding(input, kernels)
-#Nx.Tensor<
-  f32[2][4][3]
-  [
-    [
-      [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
-      [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
-      [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
-      [0.1898999959230423, 0.9311000108718872, 0.8356000185012817]
-    ],
-    [
-      [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
-      [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
-      [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
-      [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
-    ]
-  ]
->
+
iex> input = Nx.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
+iex> kernels = Nx.tensor([
+...>  [0.46299999952316284, 0.5562999844551086, 0.18170000612735748],
+...>  [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
+...>  [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
+...>  [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
+...>  [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
+...>  [0.1898999959230423, 0.9311000108718872, 0.8356000185012817],
+...>  [0.6383000016212463, 0.8794000148773193, 0.5282999873161316],
+...>  [0.9523000121116638, 0.7597000002861023, 0.08250000327825546],
+...>  [0.6622999906539917, 0.02329999953508377, 0.8205999732017517],
+...>  [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
+...> ])
+iex> Axon.Layers.embedding(input, kernels)
+#Nx.Tensor<
+  f32[2][4][3]
+  [
+    [
+      [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],
+      [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
+      [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
+      [0.1898999959230423, 0.9311000108718872, 0.8356000185012817]
+    ],
+    [
+      [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],
+      [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],
+      [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],
+      [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]
+    ]
+  ]
+>
@@ -1303,33 +1303,33 @@

global_avg_pool(input, opts \\ [])

Examples -
iex> Axon.Layers.global_avg_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)
-#Nx.Tensor<
-  f32[3][2]
-  [
-    [1.0, 4.0],
-    [7.0, 10.0],
-    [13.0, 16.0]
-  ]
->
-
-iex> Axon.Layers.global_avg_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), channels: :first, keep_axes: true)
-#Nx.Tensor<
-  f32[1][3][1][1]
-  [
-    [
-      [
-        [1.5]
-      ],
-      [
-        [5.5]
-      ],
-      [
-        [9.5]
-      ]
-    ]
-  ]
->
+
iex> Axon.Layers.global_avg_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)
+#Nx.Tensor<
+  f32[3][2]
+  [
+    [1.0, 4.0],
+    [7.0, 10.0],
+    [13.0, 16.0]
+  ]
+>
+
+iex> Axon.Layers.global_avg_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), channels: :first, keep_axes: true)
+#Nx.Tensor<
+  f32[1][3][1][1]
+  [
+    [
+      [
+        [1.5]
+      ],
+      [
+        [5.5]
+      ],
+      [
+        [9.5]
+      ]
+    ]
+  ]
+>
@@ -1376,33 +1376,33 @@

global_lp_pool(input, opts \\ [])

Examples -
iex> Axon.Layers.global_lp_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), norm: 1, channels: :first)
-#Nx.Tensor<
-  f32[3][2]
-  [
-    [3.0, 12.0],
-    [21.0, 30.0],
-    [39.0, 48.0]
-  ]
->
-
-iex> Axon.Layers.global_lp_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 16}), keep_axes: true, channels: :first)
-#Nx.Tensor<
-  f16[1][3][1][1]
-  [
-    [
-      [
-        [3.7421875]
-      ],
-      [
-        [11.2265625]
-      ],
-      [
-        [19.125]
-      ]
-    ]
-  ]
->
+
iex> Axon.Layers.global_lp_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), norm: 1, channels: :first)
+#Nx.Tensor<
+  f32[3][2]
+  [
+    [3.0, 12.0],
+    [21.0, 30.0],
+    [39.0, 48.0]
+  ]
+>
+
+iex> Axon.Layers.global_lp_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 16}), keep_axes: true, channels: :first)
+#Nx.Tensor<
+  f16[1][3][1][1]
+  [
+    [
+      [
+        [3.7421875]
+      ],
+      [
+        [11.2265625]
+      ],
+      [
+        [19.125]
+      ]
+    ]
+  ]
+>
@@ -1447,33 +1447,33 @@

global_max_pool(input, opts \\ [])

Examples -
iex> Axon.Layers.global_max_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)
-#Nx.Tensor<
-  f32[3][2]
-  [
-    [2.0, 5.0],
-    [8.0, 11.0],
-    [14.0, 17.0]
-  ]
->
-
-iex> Axon.Layers.global_max_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), keep_axes: true, channels: :first)
-#Nx.Tensor<
-  f32[1][3][1][1]
-  [
-    [
-      [
-        [3.0]
-      ],
-      [
-        [7.0]
-      ],
-      [
-        [11.0]
-      ]
-    ]
-  ]
->
+
iex> Axon.Layers.global_max_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)
+#Nx.Tensor<
+  f32[3][2]
+  [
+    [2.0, 5.0],
+    [8.0, 11.0],
+    [14.0, 17.0]
+  ]
+>
+
+iex> Axon.Layers.global_max_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), keep_axes: true, channels: :first)
+#Nx.Tensor<
+  f32[1][3][1][1]
+  [
+    [
+      [
+        [3.0]
+      ],
+      [
+        [7.0]
+      ],
+      [
+        [11.0]
+      ]
+    ]
+  ]
+>
@@ -1527,18 +1527,18 @@

lp_pool(input, opts \\ [])

Examples -
iex> t = Nx.tensor([[[0.9450, 0.4684, 1.8146], [1.2663, 0.4354, -0.0781], [-0.4759, 0.3251, 0.8742]]], type: {:f, 32})
-iex> Axon.Layers.lp_pool(t, kernel_size: 2, norm: 2, channels: :first)
-#Nx.Tensor<
-  f32[1][3][1]
-  [
-    [
-      [1.0547149181365967],
-      [1.3390626907348633],
-      [0.5763426423072815]
-    ]
-  ]
->
+
iex> t = Nx.tensor([[[0.9450, 0.4684, 1.8146], [1.2663, 0.4354, -0.0781], [-0.4759, 0.3251, 0.8742]]], type: {:f, 32})
+iex> Axon.Layers.lp_pool(t, kernel_size: 2, norm: 2, channels: :first)
+#Nx.Tensor<
+  f32[1][3][1]
+  [
+    [
+      [1.0547149181365967],
+      [1.3390626907348633],
+      [0.5763426423072815]
+    ]
+  ]
+>
@@ -1589,21 +1589,21 @@

max_pool(input, opts \\ [])

Examples -
iex> t = Nx.tensor([[
-...> [0.051500000059604645, -0.7042999863624573, -0.32899999618530273],
-...> [-0.37130001187324524, 1.6191999912261963, -0.11829999834299088],
-...> [0.7099999785423279, 0.7282999753952026, -0.18639999628067017]]], type: {:f, 32})
-iex> Axon.Layers.max_pool(t, kernel_size: 2, channels: :first)
-#Nx.Tensor<
-  f32[1][3][1]
-  [
-    [
-      [0.051500000059604645],
-      [1.6191999912261963],
-      [0.7282999753952026]
-    ]
-  ]
->
+
iex> t = Nx.tensor([[
+...> [0.051500000059604645, -0.7042999863624573, -0.32899999618530273],
+...> [-0.37130001187324524, 1.6191999912261963, -0.11829999834299088],
+...> [0.7099999785423279, 0.7282999753952026, -0.18639999628067017]]], type: {:f, 32})
+iex> Axon.Layers.max_pool(t, kernel_size: 2, channels: :first)
+#Nx.Tensor<
+  f32[1][3][1]
+  [
+    [
+      [0.051500000059604645],
+      [1.6191999912261963],
+      [0.7282999753952026]
+    ]
+  ]
+>
@@ -1826,13 +1826,13 @@

flatten(input, opts \\ [])

Examples -
iex> Axon.Layers.flatten(Nx.iota({1, 2, 2}, type: {:f, 32}))
-#Nx.Tensor<
-  f32[1][4]
-  [
-    [0.0, 1.0, 2.0, 3.0]
-  ]
->
+
iex> Axon.Layers.flatten(Nx.iota({1, 2, 2}, type: {:f, 32}))
+#Nx.Tensor<
+  f32[1][4]
+  [
+    [0.0, 1.0, 2.0, 3.0]
+  ]
+>
@@ -1878,28 +1878,28 @@

resize(input, opts \\ [])

Examples -
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
-iex> Axon.Layers.resize(img, size: {4, 4}, channels: :first)
-#Nx.Tensor<
-  f32[1][1][4][4]
-  [
-    [
-      [
-        [0.0, 1.0, 1.0, 2.0],
-        [3.0, 4.0, 4.0, 5.0],
-        [3.0, 4.0, 4.0, 5.0],
-        [6.0, 7.0, 7.0, 8.0]
-      ]
-    ]
-  ]
->

+
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
+iex> Axon.Layers.resize(img, size: {4, 4}, channels: :first)
+#Nx.Tensor<
+  f32[1][1][4][4]
+  [
+    [
+      [
+        [0.0, 1.0, 1.0, 2.0],
+        [3.0, 4.0, 4.0, 5.0],
+        [3.0, 4.0, 4.0, 5.0],
+        [6.0, 7.0, 7.0, 8.0]
+      ]
+    ]
+  ]
+>

Error cases

-
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
-iex> Axon.Layers.resize(img, size: {4, 4}, method: :foo)
+
iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})
+iex> Axon.Layers.resize(img, size: {4, 4}, method: :foo)
 ** (ArgumentError) expected :method to be either of :nearest, :bilinear, :bicubic, :lanczos3, :lanczos5, got: :foo
@@ -1979,83 +1979,83 @@

One-dimensional convolution

-
iex> input = Nx.tensor([[[0.1294, -0.6638, 1.0251]], [[ 0.9182,  1.1512, -1.6149]]], type: {:f, 32})
-iex> kernel = Nx.tensor([[[-1.5475, 1.2425]], [[0.1871, 0.5458]], [[-0.4488,  0.8879]]], type: {:f, 32})
-iex> bias = Nx.tensor([0.7791, 0.1676, 1.5971], type: {:f, 32})
-iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
-#Nx.Tensor<
-  f32[2][3][2]
-  [
-    [
-      [-0.24591797590255737, 3.08001708984375],
-      [-0.1704912781715393, 0.6029025316238403],
-      [0.9496372938156128, 2.80519962310791]
-    ],
-    [
-      [0.7885514497756958, -3.0088953971862793],
-      [0.9677201509475708, -0.4984228312969208],
-      [2.207162380218506, -0.3534282445907593]
-    ]
-  ]
->

+
iex> input = Nx.tensor([[[0.1294, -0.6638, 1.0251]], [[ 0.9182,  1.1512, -1.6149]]], type: {:f, 32})
+iex> kernel = Nx.tensor([[[-1.5475, 1.2425]], [[0.1871, 0.5458]], [[-0.4488,  0.8879]]], type: {:f, 32})
+iex> bias = Nx.tensor([0.7791, 0.1676, 1.5971], type: {:f, 32})
+iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
+#Nx.Tensor<
+  f32[2][3][2]
+  [
+    [
+      [-0.24591797590255737, 3.08001708984375],
+      [-0.1704912781715393, 0.6029025316238403],
+      [0.9496372938156128, 2.80519962310791]
+    ],
+    [
+      [0.7885514497756958, -3.0088953971862793],
+      [0.9677201509475708, -0.4984228312969208],
+      [2.207162380218506, -0.3534282445907593]
+    ]
+  ]
+>

Two-dimensional convolution

-
iex> input = Nx.tensor([[[[-1.0476, -0.5041], [-0.9336, 1.5907]]]], type: {:f, 32})
-iex> kernel = Nx.tensor([
-...>  [[[0.7514, 0.7356], [1.3909,  0.6800]]],
-...>  [[[-0.3450,  0.4551], [-0.6275, -0.9875]]],
-...>  [[[1.8587, 0.4722], [0.6058, -1.0301]]]
-...> ], type: {:f, 32})
-iex> bias = Nx.tensor([1.9564, 0.2822, -0.5385], type: {:f, 32})
-iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
-#Nx.Tensor<
-  f32[1][3][1][1]
-  [
-    [
-      [
-        [0.5815491676330566]
-      ],
-      [
-        [-0.5707762241363525]
-      ],
-      [
-        [-4.927865028381348]
-      ]
-    ]
-  ]
->

+
iex> input = Nx.tensor([[[[-1.0476, -0.5041], [-0.9336, 1.5907]]]], type: {:f, 32})
+iex> kernel = Nx.tensor([
+...>  [[[0.7514, 0.7356], [1.3909,  0.6800]]],
+...>  [[[-0.3450,  0.4551], [-0.6275, -0.9875]]],
+...>  [[[1.8587, 0.4722], [0.6058, -1.0301]]]
+...> ], type: {:f, 32})
+iex> bias = Nx.tensor([1.9564, 0.2822, -0.5385], type: {:f, 32})
+iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
+#Nx.Tensor<
+  f32[1][3][1][1]
+  [
+    [
+      [
+        [0.5815491676330566]
+      ],
+      [
+        [-0.5707762241363525]
+      ],
+      [
+        [-4.927865028381348]
+      ]
+    ]
+  ]
+>

Three-dimensional convolution

-
iex> input = Nx.tensor([[[[[-0.6497], [1.0939]], [[-2.5465], [0.7801]]]]], type: {:f, 32})
-iex> kernel = Nx.tensor([
-...>  [[[[ 0.7390], [-0.0927]], [[-0.8675], [-0.9209]]]],
-...>  [[[[-0.6638], [0.4341]], [[0.6368], [1.1846]]]]
-...> ], type: {:f, 32})
-iex> bias = Nx.tensor([-0.4101,  0.1776], type: {:f, 32})
-iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
-#Nx.Tensor<
-  f32[1][2][1][1][1]
-  [
-    [
-      [
-        [
-          [0.49906185269355774]
-        ]
-      ],
-      [
-        [
-          [0.38622811436653137]
-        ]
-      ]
-    ]
-  ]
->
+
iex> input = Nx.tensor([[[[[-0.6497], [1.0939]], [[-2.5465], [0.7801]]]]], type: {:f, 32})
+iex> kernel = Nx.tensor([
+...>  [[[[ 0.7390], [-0.0927]], [[-0.8675], [-0.9209]]]],
+...>  [[[[-0.6638], [0.4341]], [[0.6368], [1.1846]]]]
+...> ], type: {:f, 32})
+iex> bias = Nx.tensor([-0.4101,  0.1776], type: {:f, 32})
+iex> Axon.Layers.conv(input, kernel, bias, channels: :first)
+#Nx.Tensor<
+  f32[1][2][1][1][1]
+  [
+    [
+      [
+        [
+          [0.49906185269355774]
+        ]
+      ],
+      [
+        [
+          [0.38622811436653137]
+        ]
+      ]
+    ]
+  ]
+>
@@ -2113,23 +2113,23 @@

conv_transpose(input, kernel, bias \\ 0, opts \\ [])

Examples

-
iex> input = Nx.iota({1, 3, 3}, type: {:f, 32})
-iex> kernel = Nx.iota({6, 3, 2}, type: {:f, 32})
-iex> bias = Nx.tensor(1.0, type: {:f, 32})
-iex> Axon.Layers.conv_transpose(input, kernel, bias, channels: :first)
-#Nx.Tensor<
-  f32[1][6][4]
-  [
-    [
-      [40.0, 79.0, 94.0, 43.0],
-      [94.0, 205.0, 256.0, 133.0],
-      [148.0, 331.0, 418.0, 223.0],
-      [202.0, 457.0, 580.0, 313.0],
-      [256.0, 583.0, 742.0, 403.0],
-      [310.0, 709.0, 904.0, 493.0]
-    ]
-  ]
->

+
iex> input = Nx.iota({1, 3, 3}, type: {:f, 32})
+iex> kernel = Nx.iota({6, 3, 2}, type: {:f, 32})
+iex> bias = Nx.tensor(1.0, type: {:f, 32})
+iex> Axon.Layers.conv_transpose(input, kernel, bias, channels: :first)
+#Nx.Tensor<
+  f32[1][6][4]
+  [
+    [
+      [40.0, 79.0, 94.0, 43.0],
+      [94.0, 205.0, 256.0, 133.0],
+      [148.0, 331.0, 418.0, 223.0],
+      [202.0, 457.0, 580.0, 313.0],
+      [256.0, 583.0, 742.0, 403.0],
+      [310.0, 709.0, 904.0, 493.0]
+    ]
+  ]
+>

diff --git a/Axon.Loop.State.html b/Axon.Loop.State.html
index e0d3fc32..e70b01e3 100644
--- a/Axon.Loop.State.html
+++ b/Axon.Loop.State.html
@@ -14,7 +14,7 @@
-
+
@@ -133,16 +133,16 @@

-

Accumulated state in an Axon.Loop.

Loop state is a struct:

%State{
-  epoch: integer(),
-  max_epoch: integer(),
-  iteration: integer(),
-  max_iteration: integer(),
-  metrics: map(string(), container()),
-  times: map(integer(), integer()),
-  step_state: container(),
-  handler_metadata: container()
-}

epoch is the current epoch, starting at 0, of the nested loop. +

Accumulated state in an Axon.Loop.

Loop state is a struct:

%State{
+  epoch: integer(),
+  max_epoch: integer(),
+  iteration: integer(),
+  max_iteration: integer(),
+  metrics: map(string(), container()),
+  times: map(integer(), integer()),
+  step_state: container(),
+  handler_metadata: container()
+}

epoch is the current epoch, starting at 0, of the nested loop. Defaults to 0.

max_epoch is the maximum number of epochs the loop should run for. Defaults to 1.

iteration is the current iteration of the inner loop. In supervised settings, this will be the current batch. Defaults to 0.

max_iteration is the maximum number of iterations the loop should diff --git a/Axon.Loop.html b/Axon.Loop.html index e954b64d..6260d82e 100644 --- a/Axon.Loop.html +++ b/Axon.Loop.html @@ -14,7 +14,7 @@ - + @@ -135,66 +135,66 @@

Abstraction for modeling a reduction of a dataset with an accumulated state for a number of epochs.

Inspired heavily by PyTorch Ignite.

The main abstraction is the %Axon.Loop{} struct, which controls a nested -reduction of the form:

Enum.reduce(1..max_epochs, state, fn epoch, state ->
-  Enum.reduce(data, state, &batch_step/2)
-end)

data is assumed to be an Enumerable or Stream of input data which is +reduction of the form:

Enum.reduce(1..max_epochs, state, fn epoch, state ->
+  Enum.reduce(data, state, &batch_step/2)
+end)

data is assumed to be an Enumerable or Stream of input data which is handled by a processing function, batch_step. The purpose of the loop abstraction is to take away much of the boilerplate code used in solving machine learning tasks. Tasks such as normalizing a dataset, hyperparameter optimization, -or training machine learning models boil down to writing one function:

defn batch_step(batch, state) do
+or training machine learning models boil down to writing one function:

defn batch_step(batch, state) do
   # ...do something with batch...
   updated_state
-end

For tasks such as training a neural network, state will encapsulate things +end

For tasks such as training a neural network, state will encapsulate things such as model and optimizer state. For supervised learning tasks, batch_step -might look something like:

defn batch_step({inputs, targets}, state) do
-  %{parameters: params, optimizer_state: optim_state} = state
+might look something like:

defn batch_step({inputs, targets}, state) do
+  %{parameters: params, optimizer_state: optim_state} = state
 
-  gradients = grad(params, objective_fn.(&1, inputs, targets))
-  {updates, new_optim_state} = optimizer.(optim_state, params, gradients)
+  gradients = grad(params, objective_fn.(&1, inputs, targets))
+  {updates, new_optim_state} = optimizer.(optim_state, params, gradients)
 
-  new_params = apply_updates(params, updates)
+  new_params = apply_updates(params, updates)
 
-  %{parameters: new_params, optimizer_state: optim_state}
-end

batch_step takes a batch of {input, target} pairs and the current state, + %{parameters: new_params, optimizer_state: optim_state} +end

batch_step takes a batch of {input, target} pairs and the current state, and updates the model parameters based on the gradients received from some arbitrary objective function. This function will run in a nested loop, iterating over the entire dataset for N epochs before finally returning the trained model state. By defining 1 function, we've created a training loop that works for most machine learning models.

In actuality, the loop abstraction accumulates a struct, %Axon.Loop.State{}, which looks -like (assuming container is a generic Elixir container of tensors, e.g. map, tuple, etc.):

%Axon.Loop.State{
-  epoch: integer(),
-  max_epoch: integer(),
-  iteration: integer(),
-  max_iteration: integer(),
-  metrics: map(string(), container()),
-  times: map(integer(), integer()),
-  step_state: container()
-}

batch_step takes in the batch and the step state field and returns a step_state, +like (assuming container is a generic Elixir container of tensors, e.g. map, tuple, etc.):

%Axon.Loop.State{
+  epoch: integer(),
+  max_epoch: integer(),
+  iteration: integer(),
+  max_iteration: integer(),
+  metrics: map(string(), container()),
+  times: map(integer(), integer()),
+  step_state: container()
+}

batch_step takes in the batch and the step state field and returns a step_state, which is a generic container of state accumulated at each iteration. The rest of the fields in the state struct are updated automatically behind the scenes.

The loop must start from some initial step state, thus most tasks must also provide an additional initialization function to provide some starting point for the step state. For machine learning tasks, the initialization function will return things like initial model parameters and optimizer state.

Typically, the final output of the loop is the accumulated final state; however, you may optionally apply an output transform to extract specific values at the end of the -loop. For example, Axon.Loop.trainer/4 by default extracts trained model state:

output_transform = fn state ->
-  state.step_state[:model_state]
-end

+loop. For example, Axon.Loop.trainer/4 by default extracts trained model state:

output_transform = fn state ->
+  state.step_state[:model_state]
+end

Initialize and Step

The core of the Axon loop are the init and step functions. The initialization is an -arity-0 function which provides an initial step state:

init = fn ->
-  %{params: Axon.init(model)}
-end

While the step function is the batch_step function mentioned earlier:

step = fn data, state ->
+arity-0 function which provides an initial step state:

init = fn ->
+  %{params: Axon.init(model)}
+end

While the step function is the batch_step function mentioned earlier:

step = fn data, state ->
   new_state = # ...do something...
   new_state
-end

Note that any optimization and training anonymous functions that need to be used in the -batch_step function can be passed as extra arguments. For example:

step_with_training_arguments = fn data, state, optimizer_update_fn, state_update_fn ->
+end

Note that any optimization and training anonymous functions that need to be used in the +batch_step function can be passed as extra arguments. For example:

step_with_training_arguments = fn data, state, optimizer_update_fn, state_update_fn ->
   # ...do something...
-end
+end
 
-step = &(step_with_training_arguments.(&1, &2, actual_optimizer_update_fn, actual_state_update_fn))

+step = &(step_with_training_arguments.(&1, &2, actual_optimizer_update_fn, actual_state_update_fn))

@@ -202,27 +202,27 @@

Often times you want to compute metrics associated with your training iterations. To accomplish this, you can attach metrics to each Axon.Loop. Assuming a batch_step -function which looks like:

defn batch_step({inputs, targets}, state) do
-  %{parameters: params, optimizer_state: optim_state} = state
+function which looks like:

defn batch_step({inputs, targets}, state) do
+  %{parameters: params, optimizer_state: optim_state} = state
 
-  gradients = grad(params, objective_fn.(&1, inputs, targets))
-  {updates, new_optim_state} = optimizer.(optim_state, params, gradients)
+  gradients = grad(params, objective_fn.(&1, inputs, targets))
+  {updates, new_optim_state} = optimizer.(optim_state, params, gradients)
 
-  new_params = apply_updates(params, updates)
+  new_params = apply_updates(params, updates)
 
   # Shown for simplicity, you can optimize this by calculating preds
   # along with the gradient calculation
-  preds = model_fn.(params, inputs)
+  preds = model_fn.(params, inputs)
 
-  %{
+  %{
     y_true: targets,
     y_pred: preds,
     parameters: new_params,
     optimizer_state: optim_state
-  }
-end

You can attach metrics to this by using Axon.Loop.metric/4:

Axon.Loop.loop(&batch_step/2)
-|> Axon.Loop.metric("Accuracy", :accuracy, fn %{y_true: y_, y_pred: y} -> [y_, y] end)
-|> Axon.Loop.run(data)

Because metrics work directly on step_state, you typically need to provide an output + } +end

You can attach metrics to this by using Axon.Loop.metric/4:

Axon.Loop.loop(&batch_step/2)
+|> Axon.Loop.metric("Accuracy", :accuracy, fn %{y_true: y_, y_pred: y} -> [y_, y] end)
+|> Axon.Loop.run(data)

Because metrics work directly on step_state, you typically need to provide an output transform to indicate which values should be passed to your metric function. By default, Axon assumes a supervised training task with the fields :y_true and :y_pred present in the step state. See Axon.Loop.metric/4 for more information.

Metrics will be tracked in the loop state using the user-provided key. Metrics integrate @@ -234,24 +234,24 @@

Events and Handlers

You can instrument several points in the loop using event handlers. By default, several events -are fired when running a loop:

events = [
+are fired when running a loop:

events = [
   :started,             # After loop state initialization
   :epoch_started,       # On epoch start
   :iteration_started,   # On iteration start
   :iteration_completed, # On iteration complete
   :epoch_completed,     # On epoch complete
   :epoch_halted,        # On epoch halt, if early halted
-]

You can attach event handlers to events using Axon.Loop.handle_event/4:

loop
-|> Axon.Loop.handle_event(:iteration_completed, &log_metrics/1, every: 100)
-|> Axon.Loop.run(data)

The above will trigger log_metrics/1 every 100 times the :iteration_completed event +]

You can attach event handlers to events using Axon.Loop.handle_event/4:

loop
+|> Axon.Loop.handle_event(:iteration_completed, &log_metrics/1, every: 100)
+|> Axon.Loop.run(data)

The above will trigger log_metrics/1 every 100 times the :iteration_completed event is fired. Event handlers must return a tuple {status, state}, where status is an atom with one of the following values:

:continue   # Continue epoch, continue looping
 :halt_epoch # Halt the epoch, continue looping
 :halt_loop  # Halt looping

And state is an updated Axon.Loop.State struct. Handler functions take as input the current loop state.

It's important to note that event handlers are triggered in the order they are attached to the loop. If you have two handlers on the same event, they will trigger in order:

loop
-|> Axon.Loop.handle_event(:epoch_completed, &normalize_state/1) # Runs first
-|> Axon.Loop.handle_event(:epoch_completed, &log_state/1) # Runs second

You may provide filters to filter when event handlers trigger. See Axon.Loop.handle_event/4 +|> Axon.Loop.handle_event(:epoch_completed, &normalize_state/1) # Runs first +|> Axon.Loop.handle_event(:epoch_completed, &log_state/1) # Runs second

You may provide filters to filter when event handlers trigger. See Axon.Loop.handle_event/4 for more details on valid filters.

@@ -267,7 +267,7 @@

Running loops

-

In order to execute a loop, you should use Axon.Loop.run/3:

Axon.Loop.run(loop, data, epochs: 10)

+

In order to execute a loop, you should use Axon.Loop.run/3:

Axon.Loop.run(loop, data, epochs: 10)

@@ -275,8 +275,8 @@

At times you may want to resume a loop from some previous state. You can accomplish this with Axon.Loop.from_state/2:

loop
-|> Axon.Loop.from_state(state)
-|> Axon.Loop.run(data)
+|> Axon.Loop.from_state(state) +|> Axon.Loop.run(data)
@@ -511,7 +511,7 @@

checkpoint(loop, opts \\ [])

- + View Source @@ -528,21 +528,21 @@

checkpoint(loop, opts \\ [])

obtained from Axon.Loop.serialize_state/2. Serialization options will be forwarded to Axon.Loop.serialize_state/2.

You can customize checkpoint events by passing :event and :filter options:

loop
-|> Axon.Loop.checkpoint(event: :iteration_completed, filter: [every: 50])

Checkpoints are saved under the checkpoint/ directory with a pattern +|> Axon.Loop.checkpoint(event: :iteration_completed, filter: [every: 50])

Checkpoints are saved under the checkpoint/ directory with a pattern of checkpoint_{epoch}_{iteration}.ckpt. You can customize the path and pattern with the :path and :file_pattern options:

my_file_pattern =
-  fn %Axon.Loop.State{epoch: epoch, iteration: iter} ->
-    "checkpoint_#{epoch}_#{iter}"
-  end
+  fn %Axon.Loop.State{epoch: epoch, iteration: iter} ->
+    "checkpoint_#{epoch}_#{iter}"
+  end
 
 loop
-|> Axon.Loop.checkpoint(path: "my_checkpoints", file_pattern: my_file_pattern)

If you'd like to only save checkpoints based on some metric criteria, +|> Axon.Loop.checkpoint(path: "my_checkpoints", file_pattern: my_file_pattern)

If you'd like to only save checkpoints based on some metric criteria, you can specify the :criteria option. :criteria must be a valid key in metrics:

loop
-|> Axon.Loop.checkpoint(criteria: "validation_loss")

The default criteria mode is :min, meaning the min score metric will +|> Axon.Loop.checkpoint(criteria: "validation_loss")

The default criteria mode is :min, meaning the min score metric will be considered "best" when deciding to save on a given event. Valid modes are :min and :max:

loop
-|> Axon.Loop.checkpoint(criteria: "validation_accuracy", mode: :max)

+|> Axon.Loop.checkpoint(criteria: "validation_accuracy", mode: :max)

@@ -567,7 +567,7 @@

checkpoint(loop, opts \\ [])

deserialize_state(serialized, opts \\ [])

- + View Source @@ -596,7 +596,7 @@

deserialize_state(serialized, opts \\ [])

early_stop(loop, monitor, opts \\ [])

- + View Source @@ -611,18 +611,18 @@

early_stop(loop, monitor, opts \\ [])

improvement of a given metric.

You must specify a metric to monitor and the metric must be present in the loop state. Typically, this will be a validation metric:

model
-|> Axon.Loop.trainer(loss, optim)
-|> Axon.Loop.metric(:accuracy)
-|> Axon.Loop.validate(val_data)
-|> Axon.Loop.early_stop("validation_accuracy")

It's important to remember that handlers are executed in the +|> Axon.Loop.trainer(loss, optim) +|> Axon.Loop.metric(:accuracy) +|> Axon.Loop.validate(val_data) +|> Axon.Loop.early_stop("validation_accuracy")

It's important to remember that handlers are executed in the order they are added to the loop. For example, if you'd like to checkpoint a loop after every epoch and use early stopping, most likely you want to add the checkpoint handler before the early stopping handler:

model
-|> Axon.Loop.trainer(loss, optim)
-|> Axon.Loop.metric(:accuracy)
-|> Axon.Loop.checkpoint()
-|> Axon.Loop.early_stop("accuracy")

That will ensure checkpoint is always fired, even if the loop +|> Axon.Loop.trainer(loss, optim) +|> Axon.Loop.metric(:accuracy) +|> Axon.Loop.checkpoint() +|> Axon.Loop.early_stop("accuracy")

That will ensure checkpoint is always fired, even if the loop exited early.

@@ -635,7 +635,7 @@

early_stop(loop, monitor, opts \\ [])

eval_step(model)

- + View Source @@ -660,7 +660,7 @@

eval_step(model)

evaluator(model)

- + View Source @@ -673,18 +673,18 @@

evaluator(model)

Creates a supervised evaluator from a model.

An evaluator can be used for things such as testing and validation of models after or during training. It assumes model is an Axon struct, container of structs, or a tuple of init / apply functions. model_state must be a -container usable from within model.

The evaluator returns a step state of the form:

%{
+container usable from within model.

The evaluator returns a step state of the form:

%{
   y_true: labels,
   y_pred: predictions
-}

Such that you can attach any number of supervised metrics to the evaluation +}

Such that you can attach any number of supervised metrics to the evaluation loop:

model
-|> Axon.Loop.evaluator()
-|> Axon.Loop.metric("Accuracy", :accuracy)

You must pass a compatible trained model state to Axon.Loop.run/4 when using +|> Axon.Loop.evaluator() +|> Axon.Loop.metric("Accuracy", :accuracy)

You must pass a compatible trained model state to Axon.Loop.run/4 when using supervised evaluation loops. For example, if you've binded the result of a training run to trained_model_state, you can run the trained model through an evaluation run like this:

model
-|> Axon.Loop.evaluator()
-|> Axon.Loop.run(data, trained_model_state, compiler: EXLA)

This function applies an output transform which returns the map of metrics accumulated +|> Axon.Loop.evaluator() +|> Axon.Loop.run(data, trained_model_state, compiler: EXLA)

This function applies an output transform which returns the map of metrics accumulated over the given loop.

@@ -697,7 +697,7 @@

evaluator(model)

from_state(loop, state)

- + View Source @@ -709,7 +709,7 @@

from_state(loop, state)

Attaches state to the given loop in order to resume looping from a previous state.

It's important to note that a loop's attached state takes precedence -over defined initialization functions. Given initialization function:

defn init_state(), do: %{foo: 1, bar: 2}

And an attached state:

state = %State{step_state: %{foo: 2, bar: 3}}

init_state/0 will never execute, and instead the initial step state +over defined initialization functions. Given initialization function:

defn init_state(), do: %{foo: 1, bar: 2}

And an attached state:

state = %State{step_state: %{foo: 2, bar: 3}}

init_state/0 will never execute, and instead the initial step state of %{foo: 2, bar: 3} will be used.

@@ -724,7 +724,7 @@

from_state(loop, state)

handle_event(loop, event, handler, filter \\ :always)

- + View Source @@ -736,20 +736,20 @@

handle_event(loop, event, handler, filter \

Adds a handler function to the loop which will be triggered on event with an optional filter.

Events take place at different points during loop execution. The default -events are:

events = [
+events are:

events = [
   :started,             # After loop state initialization
   :epoch_started,       # On epoch start
   :iteration_started,   # On iteration start
   :iteration_completed, # On iteration complete
   :epoch_completed,     # On epoch complete
   :epoch_halted,        # On epoch halt, if early halted
-]

Generally, event handlers are side-effecting operations which provide some +]

Generally, event handlers are side-effecting operations which provide some sort of inspection into the loop's progress. It's important to note that if you define multiple handlers to be triggered on the same event, they will execute in order from when they were attached to the training loop:

loop
-|> Axon.Loop.handle_event(:epoch_started, &normalize_step_state/1) # executes first
-|> Axon.Loop.handle_event(:epoch_started, &log_step_state/1) # executes second

Thus, if you have separate handlers which alter or depend on loop state, +|> Axon.Loop.handle_event(:epoch_started, &normalize_step_state/1) # executes first +|> Axon.Loop.handle_event(:epoch_started, &log_step_state/1) # executes second

Thus, if you have separate handlers which alter or depend on loop state, you need to ensure they are ordered correctly, or combined into a single event handler for maximum control over execution.

event must be an atom representing the event to trigger handler or a list of atoms indicating handler should be triggered on multiple events. @@ -779,7 +779,7 @@

handle_event(loop, event, handler, filter \

kino_vega_lite_plot(loop, plot, metric, opts \\ [])

- + View Source @@ -790,16 +790,16 @@

kino_vega_lite_plot(loop, plot, metric, opt

Adds a handler function which updates a Kino.VegaLite plot.

By default, this will run after every iteration.

You must specify a plot to push to and a metric to track. The :x axis will be the iteration count, labeled "step". The metric must match the name given to the :y axis in your VegaLite plot:

plot =
-  Vl.new()
-  |> Vl.mark(:line)
-  |> Vl.encode_field(:x, "step", type: :quantitative)
-  |> Vl.encode_field(:y, "loss", type: :quantitative)
-  |> Kino.VegaLite.new()
-  |> Kino.render()
+  Vl.new()
+  |> Vl.mark(:line)
+  |> Vl.encode_field(:x, "step", type: :quantitative)
+  |> Vl.encode_field(:y, "loss", type: :quantitative)
+  |> Kino.VegaLite.new()
+  |> Kino.render()
 
 model
-|> Axon.Loop.trainer(loss, optim)
-|> Axon.Loop.kino_vega_lite_plot(plot, "loss")

+|> Axon.Loop.trainer(loss, optim) +|> Axon.Loop.kino_vega_lite_plot(plot, "loss")

@@ -819,7 +819,7 @@

kino_vega_lite_plot(loop, plot, metric, opt

log(loop, message_fn, opts \\ [])

- + View Source @@ -852,7 +852,7 @@

log(loop, message_fn, opts \\ [])

loop(step_fn, init_fn \\ &default_init/2, output_transform \\ & &1)

- + View Source @@ -864,13 +864,13 @@

loop(step_fn, init_fn \\ &default_init/

Creates a loop from step_fn, an optional init_fn, and an optional output_transform.

step_fn is an arity-2 function which takes a batch and state -and returns an updated step state:

defn batch_step(batch, step_state) do
+and returns an updated step state:

defn batch_step(batch, step_state) do
   step_state + 1
-end

init_fn by default is an identity function which forwards its +end

init_fn by default is an identity function which forwards its initial arguments as the model state. You should define a custom -initialization function if you require a different behavior:

defn init_step_state(state) do
-  Map.merge(%{foo: 1}, state)
-end

You may use state in conjunction with initialization functions in +initialization function if you require a different behavior:

defn init_step_state(state) do
+  Map.merge(%{foo: 1}, state)
+end

You may use state in conjunction with initialization functions in init_fn. For example, train_step/3 uses initial state as initial model parameters to allow initializing models from partial parameterizations.

step_batch/2 and init_step_state/1 are typically called from within Nx.Defn.jit/3. While JIT-compilation will work with anonymous functions, @@ -895,7 +895,7 @@

loop(step_fn, init_fn \\ &default_init/

metric(loop, metric, name \\ nil, accumulate \\ :running_average, transform_or_fields \\ [:y_true, :y_pred])

- + View Source @@ -908,20 +908,20 @@

metric(loop, metric, name \\ nil, accumulat

Adds a metric of the given name to the loop.

A metric is a function which tracks or measures some value with respect to values in the step state. For example, when training classification models, it's common to track the model's accuracy during training:

loop
-|> Axon.Loop.metric(:accuracy, "Accuracy")

By default, metrics assume a supervised learning task and extract the fields +|> Axon.Loop.metric(:accuracy, "Accuracy")

By default, metrics assume a supervised learning task and extract the fields [:y_true, :y_pred] from the step state. If you wish to work on a different value, you can use an output transform. An output transform is a list of keys to extract from the output state, or a function which returns a flattened list of values to pass to the given metric function. Values received from output -transforms are passed to the given metric using:

value = output_transform.(step_state)
-apply(metric, value)

Thus, even if you want your metric to work on a container, your output transform +transforms are passed to the given metric using:

value = output_transform.(step_state)
+apply(metric, value)

Thus, even if you want your metric to work on a container, your output transform must return a list.

metric must be an atom which matches the name of a metric in Axon.Metrics, or an arbitrary function which returns a tensor or container.

name must be a string or atom used to store the computed metric in the loop state. If names conflict, the last attached metric will take precedence:

loop
-|> Axon.Loop.metric(:mean_squared_error, "Error") # Will be overwritten
-|> Axon.Loop.metric(:mean_absolute_error, "Error") # Will be used

By default, metrics keep a running average of the metric calculation. You can +|> Axon.Loop.metric(:mean_squared_error, "Error") # Will be overwritten +|> Axon.Loop.metric(:mean_absolute_error, "Error") # Will be used

By default, metrics keep a running average of the metric calculation. You can override this behavior by changing accumulate:

loop
-|> Axon.Loop.metric(:true_negatives, "tn", :running_sum)

Accumulation function can be one of the accumulation combinators in Axon.Metrics +|> Axon.Loop.metric(:true_negatives, "tn", :running_sum)

Accumulation function can be one of the accumulation combinators in Axon.Metrics or an arity-3 function of the form: accumulate(acc, obs, i) :: new_acc.

@@ -936,7 +936,7 @@

metric(loop, metric, name \\ nil, accumulat

monitor(loop, metric, fun, name, opts \\ [])

- + View Source @@ -981,7 +981,7 @@

monitor(loop, metric, fun, name, opts \\ []

reduce_lr_on_plateau(loop, monitor, opts \\ [])

- + View Source @@ -997,10 +997,10 @@

reduce_lr_on_plateau(loop, monitor, opts \\ improvement of a given metric.

You must specify a metric to monitor and the metric must be present in the loop state. Typically, this will be a validation metric:

model
-|> Axon.Loop.trainer(loss, optim)
-|> Axon.Loop.metric(:accuracy)
-|> Axon.Loop.validate(model, val_data)
-|> Axon.Loop.reduce_lr_on_plateau("accuracy", mode: :max)

+|> Axon.Loop.trainer(loss, optim) +|> Axon.Loop.metric(:accuracy) +|> Axon.Loop.validate(model, val_data) +|> Axon.Loop.reduce_lr_on_plateau("accuracy", mode: :max)

@@ -1024,7 +1024,7 @@

reduce_lr_on_plateau(loop, monitor, opts \\

run(loop, data, init_state \\ %{}, opts \\ [])

- + View Source @@ -1049,7 +1049,9 @@

run(loop, data, init_state \\ %{}, opts \\ to true.

  • :garbage_collect - whether or not to garbage collect after each loop iteration. This may prevent OOMs, but it will slow down training.

  • :strict? - whether or not to compile step functions strictly. If this flag is set, the loop will raise on any cache miss during the training loop. Defaults -to true.

  • :debug - run loop in debug mode to trace loop progress. Defaults to +to true.

  • :force_garbage_collect? - whether or not to force garbage collection after each +iteration. This may help avoid OOMs when training large models, but it will slow +training down.

  • :debug - run loop in debug mode to trace loop progress. Defaults to false.

  • Additional options are forwarded to Nx.Defn.jit as JIT-options. If no JIT options are set, the default options set with Nx.Defn.default_options are used.

    @@ -1066,7 +1068,7 @@

    run(loop, data, init_state \\ %{}, opts \\

    serialize_state(state, opts \\ [])

    - + View Source @@ -1098,7 +1100,7 @@

    serialize_state(state, opts \\ [])

    train_step(model, loss, optimizer, opts \\ [])

    - + View Source @@ -1155,7 +1157,7 @@

    train_step(model, loss, optimizer, opts \\

    trainer(model, loss, optimizer, opts \\ [])

    - + View Source @@ -1186,13 +1188,13 @@

    trainer(model, loss, optimizer, opts \\ []) arity-3 function which scales gradient updates with respect to input parameters, optimizer state, and gradients. See Polaris.Updates for more information on building optimizers.

    This function creates a step function which outputs a map consisting of the following -fields for step_state:

    %{
    -  y_pred: tensor() | container(tensor()), # Model predictions for use in metrics
    -  y_true: tensor() | container(tensor()), # True labels for use in metrics
    -  loss: tensor(), # Running average of loss over epoch
    -  model_state: container(tensor()), # Model parameters and state
    -  optimizer_state: container(tensor()) # Optimizer state associated with each parameter
    -}

    +fields for step_state:

    %{
    +  y_pred: tensor() | container(tensor()), # Model predictions for use in metrics
    +  y_true: tensor() | container(tensor()), # True labels for use in metrics
    +  loss: tensor(), # Running average of loss over epoch
    +  model_state: container(tensor()), # Model parameters and state
    +  optimizer_state: container(tensor()) # Optimizer state associated with each parameter
    +}

    @@ -1204,42 +1206,42 @@

    Basic usage

    -
    data = Stream.zip(input, target)
    +
    data = Stream.zip(input, target)
     
    -model = Axon.input("input", shape: {nil, 32}) |> Axon.dense(1, activation: :sigmoid)
    +model = Axon.input("input", shape: {nil, 32}) |> Axon.dense(1, activation: :sigmoid)
     
     model
    -|> Axon.Loop.trainer(:binary_cross_entropy, :adam)
    -|> Axon.Loop.run(data)

    +|> Axon.Loop.trainer(:binary_cross_entropy, :adam) +|> Axon.Loop.run(data)

    Customizing Optimizer

    model
    -|> Axon.Loop.trainer(:binary_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.05))
    -|> Axon.Loop.run(data)

    +|> Axon.Loop.trainer(:binary_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.05)) +|> Axon.Loop.run(data)

    Custom loss

    -
    loss_fn = fn y_true, y_pred -> Nx.cos(y_true, y_pred) end
    +
    loss_fn = fn y_true, y_pred -> Nx.cos(y_true, y_pred) end
     
     model
    -|> Axon.Loop.trainer(loss_fn, Polaris.Optimizers.rmsprop(learning_rate: 0.01))
    -|> Axon.Loop.run(data)

    +|> Axon.Loop.trainer(loss_fn, Polaris.Optimizers.rmsprop(learning_rate: 0.01)) +|> Axon.Loop.run(data)

    Multiple objectives with multi-output model

    -
    model = {Axon.input("input_0", shape: {nil, 1}), Axon.input("input_1", shape: {nil, 2})}
    -loss_weights = [mean_squared_error: 0.5, mean_absolute_error: 0.5]
    +
    model = {Axon.input("input_0", shape: {nil, 1}), Axon.input("input_1", shape: {nil, 2})}
    +loss_weights = [mean_squared_error: 0.5, mean_absolute_error: 0.5]
     
     model
    -|> Axon.Loop.trainer(loss_weights, :sgd)
    -|> Axon.Loop.run(data)

    +|> Axon.Loop.trainer(loss_weights, :sgd) +|> Axon.Loop.run(data)

    @@ -1266,7 +1268,7 @@

    validate(loop, model, validation_data, opts \\ [])

    - + View Source @@ -1280,25 +1282,25 @@

    validate(loop, model, validation_data, opts against the given validation set.

    This handler assumes the loop state matches the state initialized in a supervised training loop. Typically, you'd call this immediately after creating a supervised training loop:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -|> Axon.Loop.validate(model, validation_data)

    Please note that you must pass the same (or an equivalent) model +|> Axon.Loop.trainer(:mean_squared_error, :sgd) +|> Axon.Loop.validate(model, validation_data)

    Please note that you must pass the same (or an equivalent) model into this method so it can be used during the validation loop. The metrics which are computed are those which are present BEFORE the validation handler was added to the loop. For the following loop:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -|> Axon.Loop.metric(:mean_absolute_error)
    -|> Axon.Loop.validate(model, validation_data)
    -|> Axon.Loop.metric(:binary_cross_entropy)

    only :mean_absolute_error will be computed at validation time.

    The returned loop state is altered to contain validation +|> Axon.Loop.trainer(:mean_squared_error, :sgd) +|> Axon.Loop.metric(:mean_absolute_error) +|> Axon.Loop.validate(model, validation_data) +|> Axon.Loop.metric(:binary_cross_entropy)

    only :mean_absolute_error will be computed at validation time.

    The returned loop state is altered to contain validation metrics for use in later handlers such as early stopping and model checkpoints. Since the order of execution of event handlers is in the same order they are declared in the training loop, you MUST call this method before any other handler which expects or may use validation metrics.

    By default the validation loop runs after every epoch; however, you can customize it by overriding the default event and event filters:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -|> Axon.Loop.metric(:mean_absolute_error)
    -|> Axon.Loop.validate(model, validation_data, event: :iteration_completed, filter: [every: 10_000])
    -|> Axon.Loop.metric(:binary_cross_entropy)
    +|> Axon.Loop.trainer(:mean_squared_error, :sgd) +|> Axon.Loop.metric(:mean_absolute_error) +|> Axon.Loop.validate(model, validation_data, event: :iteration_completed, filter: [every: 10_000]) +|> Axon.Loop.metric(:binary_cross_entropy) diff --git a/Axon.LossScale.html b/Axon.LossScale.html index 3f7303b1..93f9ea83 100644 --- a/Axon.LossScale.html +++ b/Axon.LossScale.html @@ -14,7 +14,7 @@ - + @@ -136,7 +136,7 @@

    Implementations of loss-scalers for use in mixed precision training.

    Loss scaling is used to prevent underflow when using mixed precision during the model training process. Each loss-scale -implementation here returns a 3-tuple of the functions:

    {init_fn, scale_fn, unscale_fn, adjust_fn} = Axon.LossScale.static(Nx.pow(2, 15))

    You can use these to scale/unscale loss and gradients as well +implementation here returns a 3-tuple of the functions:

    {init_fn, scale_fn, unscale_fn, adjust_fn} = Axon.LossScale.static(Nx.pow(2, 15))

    You can use these to scale/unscale loss and gradients as well as adjust the loss scale state.

    Axon.Loop.trainer/3 builds loss-scaling in by default. You can reference the Axon.Loop.train_step/3 implementation to see how loss-scaling is applied in practice.

    diff --git a/Axon.Losses.html b/Axon.Losses.html index 33943727..3685fbd0 100644 --- a/Axon.Losses.html +++ b/Axon.Losses.html @@ -14,7 +14,7 @@ - + @@ -140,31 +140,31 @@

    measuring the loss with respect to the input target y_true and input prediction y_pred. As an example, the mean_squared_error/2 loss function produces a tensor whose values are the mean squared -error between targets and predictions:

    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_squared_error(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.5, 0.5]
    ->

    It's common to compute the loss across an entire minibatch. +error between targets and predictions:

    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_squared_error(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.5, 0.5]
    +>

    It's common to compute the loss across an entire minibatch. You can easily do so by specifying a :reduction mode, or -by composing one of these with an Nx reduction method:

    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +by composing one of these with an Nx reduction method:

    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.5
    ->

    You can even compose loss functions:

    defn my_strange_loss(y_true, y_pred) do
    +>

    You can even compose loss functions:

    defn my_strange_loss(y_true, y_pred) do
       y_true
    -  |> Axon.Losses.mean_squared_error(y_pred)
    -  |> Axon.Losses.binary_cross_entropy(y_pred)
    -  |> Nx.sum()
    -end

    Or, more commonly, you can combine loss functions with penalties for -regularization:

    defn regularized_loss(params, y_true, y_pred) do
    -  loss = Axon.mean_squared_error(y_true, y_pred)
    -  penalty = l2_penalty(params)
    -  Nx.sum(loss) + penalty
    -end

    All of the functions in this module are implemented as + |> Axon.Losses.mean_squared_error(y_pred) + |> Axon.Losses.binary_cross_entropy(y_pred) + |> Nx.sum() +end

    Or, more commonly, you can combine loss functions with penalties for +regularization:

    defn regularized_loss(params, y_true, y_pred) do
    +  loss = Axon.mean_squared_error(y_true, y_pred)
    +  penalty = l2_penalty(params)
    +  Nx.sum(loss) + penalty
    +end

    All of the functions in this module are implemented as numerical functions and can be JIT or AOT compiled with any supported Nx compiler.

    @@ -444,29 +444,29 @@

    binary_cross_entropy(y_true, y_pred, opts \ Examples

    -
    iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
    -iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
    -iex> Axon.Losses.binary_cross_entropy(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[3]
    -  [0.8644826412200928, 0.5150600075721741, 0.45986634492874146]
    ->
    -
    -iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
    -iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
    -iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
    +iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
    +iex> Axon.Losses.binary_cross_entropy(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[3]
    +  [0.8644826412200928, 0.5150600075721741, 0.45986634492874146]
    +>
    +
    +iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
    +iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
    +iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.613136351108551
    ->
    +>
     
    -iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
    -iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
    -iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
    +iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])
    +iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       1.8394089937210083
    ->
    +
    >
    @@ -495,8 +495,8 @@

    categorical_cross_entropy(y_true, y_pred, o $$

    Categorical cross-entropy is typically used for multi-class classification problems. By default, it expects y_pred to encode a probability distribution along the last axis. You can specify from_logits: true to indicate y_pred is a logits tensor.

    # Batch size of 3 with 3 target classes
    -y_true = Nx.tensor([0, 2, 1])
    -y_pred = Nx.tensor([[0.2, 0.8, 0.0], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]])

    +y_true = Nx.tensor([0, 2, 1]) +y_pred = Nx.tensor([[0.2, 0.8, 0.0], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]])

    @@ -520,37 +520,37 @@

    categorical_cross_entropy(y_true, y_pred, o Examples

    -
    iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    -iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.051293306052684784, 2.3025851249694824]
    ->
    -
    -iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    -iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    +iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.051293306052684784, 2.3025851249694824]
    +>
    +
    +iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    +iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       1.1769392490386963
    ->
    +>
     
    -iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    -iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    +iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       2.3538784980773926
    ->
    +>
     
    -iex> y_true = Nx.tensor([1, 2], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    -iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum, sparse: true)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([1, 2], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    +iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum, sparse: true)
    +#Nx.Tensor<
       f32
       2.3538784980773926
    ->
    +
    >
    @@ -593,29 +593,29 @@

    categorical_hinge(y_true, y_pred, opts \\ [ Examples

    -
    iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])
    -iex> Axon.Losses.categorical_hinge(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [1.6334158182144165, 1.2410175800323486]
    ->
    -
    -iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])
    -iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])
    +iex> Axon.Losses.categorical_hinge(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [1.6334158182144165, 1.2410175800323486]
    +>
    +
    +iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])
    +iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       1.4372167587280273
    ->
    +>
     
    -iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])
    -iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])
    +iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       2.8744335174560547
    ->
    +
    >
    @@ -710,13 +710,13 @@

    cosine_similarity(y_true, y_pred, opts \\ [ Examples

    -
    iex> y_pred = Nx.tensor([[1.0, 0.0], [1.0, 1.0]])
    -iex> y_true = Nx.tensor([[0.0, 1.0], [1.0, 1.0]])
    -iex> Axon.Losses.cosine_similarity(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.0, 1.0000001192092896]
    ->
    +
    iex> y_pred = Nx.tensor([[1.0, 0.0], [1.0, 1.0]])
    +iex> y_true = Nx.tensor([[0.0, 1.0], [1.0, 1.0]])
    +iex> Axon.Losses.cosine_similarity(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.0, 1.0000001192092896]
    +>
    @@ -761,29 +761,29 @@

    hinge(y_true, y_pred, opts \\ [])

    Examples -
    iex> y_true = Nx.tensor([[ 1,  1, -1], [ 1,  1, -1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
    -iex> Axon.Losses.hinge(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.9700339436531067, 0.6437881588935852]
    ->
    -
    -iex> y_true = Nx.tensor([[ 1,  1, -1], [ 1,  1, -1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
    -iex> Axon.Losses.hinge(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[ 1,  1, -1], [ 1,  1, -1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
    +iex> Axon.Losses.hinge(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.9700339436531067, 0.6437881588935852]
    +>
    +
    +iex> y_true = Nx.tensor([[ 1,  1, -1], [ 1,  1, -1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
    +iex> Axon.Losses.hinge(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.806911051273346
    ->
    +>
     
    -iex> y_true = Nx.tensor([[ 1,  1, -1], [ 1,  1, -1]], type: {:s, 8})
    -iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
    -iex> Axon.Losses.hinge(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[ 1,  1, -1], [ 1,  1, -1]], type: {:s, 8})
    +iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])
    +iex> Axon.Losses.hinge(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       1.613822102546692
    ->
    +
    >
    @@ -827,25 +827,25 @@

    huber(y_true, y_pred, opts \\ [])

    Examples -
    iex> y_true = Nx.tensor([[1], [1.5], [2.0]])
    -iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])
    -iex> Axon.Losses.huber(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[3][1]
    -  [
    -    [0.019999997690320015],
    -    [0.04499998688697815],
    -    [0.004999990575015545]
    -  ]
    ->
    -
    -iex> y_true = Nx.tensor([[1], [1.5], [2.0]])
    -iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])
    -iex> Axon.Losses.huber(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[1], [1.5], [2.0]])
    +iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])
    +iex> Axon.Losses.huber(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[3][1]
    +  [
    +    [0.019999997690320015],
    +    [0.04499998688697815],
    +    [0.004999990575015545]
    +  ]
    +>
    +
    +iex> y_true = Nx.tensor([[1], [1.5], [2.0]])
    +iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])
    +iex> Axon.Losses.huber(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.02333332598209381
    ->
    +
    >
    @@ -890,29 +890,29 @@

    kl_divergence(y_true, y_pred, opts \\ []) Examples

    -
    iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
    -iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
    -iex> Axon.Losses.kl_divergence(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.916289210319519, -3.080907390540233e-6]
    ->
    -
    -iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
    -iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
    -iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
    +iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
    +iex> Axon.Losses.kl_divergence(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.916289210319519, -3.080907390540233e-6]
    +>
    +
    +iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
    +iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
    +iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.45814305543899536
    ->
    +>
     
    -iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
    -iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
    -iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})
    +iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])
    +iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       0.9162861108779907
    ->
    +
    >
    @@ -988,29 +988,29 @@

    log_cosh(y_true, y_pred, opts \\ [])

    Examples -
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
    -iex> Axon.Losses.log_cosh(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.2168903946876526, 0.0]
    ->
    -
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
    -iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
    +iex> Axon.Losses.log_cosh(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.2168903946876526, 0.0]
    +>
    +
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
    +iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.1084451973438263
    ->
    +>
     
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
    -iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])
    +iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       0.2168903946876526
    ->
    +
    >
    @@ -1049,32 +1049,32 @@

    margin_ranking(y_true, arg2, opts \\ []) Examples

    -
    iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
    -iex> y_pred1 = Nx.tensor([0.6934, -0.7239,  1.1954], type: {:f, 32})
    -iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
    -iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2})
    -#Nx.Tensor<
    -  f32[3]
    -  [0.0, 0.9909000396728516, 0.0]
    ->
    -
    -iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
    -iex> y_pred1 = Nx.tensor([0.6934, -0.7239,  1.1954], type: {:f, 32})
    -iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
    -iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
    +iex> y_pred1 = Nx.tensor([0.6934, -0.7239,  1.1954], type: {:f, 32})
    +iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
    +iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2})
    +#Nx.Tensor<
    +  f32[3]
    +  [0.0, 0.9909000396728516, 0.0]
    +>
    +
    +iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
    +iex> y_pred1 = Nx.tensor([0.6934, -0.7239,  1.1954], type: {:f, 32})
    +iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
    +iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.3303000032901764
    ->
    +>
     
    -iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
    -iex> y_pred1 = Nx.tensor([0.6934, -0.7239,  1.1954], type: {:f, 32})
    -iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
    -iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})
    +iex> y_pred1 = Nx.tensor([0.6934, -0.7239,  1.1954], type: {:f, 32})
    +iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})
    +iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :sum)
    +#Nx.Tensor<
       f32
       0.9909000396728516
    ->
    +
    >
    @@ -1119,29 +1119,29 @@

    mean_absolute_error(y_true, y_pred, opts \\ Examples

    -
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_absolute_error(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.5, 0.5]
    ->
    -
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_absolute_error(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.5, 0.5]
    +>
    +
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.5
    ->
    +>
     
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       1.0
    ->
    +
    >
    @@ -1186,29 +1186,29 @@

    mean_squared_error(y_true, y_pred, opts \\ Examples

    -
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_squared_error(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.5, 0.5]
    ->
    -
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_squared_error(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.5, 0.5]
    +>
    +
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.5
    ->
    +>
     
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       1.0
    ->
    +
    >
    @@ -1253,29 +1253,29 @@

    poisson(y_true, y_pred, opts \\ [])

    Examples -
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.poisson(y_true, y_pred)
    -#Nx.Tensor<
    -  f32[2]
    -  [0.9999999403953552, 0.0]
    ->
    -
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.poisson(y_true, y_pred, reduction: :mean)
    -#Nx.Tensor<
    +
    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.poisson(y_true, y_pred)
    +#Nx.Tensor<
    +  f32[2]
    +  [0.9999999403953552, 0.0]
    +>
    +
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.poisson(y_true, y_pred, reduction: :mean)
    +#Nx.Tensor<
       f32
       0.4999999701976776
    ->
    +>
     
    -iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    -iex> Axon.Losses.poisson(y_true, y_pred, reduction: :sum)
    -#Nx.Tensor<
    +iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})
    +iex> Axon.Losses.poisson(y_true, y_pred, reduction: :sum)
    +#Nx.Tensor<
       f32
       0.9999999403953552
    ->
    +
    >
soft_margin(y_true, y_pred, opts \\ [])

Examples

iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
iex> Axon.Losses.soft_margin(y_true, y_pred)
#Nx.Tensor<
  f32[3]
  [0.851658046245575, 0.7822436094284058, 0.3273470401763916]
>

iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :mean)
#Nx.Tensor<
  f32
  0.6537495255470276
>

iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})
iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})
iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :sum)
#Nx.Tensor<
  f32
  1.9612486362457275
>
diff --git a/Axon.Metrics.html b/Axon.Metrics.html index 34d16965..12a87199 100644

accuracy(y_true, y_pred, opts \\ [])

Examples

iex> Axon.Metrics.accuracy(Nx.tensor([[1], [0], [0]]), Nx.tensor([[1], [1], [1]]))
#Nx.Tensor<
  f32
  0.3333333432674408
>

iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1], [1, 0], [1, 0]]), Nx.tensor([[0, 1], [1, 0], [0, 1]]))
#Nx.Tensor<
  f32
  0.6666666865348816
>

iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0, 1, 0], [0, 1, 0]]))
#Nx.Tensor<
  f32
  0.5
>
false_negatives(y_true, y_pred, opts \\ [])

Examples

iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.false_negatives(y_true, y_pred)
#Nx.Tensor<
  u64
  3
>
false_positives(y_true, y_pred, opts \\ [])

Examples

iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.false_positives(y_true, y_pred)
#Nx.Tensor<
  u64
  2
>
mean_absolute_error(y_true, y_pred)

Examples

iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})
iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})
iex> Axon.Metrics.mean_absolute_error(y_true, y_pred)
#Nx.Tensor<
  f32
  0.5
>
precision(y_true, y_pred, opts \\ [])

Examples

iex> Axon.Metrics.precision(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
  f32
  0.6666666865348816
>
recall(y_true, y_pred, opts \\ [])

Examples

iex> Axon.Metrics.recall(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
  f32
  0.6666666865348816
>
running_average(metric)

Examples

iex> cur_avg = 0.5
iex> iteration = 1
iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])
iex> y_pred = Nx.tensor([[0, 1], [1, 0], [1, 0]])
iex> avg_acc = Axon.Metrics.running_average(&Axon.Metrics.accuracy/2)
iex> avg_acc.(cur_avg, [y_true, y_pred], iteration)
#Nx.Tensor<
  f32
  0.75
>
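The accumulator returned by running_average/1 performs an incremental-mean update. A sketch of that update, inferred from the doctest above rather than taken from the library source (the batch accuracy there is 1.0):

# Incremental mean: (0.5 * 1 + 1.0) / (1 + 1) == 0.75, matching the
# doctest result above.
new_avg = fn cur_avg, value, iteration ->
  (cur_avg * iteration + value) / (iteration + 1)
end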
running_sum(metric)

Examples

iex> cur_sum = 12
iex> iteration = 2
iex> y_true = Nx.tensor([0, 1, 0, 1])
iex> y_pred = Nx.tensor([1, 1, 0, 1])
iex> fps = Axon.Metrics.running_sum(&Axon.Metrics.false_positives/2)
iex> fps.(cur_sum, [y_true, y_pred], iteration)
#Nx.Tensor<
  s64
  13
>
sensitivity(y_true, y_pred, opts \\ [])

Examples

iex> Axon.Metrics.sensitivity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
  f32
  0.6666666865348816
>
specificity(y_true, y_pred, opts \\ [])

Examples

iex> Axon.Metrics.specificity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))
#Nx.Tensor<
  f32
  0.0
>
top_k_categorical_accuracy(y_true, y_pred, opts \\ [])

Examples

iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([0, 1, 0, 0, 0]), Nx.tensor([0.1, 0.4, 0.3, 0.7, 0.1]), k: 2)
#Nx.Tensor<
  f32
  1.0
>

iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2)
#Nx.Tensor<
  f32
  0.5
>

iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0], [2]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2, sparse: true)
#Nx.Tensor<
  f32
  0.5
>
true_negatives(y_true, y_pred, opts \\ [])

Examples

iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.true_negatives(y_true, y_pred)
#Nx.Tensor<
  u64
  1
>
true_positives(y_true, y_pred, opts \\ [])

Examples

iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])
iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])
iex> Axon.Metrics.true_positives(y_true, y_pred)
#Nx.Tensor<
  u64
  1
>

diff --git a/Axon.MixedPrecision.html b/Axon.MixedPrecision.html index 966c1035..c0fce94c 100644

The compute policy dictates what type should be used during intermediate computations in the model's forward pass. The output policy dictates what type the model should output.

Here's an example of creating a mixed precision policy and applying it to a model:

model =
  Axon.input("input", shape: {nil, 784})
  |> Axon.dense(128, activation: :relu)
  |> Axon.batch_norm()
  |> Axon.dropout(rate: 0.5)
  |> Axon.dense(64, activation: :relu)
  |> Axon.batch_norm()
  |> Axon.dropout(rate: 0.5)
  |> Axon.dense(10, activation: :softmax)

policy = Axon.MixedPrecision.create_policy(
  params: {:f, 32},
  compute: {:f, 16},
  output: {:f, 32}
)

mp_model =
  model
  |> Axon.MixedPrecision.apply_policy(policy, except: [:batch_norm])

The example above applies the mixed precision policy to every layer in the model except Batch Normalization layers. The policy will cast parameters and inputs to {:f, 16} for intermediate computations in the model's forward pass before casting the output back to {:f, 32}.

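As a sketch of how the policy-wrapped model might then be trained, assuming a hypothetical train_data stream of {input, label} batches and an arbitrary optimizer (neither is part of the example above):

# The policy only changes the types used inside the forward pass;
# the training loop itself is constructed exactly as usual.
trained_state =
  mp_model
  |> Axon.Loop.trainer(:categorical_cross_entropy, :sgd)
  |> Axon.Loop.run(train_data, %{}, epochs: 5)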
cast(policy, tensor_or_container, variable_type)

Examples

iex> policy = Axon.MixedPrecision.create_policy(params: {:f, 16})
iex> params = %{"dense" => %{"kernel" => Nx.tensor([1.0, 2.0, 3.0])}}
iex> params = Axon.MixedPrecision.cast(policy, params, :params)
iex> Nx.type(params["dense"]["kernel"])
{:f, 16}

iex> policy = Axon.MixedPrecision.create_policy(compute: {:bf, 16})
iex> value = Nx.tensor([1.0, 2.0, 3.0])
iex> value = Axon.MixedPrecision.cast(policy, value, :compute)
iex> Nx.type(value)
{:bf, 16}

iex> policy = Axon.MixedPrecision.create_policy(output: {:bf, 16})
iex> value = Nx.tensor([1.0, 2.0, 3.0])
iex> value = Axon.MixedPrecision.cast(policy, value, :output)
iex> Nx.type(value)
{:bf, 16}

Note that integers are never promoted to floats:

iex> policy = Axon.MixedPrecision.create_policy(output: {:f, 16})
iex> value = Nx.tensor([1, 2, 3], type: :s64)
iex> value = Axon.MixedPrecision.cast(policy, value, :params)
iex> Nx.type(value)
{:s, 64}
create_policy(opts \\ [])

Examples

iex> Axon.MixedPrecision.create_policy(params: {:f, 16}, output: {:f, 16})
#Axon.MixedPrecision.Policy<p=f16 c=f32 o=f16>

iex> Axon.MixedPrecision.create_policy(compute: {:bf, 16})
#Axon.MixedPrecision.Policy<p=f32 c=bf16 o=f32>

diff --git a/Axon.ModelState.html b/Axon.ModelState.html new file mode 100644 index 00000000..a42ce760
Axon.ModelState (Axon v0.6.1)

    Model State Data Structure.

This data structure represents all the state needed for a model to perform inference.
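As a sketch of where a model state typically comes from, assuming a tiny illustrative model (not part of the docs on this page):

# Build a model, initialize its state, then run inference with it.
model = Axon.input("input", shape: {nil, 2}) |> Axon.dense(1)
{init_fn, predict_fn} = Axon.build(model)

# init_fn returns an Axon.ModelState struct holding everything
# predict_fn needs.
model_state = init_fn.(Nx.template({1, 2}, :f32), Axon.ModelState.empty())
predict_fn.(model_state, Nx.iota({1, 2}, type: :f32))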

Summary

Functions

empty()
Returns an empty model state.

freeze(model_state, mask \\ fn _ -> true end)
Freezes parameters and state in the given model state using the given mask.

frozen_parameters(model_state)
Returns the frozen parameters in the given model state.

frozen_state(model_state)
Returns the frozen state in the given model state.

new(data)
Returns a new model state struct from the given parameter map.

trainable_parameters(model_state)
Returns the trainable parameters in the given model state.

trainable_state(model_state)
Returns the trainable state in the given model state.

unfreeze(model_state, mask \\ fn _ -> true end)
Unfreezes parameters and state in the given model state using the given mask.

update(model_state, updated_parameters, updated_state \\ %{})
Updates the given model state.

Functions

empty()

    Returns an empty model state.

freeze(model_state, mask \\ fn _ -> true end)

Freezes parameters and state in the given model state using the given mask.

The mask is an arity 1 function which takes the access path to the leaf parameter and returns true if the parameter should be frozen or false otherwise. With this, you can construct flexible masking policies:

fn
  ["dense_" <> n, "kernel"] -> String.to_integer(n) < 3
  _ -> false
end

The default mask returns true for all paths, and is equivalent to freezing the entire model.
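A hedged usage sketch; model_state and the layer name "dense_0" are illustrative assumptions:

# Freeze only the kernel of the first dense layer; everything else
# remains trainable.
frozen =
  Axon.ModelState.freeze(model_state, fn
    ["dense_0", "kernel"] -> true
    _ -> false
  end)

# Frozen leaves no longer appear among the trainable parameters, so
# training loops leave them untouched.
Axon.ModelState.trainable_parameters(frozen)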

frozen_parameters(model_state)

Returns the frozen parameters in the given model state.

frozen_state(model_state)

Returns the frozen state in the given model state.

new(data)

Returns a new model state struct from the given parameter map.
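A minimal sketch; the layer and parameter names are illustrative assumptions:

# Wrap a plain parameter map in a model state; all parameters start
# out trainable.
params = %{"dense_0" => %{"kernel" => Nx.tensor([[1.0], [2.0]])}}
model_state = Axon.ModelState.new(params)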

trainable_parameters(model_state)

Returns the trainable parameters in the given model state.

trainable_state(model_state)

Returns the trainable state in the given model state.

unfreeze(model_state, mask \\ fn _ -> true end)

Unfreezes parameters and state in the given model state using the given mask.

The mask is an arity 1 function which takes the access path to the leaf parameter and returns true if the parameter should be unfrozen or false otherwise. With this, you can construct flexible masking policies:

fn
  ["dense_" <> n, "kernel"] -> String.to_integer(n) < 3
  _ -> false
end

The default mask returns true for all paths, and is equivalent to unfreezing the entire model.
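A hedged sketch of selectively reversing a freeze; frozen is assumed to come from freeze/2 above, and the bias-only mask is illustrative:

# Make only bias parameters trainable again; kernels stay frozen.
Axon.ModelState.unfreeze(frozen, fn
  [_layer, "bias"] -> true
  _ -> false
end)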

update(model_state, updated_parameters, updated_state \\ %{})

Updates the given model state.
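A hedged sketch of update/3; model_state and new_params are illustrative placeholders (e.g. the output of an optimizer step):

# Merge freshly computed parameters back into an existing state. The
# optional third argument updates layer state (e.g. batch norm
# statistics) the same way.
updated = Axon.ModelState.update(model_state, new_params)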

diff --git a/Axon.None.html b/Axon.None.html index eef9a4a0..cba16db6 100644
diff --git a/Axon.StatefulOutput.html b/Axon.StatefulOutput.html index e88b4517..135d965d 100644
diff --git a/Axon.epub b/Axon.epub index f5860d72cf8faf1956a79c474934df92ce2bfc7d..9fe42e7ff64c235554aa5d16cc09bb6738d8e393 100644
GIT binary patch delta 357599
z^iWU5Y;y;~f{C#X=a*q8Y%Slv@#RYICu5rZN@!H_A+|Xtb$b$R>|dhtXX6v;L--Hz zDoiC!_1&wBrC)Z^FdlYxb+x(fO>bWbJGlc!u`Uc!KH(3@XNvgZ8l#3Jjs$%vo`9+G z3~T_GVf&z60X}y;jUla*gGeaX8VG_*Iw^MN!+wgB;@sNMwM}d-H0Ck^cv=iw3X;p+Fxwgj-;NH^IV_9{M{G6mtw?A~ z++O>cOsxUGK!!6LZuL}r#Fv7^FQPkcJ=Hw^Y0S6@sr{}vWdY#-zmjQGWpl!$#lQk{ zrP(clKm*XKEdnGP$@h-dK{_e6V5)cgOF$#@p|B!a*CmT-lv-l`rGmo}uhw(QHFDG7 zhW;Y$cp%I@9ZGop-5IH?tKI!^iy+Au{+&UcL7@UcQN%b(eSrul|JH%>fh`{iYkJ_~@6lP_a!_N0L$1fxJ6DA1RbM3l&QOWq;wkQS0oENKQMhD%QHu!8D{ zi~xi~C0MT&MNgNCONKP3{7^)kCz_Qyzum;S*8$q=VK!QfkWJW9Buxfd$5*P>ZWsl= zK8v1^LD(9mguW*tB2#)0R*zD{Xwil>1Z!y^iD>pw!CICHT#?~92@jrv3TTh7%3)jL z&4%;9?!5S<0&gXA3m1t&C*W9-jiH4M@c>t7)Ml3jbWER|F3_z*Q#-$zmMgvI;PP0A z9r4zD<>1kbB4#{CAC^`0xP8m^+;z)tGjxXlyISMSpO-uZVwy>|D;=Rf|HfPzuj81L zjeEp@WQ|2?!1FIR%Ce^(%$+^?#qg`_T_-Wa)<0TGq{4zr*3eRaO}+}CVBGH4KLXec z6>kWYj0h2^PcW^CwOa^vBGs0kj5qpczBGcf_Es)pvQfxc0?AFCSvw%1n?%8li$cK? zoy=(90t-x~_F?|Yzbqi#&ylgB=i;D)Ek2b+W8BUR3vv~$M_XW_K!j%TUXV&pL9KF5|Klf5SfF0s+eL?3O;e zF7%r%zvY5+eq@7lzDTa{unfzH2Ro4-yf|~Yy1C%Ay>G1ox3(*6)rU?hea8C_>-YdU z4XiHSIR&S#zbcRA*)=Uw7fY@RHgZRvOsh^gt*+^iwUTliv#!`SA+PU!`E)mN_a7hL zmBr$;R8fi(72yd5;tTo02?22@g3biZbPD=prmT1+(i{gM=^d8)jWZC;7VAanNeV-^ zUwm4ha+r4HPM8*-Z90qo`C_6lJYdz5#e%T8m^bsZo&lze)u;YuMG&Qv^2dOvrv~U2{Bx!Fwp3q@ z(anj=@^4Df8V6eN(~IG#=&2VbHwoJAaXusryu#=WU!JehgzHY>e+3=XSM51ord@_D z^Nn_bf|s{y?#`>Se#bTc4r%W4e60iyl))1bipy!mX>!l{d_E zUE8gdWd|ZKEK(H;+XSo{VvsbF3F-09IH`4Gm?|45?>~Cr)^57-VCdFx|GI-9Z99#5 zhgaRHm*jQjK6&h)-g8wtVt6Cu; z{*-d9Tic%P1J$~&M-1fhI;^$&M&M4M$PTePKSXKdS4BObzHIJl2FyP=I=+DTI?m3Y zS-&lOJNPYb;h&#LJl9QmKP_QGK;-VY7~GvegGom;NjIW^PYgV*j>4MkcUI|HN|aU@ zK*INzKKT;zb^!YJ;znG*tH@i+rQ}FJk9PLpyGX3(ap&-_Uc|B7q`D zXA5Covg$Y_&GQd<0s7xkJL3eNQOETEq3Rr?!w9=}ousjC+ji2}wrwYkJ&ldVwr!hD zW7~EbHa>aZ?>lF$^MC%#TC?Yw+56h}75npZ>IbhXE)DQF)PFG$o~kDAE0BAByq{Qz zl^R3L59H^Tl}XK4F^}@>%XP2cc1y``D{@*#1VxwwsB7BW3Wz1 z;ZrRvLDq<@kZb#&pTk--rui1P#lr+0a1Sf*sx#r4ocy~z%2_TqbWki)heB@~VTEy# zI)02g5x$mbA~a|K8_#~B$dLtk2D^{*{!X&h~MrhfO_c$u4#eS-^Blsvu}GY}edB=8!DeT-{VGEp`r z9|Z1aA=R0EreITXllc1NK_=vu9GnQJM3@%fmFfk{7Kj!$h!*CE)DC8^e@Tu?N=&cc zjVlGV^w@}yPBTQ{d%?RNu*}s@!`@+^u9(IQ6Z#SP-20i_cYhz_Z)`_#m}BvCwmxYk zd#*(2sMc*NKTM$&i!UP5fqch3A8=+E2r_k>) zFG_s;^Pi~@w!^jI|1&!rv0E~huTK3QapmXN?ElxtP>?xvHspJZ?WnEBVp+=+$F-a#5|R`N!Z+&KuaDB z!u?aa;mWUWFkDI^Frj)mskSbG0V1@+461-~bGm#t@(N?>vuB+!+aV5deOnuGLT}^- z{aKcldYO}WN5M!lA{-fmF1+QsV1J!pJw>QhU9}KzV9{6|Doia#=Jpq%J1RzZCmtu; zJtR2XOf))0VVy{FmGI|QLCdTUfgeyzlSi?(aQvtYVGWsPo zBw6F;==B*z_g7u{v75@C)jXlsxz|}@Odn^F+%A)L{BoST}5N#WQC6Re9w zglo$BMEAc{ukHA?590q+t5sKq@(r{5$#@tX_JBtb^wtO*l5Q4LIl{!N4>s`a3DGU2 zs@;}RmR&+w@HfAOMKADd;cn4pPH1UWA3J`_=!r;r|4|Z5HT&A|z^@BF`$gp}Jvrcs z8P_TxGqh-9lxnNocTaz7imvp0RI@wxO<}wS8zJ8fw2iT34d3#jD<^)j5N;rQUn)57 zIo81KLniok{)!D&qaec-b;%k4c!Bo5<{tHrO;CQ2mAf=mR{~u%U}_jYG-~!~LEDfs zmNgDwKfui`Bmz&cWG^;#T82FBo5$TaTI+xS?`NiQVi`84#wM}5C8`VsU-oeSOL@wl zPS2Lz)kiT`yMG*KPp)pq6iXvaQ))*4&18By1R_PS!wg#1Ooi-#^3k~T{UJW20&SKK zl3H6LN*=9tLIl<-#&|c2LwqVl%F7evx4IsFAWm#}6=R}5#_>%zT!B3%(8o4wWVuk{ zH9%@Sq@XsdJy6D`m|SQs;)#00)61$q>2{A@u1j*(RlAOL-AeDPA|VMDQ3PLCTF&g> z?v{t&C37iQz<9SXaiBFH&eDv9KCKq544vLCSQUQ9I|1gyoMdI)_U-u=Ll%CX=-`9Gp2^XSi~=jrTR}&YEd%DR*3MGP6yP^FU3yD6Ztg11Ydo~F z@$Aq~2NXKKM|9Z^rlBxxYSg##4GOO5%Cy8i=CW+L8Ow8SsT;biAN_sJy2Zul3^<*9 zTu!*#<_=KGFjas_99pBxmT~=8FRppM&-ezuKj*%)tERc?=Wa~57G%1t15dlyef;61 z`wHw;4-p_scn&dO<8Sa>!oYPEcB5bxPwtKq50D%O{xGj27QrL)nMUo3Lf4|qHwncX z1255G6Q$&7|n4eU9$O%=UBTa&g zm&3ECbjb~oW1(e7Lmyx|Gz;CfsZq3wG|o3}3!?1H_u92SI_ElPvSt-WVQk*PA5=h} 
zH|I*$GexkLXQ3zk7n~LXRyCv)iDBx@IzB%lMUCitR?_if;9hrv9M=SqUnCs*=OWqpDh@}#4qA%Y^I;HM7P;>sPslzTS9CnMhI1kazDrwJQ@=($}+gOhGCpT-Yo|j5oMVdx=@iFuMXW*vR<$n^-f*m%$L=;fht;Y#x0) zx-<4z+qS!Mu$`5fb@}Z}=63gEFC(|Y=DtL`i#^PPC8IVID>u%Ov1u9u%X;&T-)3QIudF(Xn0}!91w2%dSkEI@K7BvreoG^wq zMxc)xW+y8#jb}U{hY>@(76~zDC-r7k9IsXwM{gw$UyOg`a#C9sDquOz(1(haVtqq} z%vzc-E0~XENbNRc`XN*r%|oSx%o#lvou!FdQdPh_jOvl5MAXj1>iwcsKrVWjDm8Y( zN{thF5Kk7?BqjlwN}NlYX&fy2&4Cr$u-y|+;~Sc6Qu;8kw18=%6aLwVz_M|0x+f()#54k&@=9==#I@V_WFE`vVdr#1#cU$iSs|m-gah=c_(f0LPh}RP&nKsQY|% zZoAX{v0x$4Rt8`4_OK^vbjk$8WuLuvwCDb@u4RJHlg^7G{YyW$fbLW;HLVvV(6ZQS z+jZ-lwS#uym-ou=19YJfc*yAlyS4po@4OlZeXR2~X-Y0Vox~&BlK`5r6eExyzvE$9)-k%#=g;WWU{oom;v7^+&d)253$) zRW2@M?$_JAYo5F zJbcIsC^~Y;y*CS<-0iY_?B2~?8fMP>H?O)MUny0Gimns2Ue*Ea)b}XBcIM1^I#ih;vTvjK#oUZ0F4I0*vV%3 zdQD>@i~5}pxRVF&QQ!A5PJr zeZ07qa@B`s^8&D!hqcp~r7^hYj%er>UVOUED(`mSYFa&YBF{dM6flV|e9A<9juSFg z&_dBTtQL42+|Biq={*)Ma93EGv@@{ zL7BuN9?n`FO*IR!EbGXFD^*4Q!6DniEUYfi=C&N-g{gqnw%EtI4sSZdqkzQ?&(?4J z6_kUer9@>;(J}=xtYfoU59P`z+(1~(g3q_~eo!MKJZ372Y$g;beMzOLL|(x$sDL0G z7X}PlHOURSbNo~VKx?(noqC6Gq-_-UkrEsxxP4Nyp8n4h3i$f@RM;)%HH#=0axJ`f%FY_$K35w<2oS#{p{)sarp(pf!s1*d2ugMf4)g z6STGp8F}20xjb>%{c1y z4cF8#KDSuHGjAAIt*pt=3Bh198Z9lSvT$B6+H7xW`S1i2pINUvaGu2*4(#?`7b(W9 zWWLa!_>1|4C!ezHAd~XF1XjpsZQ~-y73l=x=3g7w(!y2>4~PlAcS`)`Mo9pO#ZR`u zfXm9dFmHr(nm=WnVC-0dw()wKk`{Ev0wmA1Qgr$$ zFr{#(z#IRF(I+e#%^1=%57+5A_DDGr$w+gYDM~Gn7A{nHuJpArOE5cNgEVpyvf}Zm z(n4S*(axZ7k;x}3N7|GW4J`tp*G7w4Ha}JLU}rh&%=k$q8E0eQTX>cw@R|+D9+i-k zM6mIxso{)&JT_7{R7$0koPI4t{eFErsi+P+D$|@*Q$=irdn<7yW6JV5;Ug2 z+8e-Og&NcyGU69)2&(*82*I9;Gh6l$9=-Ec-l;7ao~C?N(eyLW5MKu|3oh2X-m}%jms1xPVM2}iusKLK#wk7x;N?~5$K6&Fz(p$@(w`uX+9{_|4Uzo9 zjgXBo$?JH=1ZvO!1aN4(nsM}w_gh3XgTQ8rVq>GdvT#EmzEN>Y#a|4XsV)Dqo5a8T zftv9B`gu;c<2H-X2GJG>8~;PVC%Qwc=zHcPQWqnTwNPId+zy!^}ZD99lg$3L&-ihW#;4-@{YsS zu6Dr-9%>D^zJ`Fx* zd8;1ftLGmS#_J@LXZmKruQcSG{Zr=QvRRaWEIAGfv{;K}Iy~?!8y6U_xR_=lc*o|? z`U@H5{1v0%;u>)c)c9eEw(Ef#hVp3$Jh{K3ZWxDyjmjL@_hc*pO~Ky7`!eNEIRn`( zJz5-v_omQ`qfd1`t6BVn96lrMo)hFP>%-MPhK}}M*F4J zlEQ1oUoYcPuk=u%*ySsKohzu+-qdpPuc>Y9kthGt#D>fCp>gjP;`F(jn_s5@eoaSj zhw;PP?cH&hC>Orq6<6uUy2oIn>U3#cPB)_~uB}_6Kn>H@#^u!$mZM`0FGmw~NHkLu zUQP}HIZ_~Ftp`_T*X8x?%XG28Kdhtom(3&p-nC7lr>9W|w=P{rq}KEOql)#bj!|F> z8q%y7odIhFA-9Kq5=wXhp*JV`53PW9wFKNVs4o4OnT`3>lM~mQGp&`+yVOE1(F<1Q zrLhOsadPh_;b$84o)kgr=FPCH?@f={(zvSd^|t#UeUqjo)TtOOTaT2aa~k6|c~Okn zKcWM8C8F1h<;|bn7<4v)MkEQUMn!$3nTnR|=ONWy! z5JsR&^;HA8HS?PLC7E2Y%1bNV}ff@-fYgS7G)w9P0qu>DH;51#e0`*FGsC=Mb@CIUoNHq zszpi{?;|p>akiOW(WY;dAPD65pe5x+sFrt{rZFgMDdpZ_o)O~x3|O-(Q6@4{qv$Lz z_|Z5y&{$#OAf?yet*y>58dFF|+F-_#kz}%t;I}=E6J^++mKMy9SGT6L1$5{M^+%b zm}Rwu%9k=Xu&1mGB?PL;%tmao4v`DZGr(no3;3nsekPw`!C6~xB>?}M6f|zLsDz&V zit*)ZzwcnPE^rcBSbpqi;QsBN`Qc1(0USVo7y^&YvPLoZbVw+otqc+= z(h+Idmoj$yB>xd3CbqTnur7k)#7v{x0EulsyL*^gG)G33hs`EIXLg~DfWv}NCfAQp zON3I;M^3TC>d2Pt99JNteY*@ko+sZ?g@dl4;7rA;(I7UD;9PVojjL`T^TckOz{S{+ zfBkA*R0tefYFdugZgu+sUT-hY)=-EC6L!AQa|nt^Zy(i#jzHMoe?M-WQuqgN`HptO zXAnFL^}qA~Gh%se8Zc0;b?1V?J*cjIGWeKt-$t9pd5GVYhPifJfmw^L%~Nap4im`^D(qoURRvICOkXp9LPl zeYe=vB77N~ag&`1ZbK7`nG^mb0^8S30yz#Y4_e&??ZY8Q8ZSQt&y9z)BS2cH;gfRW z>ni+$XI8cy;tznbUhv`*+#%Bdd@c*T11Y~^J?wEiuogW&mE0Y6_e+OR#>q4BB=yC? 
zUn3Oll4zARdLpr>=dBkgDZ)r$O~u8#IYR;uw@CIN5go?Ppl!a9n)XK{rsrHP!2T(o z-H}eskyJ*_k(Ga8kE;xp)QTL(Mo=qLkp}dbLqn!+0ADA;``W6ATNJzjS{>Qcao_ZR zJFJ_UDNbF!Tbz5DM#U=9Pb|Y3o+3snMuJ*}%aA z#OUhC_Z#WB2l1i9QR?X{x6o6?e^gkM3c{|%qOnRIPniRFhjMadYMTutbS^!PH(x%Uzjon2 zdU|P$ghsMH1#KJV1gCvJn_fC6O>e!gd`n&+=fBW1miBw3r(?xCDwWgT+W2&YaAyQ3 zfnKhe8W&FvYZsrD?Zd-=zRRW7-VrC`#-Ejn>yxc_`=YrYAZY(T2$6JV;U}1SOb^Nc zlxwwu{W4p1FwoN#T25{5>p;}v%fk-+`O7LQO00tXFtz*bX;Znu6srWS948)O5{nP@ zdUw-hdQT9 zgk0Ajxp%g>;7R#G`KdiFR&$=QqOQ3iD#cWlcrPYyn1UZ#Oe-R&O-5*rBjBnFQis39 zUnVQ>)(hOkM|VVWBT6S}EqP5Rs)8oaGZng$09!PZ zB7yPe1iIeOy?3T-L*R+OuEo|Uo7w=kK)u2K_GYu)pl^-Vd;+B`1h(6Lo2u%t{t^e($L?C$^e*{nb0X>P0ee49 zQ}R=e&e$b?HW)^qrsWc1ncPwh#uehN5S;h7`IMA!{V{~w__De-AOkT&8T4Q)wz6K; z2e&4qjhfGqoY$A#FKjHUH6-MAqDSHR2g8=xPRNO-e6#uVXI4FhY{fZ0#K9uu8Jf=x z?*1||9;+}ys3dA%JgH#-cyxjZW)SGU3#4?-Clp^^x0RGi!-~lvHosIvJf!k`c5+&- zTl6xcvZ+-9%WvEY0JG5^B|)CJEf`+w&-o^zbnRSc0vm!s_sCaXP8A@7=G|8KEpp4JqMlI-q3ZSd639x1c*;!%;D>Zx6nIz_XZ zjJNked#+~oy?ECJ*L>l8j;8QLw(_2AdH%)kDmhyQpya)wob8b>@Vv6jy?DV65kIg) zYUQ`v40R#k(5-iNsqejCOMtQ}Hlb16_{qh6%4_K0qw*tl@}^0(VWof4TF>xDqYw{x5hDfK?%xXH3=mSkWyzI$(%?U1N!W-?R~r zSIVgmR0q=PoB-;XP@S2wxnbYFyQMw^e4BKq`F;V~W$fFnIm?y7bMG$Nwh8=r{coG{ z;!DKvHSqijCWofqD_u#?&bCFnLwiYTC8kE#g#qtOpp~%-DEQp^g#hJDrh?gQ65WR^ zgo5hfZh4f&Hn@4@-j$)J(deb#>xj9(dIFreZ?LDT&#OH=m@Ucq=T4PWJ~?uIe)l&~ zUhKVkd9SS?y!7{X{caF5-OG4ul{fj=u~y%UN&LD@xQu!H|&1p zC*XSW2xBPi;eYi$Nkhb{s{mFxtlXO|7aRdY-*BnvRKF7bPJ1_!y%s*_#)^-GzrfC_ zvpTTBaS~^(u1jQ_i>U8eDr}>>m0A%BXVu@ADSG>9a#qT@h){#r^hoLu_2}x^qzLUu zIlg-Eh$BVvlprD?0qdnilY~f9B(UTjSm_dvscyNY zPk|wgp2}6&NFtszrErt?dWH&f44|&D3l}RX%}szGOIK1=7(lt3SHu}JG3RO?8rg$T zr941=Rw)iQ3B;6nte4B-r*H>(%Ci1B@zXqLs9-`Y65r?SU2Il};7enw>OyOI+H zr4uWfx+YU<-EaS$2~l)8>h`AKTIy8i7AG+0U8B zlv=BxX|`g1ni?;LRP)C&z8y6+)*fEEP(Qeh8nPcbA`Pr++95vNwRldKA5^8SBO*#xXVs8K>ZaiJxN z*o6K$v%Al-7K-%scR@Eip~FPTi8BmEV-tuA-=ky*H6;^9nHI0ZFWs8d9L}4|z9AW0h1D-_ap->bnh7&JIxBcEE|+>PTi! 
zPk7~|{`ty$a=G~CjcOQWu6NCtNz^0m>C2|gBysm(Q*WSNxaiDptD}J0lgo0Z5AE3{ zx3B#A@idBdQ~21~($(j{l=Js_I0fzbG}Ap!lAz7{$@8X1zH`6zvgOG3JE`YO&Z?Iw}FR#4j*g4I|fx`*B;_&Qi+dng|&Oa>`L=xvwY z>+ftU?63Q0E}i3*7qt9|Zt1O;Zr+AP>1VHljPjgbsfHVzz=OYsvR#d;ih^@>Gcp$E z(!Kp245gzjm2+y~qMv~T6>k*Am96BE9{$O>LasRiA`JejT zkNjk|;t6n;;6VfvV!}1^7baR{Uo%hRtVQx*6|?DDm0wBvibNDdewjW_Z4-TFlH4 zou~@Tlu}jnYZqga;^MWcX(6i|TQA1FeWaQa$SMWG7)5pBu^ zsv(OlP??WuXUJ0P7r|#9Vj4VQl)^M?s)Y;5_Dy`39@4BUNxF1p{#FS-pdabjLxLDW zLWeap;)Y-zuOEV}B%8vP0}alzklVUg8pZ)fh0L^pE%H zGW#Hs1E)BznmLET{IEPYGexUm>sPJD6Jz0N+5mvmFCVu{S2um`z5ln~_GZJJ%T2S& z&skHDJkw8AtyNkXL;qLmu%Vacg8JO%^8xK|-(@v5bLD6D&W(Q+4~7HhOnE}(y_D+( ze>2HzIfZOI{5N-7K~#>meede!-MtdGU#GY(Y1;$V!uSj+K4xp52o?aDp2gQV^Yo@I zD&V1K{j!sDb!jpUVS5oO`@NQ%pe~rIn$l@e^GSQ!MG|2)UYxLJv#J2P&#JrP&dJi=m6%-2O+9EI#5@vQT~ z)bTP87<2B{ox>D1=7;c9uX%{p8GXIqHhyc)i)=-$3MY#7D2lP3nb| z#V}KTpEhINF9|f^07MaPpTO})h%;AeFpL-kbrH1U(SoDT;t_M9eIEv@!N1UYtk`3b zme+6Naqdh@lOz4TEkR!yiMQL2WgE@%$M%=M9F;ab!`_-7PCBd|ybrE6yno7?BWM!X zbi|yGI6j>$xgly_Uji>{_7bX5v_97z`1T?#GXUVy(**8=oh} zst&CnFvfOa-k0MgSIVlN=KEWN-N#5h;}s&CN^!>TICBt1;4m@TF*yD@%0By{nC%r^ zbe-_eQ%ip4-JKI>ls*$C*%*9vRVv`$uv4w!3~L+r!a7S3^MJ!dPHp9-%N<2spJ*5k z8Doc$=1+$|Wl-{OyzC!`Rvn+5P_M5|3nbm!NX4G8{cNXeZ5wdQ8KT>Zietkotq$1+v7|zJ+=~lPT1d(I ztF80$@O_Lb{Q#pxNIXw^Xp`XSS(>X}w{~RCsY?3rWXq|8Jytlqfl#icWxNY-(90Z- z#tX291I9P*Su*Yh`OxkL`JBWjdR#%mLx!gG$Wlm*Ky~#Kaa-+&#v3O6%3N={gJ3x- z82gI9-5)pX8>x7JzUe{Bd3GDoJh{dcbxu)8d!<oCG63V%@(SP=nt3@|W=whx1OJYxJ;@_MZ`ajOC7bQuaR$ zmz}6k3t-w@V?qQjdV)-X9(5pMFlyB5)3d+}Wb|8qF~Tbnh&YIo?|iPAF#t_ov?l@= zm_Dn+=ngOD@&{y&IV8eZFHyW?wq8$)kvkNgI|P#?Jn7h@3M<5*iJnqqF}WCP%kO@(iTpnyB^%Jl z_{d?CtK&B41pBBiW9=$|@;-0>uk^pTg0Ip?hP7cAqQ8?xJvD2ZZE3J)qV+_=TeWW&sPOL z2}h3jZ#x8{heg9shDfFL{Yqku7&WK{6Fgy+QY1+Q4+|U?nUO;MlSrK)>$yP^7Dz6K z`b#O+G@8V$Nu!$APd8QH&1MOxB(~ruO%e~x8WDcMb$Ug^b-M`ON91;7MlGUpTrd4x z&o)2QS&yq|ncgh?+~p)X%f6yjbGKh?2DPzOXK%N8pryVzz;}{y%9091oAN1*81k`FzHP`K4&72ktfoCq_gZB;mb$!-F~l7#bEc~IUE z;zL1teB=7Y99Xc4LDr>4lSg8nCjCX9lkW%K6vt!XAADN?Q*x*Q9Y?trj9aJ=T3|oW zf)0d1gDH`l6UViS!w*T2p5Ts)kRzoJF!_-+{}_~iCH_Fq|EJyALTVzUv!CE_Leriw ziS1N;HAAlOQKs37*iH`kl_>&s`mCr`EE>`jj+Jr^u58 zaay}<4w@J8Gnh3*J!;-UMS{Hqz0r(H4!~ivxq$y%Oun4ox<3Ne@h?XG^Ve@aR;iYf zz^CPw*I@N72~Z*C4oUunT8(BBFSgKjde7HVUH4(fQ=VUm*;mf)J`KL@hj5*~CJV~= z*t$W=V`vvlec587Ef-0%UrDpqML4LYM`iPLjvT>lhhVLEo-x7=6O+Jsgasoi!wo$x8g)!-tt}! z_F(_jpXHhLc8~W>-!by0)mb-PFD_@BXQlm=Sl|`HjTsmgI&^cm>L#eyzA*MMLh>=1 z_G=7lU)iq_Y<=IUygZ8nd$1ob+wfI;?_z3Du2QRRne1*Mw(Ja{JX@##A&rg6zl*wo z(9xRe@3+YVqKogJvqBZBQ8@XbRJC0Q1slyW$lkAf{ez!dt8~j@kSWGy$DTXu-1U&o zC7Z<4^#I6EK2DySS@A1&eUdHy?g+PW0zZpy>8Yyt_YowYFWXe0*6(Kq$io+n@ZUVN zbq6#SWWQs&0`dEZrkqOq=1s-_D8ZDh<|=HYMHs*v#kS(V3E*e<|`poT`x=KeXlmYR_8Ndw|Nb{+4TP<)FV-9JmJb7U$15 z&b~Sj$M`(=3ZZ(7L}=ay2rV{$A_pr3+O!B-o2c`xtp|E)EGzdb$4q$Jd;A>t(N(=N z))c(uNBGOnme(=mr`@Z(pWbiY%{sCeI?DLl6f%r*)~`CP)pOBWmP*v$jcvX4H8?zg zUo9N%4N<-paQRV9a{eB_Z{kZStQES0fPHhcI*4-HZW#GFiuh0_ioPb|ILf;Th6O19|P zqFR08VhGWP3sVO3(FIcf~&i2JxS^!M?ZWMW4%JML%jIQU*8c4nl((m(oYIeXX{J7_1c%RrE!E# zOpSVL#DSr!jnQ=vwz(}ldJ;g4jj&4A)N8|DM+2!mA+n&o7>FQ7q7x#YKMhXFT8>&M z2|I&^?Ykc7^o1}v=91)$Kr8U=5HcD051xaG!BOLG$gL%AFn`4ck&a0#w?9rPG; z=E$WcDqb17O>kEAbz$&b{PX3}5%@-S^4GpMtWu}tose;B@zKY7-%*rEu>1T`NhgJX z5bgxwSAt;wE1!m;*n4Tg?bza=^&^%q-=uKP1G(9t(4|7{4>-t&BT`o0Is*Hycdmb03f( zmF8?#SH6F=tY98SEi*l>S1=lKEd#)(Qc|N!Z*MQtA16^3XTT_$x&WPfwq3Gy*iM$CbQ){BGt%Y7@P`15+ZdS*dZYg@T6nsgC4GD_X? 
zYl~f8DENqtR7SiL`T?RLvl22The85&jia(MvoSRIfU;Jt<1b+mA#=pYoSfgwToEK4 ziGdmHN-li;J%9uS3<|Tv19=8%g2Nbj0IP}|3)&dshx~8%iMscLn4&)9ZxXRXto@Gx zX)wa%wW+FR&{5^P(^Z~J0 zS{iZkpkL6>IBSRTunjaMiI%qycQ7DgXyD0KNBUNg@b0QP?FINVaO)2Y9f2Thif3kG zxTR%zlMdeRuEHc!3p8Nm<>gQYDm)pf7ohjW@P55>Ozt@r0<`FKkC*CyETHs(5eR2|+)6 zI@Vb9&>#2z@Vu*(G;PE)yI<{KSae?3#5(+ru#hJ+K@+;A3<*?fh~1N25LzHn@vpLR=la@(UI) zF&234379f$4P}DUh!KsfW+{%P+)cErgoVJokF`vp6|+C@AdvS%IpM|I>Z@+{adI{w zH_b{8TV(L^(Q$gd&l3LZ?pmi$`DosJcGLY(nSOKd?_KpPbC1$iXR;`L&3(fG3cRYc zZz&oUI}d;G>DFixX?B{puc$xx>l36f_IBOA0h)2ox5H;wH%C2FNfnvnSuhd_%qD>& z=|pU2j=Gl>Pp>S9Lil#-j&Wfmj!$oI8kU^SF8poTk1=n(;;^9+$TPT6sW$hcmEJP< zqfzS^FRF{r-*mOTPC7i1*Fx6b`U|Q`?ljx#Lol&a|Ieb7?Y7$t3JU-E^ zfHw5A|9im7+J51FJx`j@7$R`Z4QzakahxdV-dE+VDv>sH{X@TE>=HlTVmF}N|JE>= zCnx&F3!C&fy1Nr@%S739_e|#AvUau`)K-*qoz|?n)^~n-rlx9@nZ{{&JzuT7V<#aB zn>X61%3rM?x^tvYJGfNid&m3)90t6etEoOK<@~!N5!iK#9%*%!#P0f{Mi7e!B+R04h#%=9``hBqA5mBF`2= zEP+H~fR@@YR1n8xto(^f29sd>uvtN&i{MDSmAe?VbPXo&JeR^L40iREKq>=?;j)6) znloiP6M;rqr7L8ht(33?*Cyk!3>jzsGAYL7RW_#1BK`aoxdc%^Z`Y(s{whvs>?Lhi zS-1(P@cu}-l-+cViqIvURwWK=ZpFTVOpxWe#XKbLvc zI^`;>D}?K8g~9JI-yby87dhjpZNQR{npHl1TTa_ZmtJJYI< z2gClBDFMbPLNLa^#UFN$Ybsj9f-~koz?gk1u4&;jam#Q zmn%)`bQb0hO}jIqBTuW!95|8Jd#9rM+YA;5Ux!mrr1g|s(L=S=99yG1CFa-3G;;k@ zbE)PJ?SA}z!9IK)U3?(vG!%cZVCHViDd=!n$N-`u&&Ej#OT!AjT-?pgzihR@JcRk$ zf?W@Ace(4^u-XuRCfq*Ol;s5vdM=I*;UeBT<}nkY9|k$olpXz~m{LpVU7xZVoO0^H zmJ*%DJOx{p};#p4iO(TfQt_ID#y+{)}^ueDH=J zNIj6hVH5#xz};a)85w^Pba}hsI?cT49ZsDf`Q@MLqi&ahOS<@7@96`R7E5-yjR!+7 zGz%lj{*3)<2``6F4f(ajI`@G?hDvO$rXoIF<0zqtOY%!h=@^Fpek%NkENmx6Rvb+E zRitjL@$Ljl&gS%Zr5>E>46N=F%XZ|xVFmby3{Bc;l#c25DLL(9j(8%}Pj~OX)ZX;F zKDp#%RdT%XBd$Xec4t!bPQ~P;+YE;cVp^D7LfFx=`{{ty7zsS~_h?x_lZON&i%zwP znYnY#yHi*>=v{D2j-t!ULLd^Ph^02*x_~`jxYeKEAocX5i7`ce%RX$bw86T}7KI zyW|0^4Vzh?zqRL7*NjpsQf==X%4X`fsIu6^uhwov5HOXkMv?jZ0%fV60cZ>oA%2x9 zi32@9?(*Uz^)ro)!)mQHJk0@@ElFFc<7JOsg5O7yKD5~Kopm~ks$tqMzXfA#hcm^h zz9J=eqA&Hc>BQRqkT%ZrgE6o{{e=l{%p6!=)oook- zSFAh^EatC{4)7_Iq0ll)`RZ9>hX)`Bxcqf;QX2@Z#w%kj2SG#zMG3e*Av zg{V2!!NO$&u_p`fOOt`SmNE!prG$@WDUUFlqg34EnB6YYLhsY)#NIFj?%X}<2M~3^ks7Fj8Pg)=7 zz|8gR{?$*ba6;lRi?Ogy zW`Pis#s)5F%>h5G1@TRH$c;Q!3XYsUufRbW%X-*s!YZyK*Ta;zv1Wr7JbD+ngI}gv zN%GG+4Z`sy{4C?))PbrU*l1?9V=J3S+8QQT$niEpN8U9-BP{LasmtY>mSz3h9%gIp ztLfPu!!sp|zZv>D7@6PKG#2%P)?n33HVQk~PK-10793n$9Z_V|*iTC6{$OH27E_O( zb2fXtvn{x;Vis$3w1|eZi}A*>z*~M>iXq)%g4Q*5)do&k>4=8)Jj<8p1Px9c#bai3nZgK4O9SD z{Ge|=%lB$#KI1_Rlm?K5Ks+HNl#G~AqrJ0TryZxy?RdGu|4y@+)_2ZzbVhuuo^ot? 
zqtik$#G|SGDzuweMdh^3+4p*FBe?~lRi`~vQ2=<#?{K(qA6|pV2c*U2|MVLExYyAJ zZ8<)}J&3O2eK6nUq}Y7hVZPjWU1o^>2-hiT;WE%|G#L&Q_5%DQzdT7$-~NRgFZA)P zu}9cen)?B^x8fq;Fnz2#AANrxr>GwUyAi@4OcS(Z=#%U|{qX+PTYnxg5fvqp#j+JA z%vG{VT=#NUpCY@<`QnuN=wpXzhlxKo{=rjLM`&l!Qx)mKkNM|OrTr?fTiLUjdR5+| zz4>L_>R1!lE^NFK#F^ovkcjuL>2nSEpW4_}$-;;o=<#*}_@ z@p=%KseMx+k6?kpM<52?w}(F92`srE_T8{wo28YNdh2c$wJ2B&YJGE76!{3qiFO895mbH z2dp!^&_!7kYS9k;Oim2FQ65**rQ%@IFBHEHaxO2;DxH)g?GzEBsjf#=#cQEaYK1Bn zt+L0#cqLs)!HY>gfZddYrVJJ-!*jYue242JNkt_=-^S!lE$(16%>Nc~c{MJ@xy zO;@AmkfbPUkb+BLF0a1;Kdt}Cf~tul?h4rdVajkScNon&fcXXbA8~Q(+t{=89d$aT zXCOyO`?;RAZcm#244UsF4wFT}h*e2A60i3yFUE?8p) zG0diRHsU-lp4|1gnh4P3m|^JcRdP=v#kH-Wpd6>g`1$tPrF54OxcgRk&hhI$2cQ8F zB&TPYhpKmggGD@E2xT6ICI-f1e}-TY^v8xWZ7FnR{AN6e{j48FCs{!{;h=)t;#r`E zoIXfcWP@F23o=BCF_uluub5dE!_n2Zxw8Fy%8gaNOal!9w_m=HMkZnsuBuFb#Eq&S7*e^3BD3&k$1!d@Kf3A!lG5@(XGkvjcQ=Yuo=N zZX4bggpMUWv{~|-N`Km-*6drPlqy=^*fNK`Bxzn*SNsiNC z0lE^x;=ER1A<%-X3lG+k8`CEm&TDcEPQmn3_XwtFm5@+Y5GN>=J8zuabr$WE7sCbR z)bwj#a8&WaaVbyvEHa8tYI)5$aglyaX!gv)bo$9q%vzb-q;_Gj(E!-@HnEZt&q7&Q zd^?kI`N}ejvGjs4ODNggqRyOzr-NZ22aeh?>UxGPheWXb^ctVaz4K2tu}|4Ry_s9` zeGpDp#}zl>xn<)1Z7Wh~3kA2cxi=waJw#sValh0H5yjQYm}9I%Vw5*Nq0*zbbh%dW3Y3=);rFMNjleHKmq z+*SGRg?3*+9NQA?{xk8*qEhGSdyY;<+%|2Gkf*d*$DWc!R&nE#h1aEKNrQo@Pjf^8)+qj!Jtzx_2R@zOIeKFq`G=8ML;^8i08FybB(M zH>#R_!%TFKBBd5su!zn;pe zPk0ruyMs6Ln2m=h&0oF7y=vMX-MnP=*+1ck8N9c=9F4Zf3>Xq_51!0pF3)>l+~+?# z0?tzsnc4<6dsyhvqe2Rpqk_MR`ZApKL8$=QAzmEARM=sqH3W;zO%h!H4QJVxcM|cC zM7gH-xQiqMdAoW#;$xN#lO8T&O;}{Ymy*&vK}USdzca7QOr#vm)G<4H(n35r7OP9) zZW2e!^c;5N2L;B(Qb4=%!CY*tlhC>aABqGi2$npqg!vjIYPC6Ykdb(YSBd!Vf1E{E z&-9cGe^!!1>;|!jyV~}?YjCK=Ox6u14#&51JSk!R8yF*gHqMj4rN*sBXTJPHYNlqZ z*ue^f{5Pd-v6;)Xxk(7|_R~9BRIjwBcK_alZVpS1h@lh?P(3gf(F%0K^cad13soA; ztj3+J3`@ZzQ^6z#Ms(Zkq3j|K0FRTH+0VvTE%_T}0FBidGE+iFAG#otUCDwgI8qA< zyM|J!i_-1ahN|^6X_k zC><>U*JD!F&-;tGXfM|7UlegIH}0WUh-83?$+;AvD`4?w;C3-^77ob(uzP3jV$9JX z$&%l3;zzk0^nY54q(CU;hKj+Ga|ML-@L)z2Lx|XQ!fpJ$PBGe7RToA8+op)7 zxt)u1tMR{DyY0X0>{b&10IR?ZM8CuCy6!2WzvUAfPUBv0kJuxsso(hNkE7Syx((;% zs>>h z<;$oVn-cxg5r!7_3NK|W$?As}^=|PNSYQ_p(_`T&Tc1A|&w~k6i#^26;ZG%DnvPV$ z26J?kgw(?UU4I@KF5c#9*RG$II=`3lmnqLlE~0hiN$k$S{h*|j)*b>h6U_2&DrZeC zX8$fQV-2Iz?rAdtIz()M`Cp?%AM6ab$p5`){GOXZ0#yCLLQH3N;lADJJqKc3(MI|R_J6Y?zc)Kgpfa?KL3scj zsGs@91L{ErSUR$8Jr5NrYsu>Am!$U z_AS_8wL3`V9p%H4| zBzAe(WL3w^-g`O!^8`V(m6UY-fdl0N;ebvmZ3Q}~j@*>?GED_4I?L=-rz2@5%udbh zEmw85fiFo$?QitKKRBUFr~zG;9vFH@{!O{;W_jmXilt?`e@POuQxt1APM&}=Gz@1U4)DzK`aCAwi~I@OVHoHZ%twD31Fy-EOlGpU7Ak#0aRSx&Eemx-k)ov@%8XPT4>ypAm7Ne*yw zMlH+dhPn?d!^8yumkyG1uW@Bb2vxG7`Zs~K)Q@R)ac$$gzEt(kBGPs)B}ZhXXaPV z(Amq-=_yP)7Uny(-U_d!3zbl!NLlqU zLFjmQL5gM=h$&=nlPy`taI$EZmJW;6v4aJ6@_A*RhWOxu$)<<>z>F`+Rs$a3fZEel zb(%vvv)xhPA8M!c5nDl(yGoCF&A}A{$2JZ{p0iq-0h>b3zw6Da$e1` zsH2PuyxI*`aSHLHisxt7=(CeGk8u>*YHZ=%m`OFI0adX=7OB*6td0?m zL60-ut0h2{-Dg?`2@ldCaizL#)Rf$=5PfEiPBRKNT3}e8diaFu=f&H?wipLhJ$KhH zZWCZwJj!05xy8g1Z?nX8R=HnbC_gr^&UDMlX-+X@IFgQ&BcvQ-MFd{m0e!lxTt$;? 
zn|T0FGM{On&V*mh`KcSipMFBgB+5iXK0`N~G8-Toaym=2kCpg+iC2q&lrE}79{0B) ze`34KWLST~Tf!PTHtY1-57%Td@iBtoqVzP67mt5JEccl_-OA=$>`?qY)n(cuGehY; z^JV3a9d#8zET#kL^IFXf-i^)8`#VO1v5Z?X^9Ok!aY*-78j9T?H7%OwRY*MUF7MeZ z%140FN;9V9YLx{vLMVvz+7&uhz{I3(;!lB^0>mS_8hUW%&n8+_5=T%04^Z>=7^&m# zDP^2Ai08U_I@3`HadptUL?$6nt3cV$Nuyo@#T^kI(jH^+=pXx9z}d|HA%xGaus!wy z`Lb<~&xUGJJOt(05?2o{(RAk?Ldr+=ehh%-9}}WOKZTy>@;$z`AYIDB zx29+$>?p=bT-<9{RVDG*5h~x2y0${wo~=U#am#%TIzhA5U>$NeVA4?Z{MU$5<_ZAZ zA8JUQ{>=qdzh=`DJ<91m;+!J!r{yF^5D5~_FvNsJM{P1%O8XCkM=T+bS-;qzDQkZP z^>*1d4>paVVcC=DV^~|p2xDCC4{w@@m|TnAtGttr$QDOuNwLN6Vo*J_ynLu@M(h4u z6;?`k1YDGxb4w_FLc&9!-y@cd3p;=xz2W}nE#nsVA^A|ZXT(s_YOvT{)#VxsUlG2@ z@az7?59EW3BqPSWxsL?vZltw`-L-mx$Y8!AfiD!!r&({DjTjVt(CH zePeoGt|5)>4dcRTtJ1T{D{}^`nX>g_s(4BA$x+vc#kPRS9RZe z#*RiV%YYyJA-}w$;(qs|eqDdvUzEMqJxC`aEi3888_o9OI86$@W*7Jq?feY@12vnh z^3)X{s==Dq9L!|6!(FN^UF!xgzpTs*_+m(w4EP-RMUJxX&|-Z2mzuSuYy3^kLIGS= zbX1AtAgG$rw;h&71zptkr)OBK4YA;SQxZcd)yON?oK~;;$Z7eN=xNi>E!I_r8fCU` zF1^2Wncoj@C5}TQGEVS|Q)xVpX^+Dg$knT=yxf_4e(DwYlWNpj1E%5mHSq`G6B5p8 zfPYI!sj{EdX`{ESZ_-wI;emTC>;i&M;LPvMoT}~NKb3G(U#_0iKN=Ac6MC+#R3k(v z!?5jG5oB}zL9~+AcbsK!^j>EAaW50F+x_a&Z zI5%eAX(aks1liXbHndI&Qt{QO-YD8zRp@sP+D!8#O$GXd0!(|R|K#gbhA!XOX}$X9 z{hi=pYpdc|(JFNe3jo)TTsJxN!(vWfrB<_uGg>pbG*V3E>@s8jwE zTLu!2X6&pCw^0+0=02U7ZHHvRGxSz(OZ8NY%fM$7g$DdWu?Gzw)mmMay)6dIe z?|arA1%CDsMM+k`H=0J@AxluOsL~YrD?@NovdhYQ^-=3|sLC}vdA9CLYa%bG*k{-n z^T~PvM)>HGZCLlp@HOEaJ-e=Xqi6ST+PBr?yc&0GA4UI_K+lN?93u_)q@2kba8Eei zR%sj;u(&Uijbe#QQi?xpdOy}U&sUZ!#r}jqtBbd@ zH4)5Lx~!@Y8482#q5o68a+aNa`I@e3uHTVRagT2a-_X#mo-f`oL1OrE^+m zr=tp~(@ui{)woh3+N2ys4pXs`6&uZ!2%VSotPXx0Q^XtER^Aesm6eO4&y;G*EJuC; zK$3ZX#8rTm8@^Rl*8ou@Ba#1FwISwHo}(#Va$UUa5@=s}zMcl>jb8GHB zE?%bdpLMFi{pmI3X$2c%#ql7k_A|nBdav);?&q2sMsCyxhHRri|*El~ah!eTxKqdQVyD*Cr zw+3t((eI&g;vuvv`Pi=9&Y*_Kaqi=_u=>+xU-KRif484_6txdJ&Z-2h;Towacv%&* zJ~q?myib#=n^Le#Z#2g#Sm9!hYM5xy&Wlq+;*Cj99b{3FAhEn+2zpdx5WTGco=7d9 zJQ#*A4q~{|aePZ+$Fn-~ZkQC1KH&NiIzN_A9@}U?DA`{OCBcL_Vc(&dz#IFWngZ(1 zAsF6Q;Rp27f1KZM{=PE$>e+>d8oJBx#I9j@U7PAuvnUz=*jhDM;z;}4Bk3&CRq-Z| zz;V;E9cr7*jBj=;UJp}c32;&Z%$R8I90?xt^UuV3!SvHW4mqr4lo1?9*K_qX$k86zp=|LU42;R4VM3q5qTyv z01lv70Qj+ZGiw&dx2tAw4UOg@0?B@#eX|4ifZ3phz*qc4r{Slg^BmU&j z<~RHg{r7{Y!I&$Bb9iQ`rLJ@zZ8N>BFwtm=zcX@1n1abdJ_9ZS2=6R-(7p39yE+_s z!K&JePY2r9`SP-?LlXovufg)%xLYxR-u#Jn025c!aLg{_%%|FIU7UXp&&an^r9HOi zd8>t%+Gy)ntBBVz+N))=87Rf+c>VW0Ho^+qnqi5GJ0Q%Cat2l0-AX)oXM$hk5tGmA^|>} z!s61Fti9v_>ZO|YY5j#+9v?}OVv6vE9gJ*!D#HC_;%F>#YVg?Jp?0 z8CN~@HiZIRDg0Gs&AV*4#=KG*+*g~?o$iBbccRsLv2G#|sNM6{d^E*ImM<6q6lRrr zor`&5!1g5?I#v?7BB$Mz80`n}nn~)8<;n&T%E`v%gzB1_rAvsRUac-(dec;vi;+-T zVgW_gU~S{`Ce_VS68aWrBKBJc z38MI=KE+2H8Ks z45;XQvUF&)Ubu3YR&m-iSbTNff|&YLJd!gZ_E=3E7l?FBx#)Smb@nQ*z7}+!@#msn zTsVl~L$1sAcoAE3#}|fEBhnc=tvj_G{v(`+-q2Bw7dEHN)-ux>QHEpyv+g>F$BBIq zTpr_IFFdl*sShRc;oB$CX8D@Kj55BO`r{`Z!d$_p-Qh_i8|R#xM~Sz?8U#kG;7gRo zxS#%22od&SUcSdJiJ=ky3byd~TK@q4=d#w>{m*Oa$CdE_tw^_Nqr;f&_iL&F^t=I7 z-x#gXpWP;^iATN^Afnc|?fCU}Nki+Zr;3!vAsO7qQ!BBZaMjm+Xy{QSPbZVBUdtS! 
ziINoaeb>GeWcESHQ%dMIN&}VsSrihMtjW7KVabA<5U)&4O>Mn2a8B}CbWT2hSA5|^Day$RtZqhOPM8- z9!2A{l1iZg>@T(_5__!cZ*w)WlZ5Mck`3(Q#>{jC?(`KB0LAZ~cxoN4iDjdl{oSy9hELbAvwW?Xe(Zh8`8G}M&n%FFm z!nG!aL>_ty;h~yZgP%+`aI|_%)B~0OFunE29{%YSo#yer7#B5NJO^!b?q*oTiBY4?pL5)WE#PK zqUZ%po@rN(BhK(zFdam~#-iHow?DF6qlH93oq$k+V&Y0&k#j~tKsTLytb z8iJt#Fd@+HX*>o|D6A*r8aZxbwm2h|=~eGcX5GF8wgri)Vn`U;^wxfHiv2))ofsMk zGg125PD`pErRLe-H9kGQ$%|_;VHY)r8)SZ`C~^!hc;iT!T9Hr!%TW8$n1~b7raVZ} z5~__Iap2<)Ba{rsV3w|dr#dr5@DN=yLQEt8d)$>I4Gv21+&WwreHb=#xj-QTD~G&s zod`=-jPrdO0YvE`?{gM*NhI1G%E#^3Hl&4ds~QGG=EQA+wHN>Bs9c$w$^QD*#|Fr@ z@TsO*5-toH(YpZf3(+;-0VLEGt}G{Y#BohDlfiHs7;~9AoXum{=WF=qT^_Kn>ID!$ zjJk;zDd<-y?xZ{6rPLfBZT~E<_#<_qrwRXZDV;TpV;;6*01e7=W^U`1V=x*NJ?@@* z1m^iuo0^53CR+L(r#2`blLrze^eZSwqJOJrmd>W#zEK_}6afp!@tnf%||{!F_R}!AP{OnIo-RNPcd*A%N8YWa8_P1VUkLW zNxpjqLNSqo-Tu!Ild8Te4_xsTVFs-J7@spkH$_*StqX}G(1Cx0siQJ`r>;!heQAN9W zeF4F0U+|w8ag5WR`^U58U{%9F>FwKM@m(LmZwgFv~rqlIs(1pXVw7T$e-WT|~q4q#Jb>UHfsiOmE*E zN1k#1C&cB=U!Tvu#M92|hdV}q1J(@vpjD-5c7k+aYGFr#_;RC8bn!mI#PeE9F@8!b zfms*@OK%n%&y-e7;|G8s4V5_qP)=0=-FM|%c-2gTc?W6izFI%Mc8DI)HGILz8DanZF8UMLm7-rxW!0G>z>DZl^z<<+c zY#%bFc18IE(AgvBe$I)g58Z5@h8sA6^+te_q+=^0Skj)R1j?pidk9*^=Ty2d{Pr7F z<3(=({f^Zu)CE}tP(>4H9f@TJ@eZXacA~pL$*u!^c?ja-k)*7Lns-=WgtOkowsp1EPR~s>5*Pe8a!Jxky>p>WIVjBI?RmN~-yINvQjeZ&dypRmT2I z>WJPN;1k=MlY;9s+`9El&TS@66n^Ax`JHPVLhYco{bQ9RTC z&l`xEY&Tv~f z{++|l`eu2b{7yL#pYpedgGf7c(mf`{wlEJ4;1cYv53VAr5r}=4nSX8xjlpt=3?=1 zNv7EQQv~-v$%=;@Rk{_d4vZ2SJW(Xm=tY@mWKyALu7*65KxxSLL1sfGC*gZPhO|{P zFppE>5Vw6+1@p>AjYb`a(-P&HRe0uPRPsfbX>Tl!X_`LN~heYmK559Ul6& z9=kdd_LhqLJ*<8Ox3+xWJ)8!fO#V-;wVr2vuGLUM1_PFMkIc3ZdQjJWfQ4G4wq{Kv zBnSF0W)zw~zOmi8s(+Jqx6WjAz&EX(Gd_GlsX%ixDEvT4xi&+qvsNK@+Rr)5M&`Zk zYd?}*3ad^@nFa5)guy!!I)>N}od00ns8_L}{ANG;qd~9wk8zMY!`}~-5b#KXTi4@; zDx_MM8l{k92?_cTVNsqWqv_+gGw!&%TebYs$&W!Vm6TBCtkA^^XmqOCgbk|=H0ZFKpj=GkKJ>26~^kHKW z+>)hQlIqjT;pPhg3`UAshkN1{7?$ova7wdI>^Jn|*9P|DWBAIRO@O}WgX2R<(Aa3` zK?;Ti6Miu&*Qy6bkVz4qih7zI;yN*0X_{3atPVeV8C&s2@KwSt_J-h_dSOQ9EnqQZ z$&T{M+p*D&rv`uvtG|bJESBWy*+KfFNH&U=iN&^ zo&=c__bsunRQ(_xR8x|x^Qv-M3L=}U$E8tYP#^6y??!JLr=tDzYm681SX4Fv*kZ(%1N+r0!U+{)Y|j! zfkxW^_kBn?>sSiUm`dnWn*(O{tO}uu_MP7@JUGVN1_`^bt47yNG4s`YMRC{y?){%{ zBWl0A$3<%8i>&|H$bzE^kY=}f{fP^S&`g=vXV!`2QsNZrxB(XQt;o_ILrT>cnX~ZX z1HBbtIMn^ZkC@hD;V0WpMM6OND;sgI+L<^2t8kqxW=$`M6K@Cx`P=9#i(8KJa949-P^8PzJ4DruRnBU|q!UwJ&dCMWh7VjMk6|pgu9`ae@oRFccbk23V z=0|$=K)7ffP8e-4jd_ed^CF*S$`4^PeLC^q@+qHi%sC&T&OCn&AN~mnEXoEn0)8n! 
z-XwBxTFEyU49nQMO8z4>;u9eUbs&q+F-V7pl=vrsW2rW7Fv7302vV$OV3#?@W&pa` z#Io9fna*z!ZfW=f`)q(<`&>66|4nE(z_CD~&+jg_@GovYYUcb1?|M#bU+@+5k_|+- zsxq(aLeG&sjcMUui9$-qyifCkntVBMaNeM>UEzVwMq-v|)w=>oCZA)2z z1ef~9Fsu?_)Zb$`>i5eP-}oqmQZll20<5%^ z1Oh8dfx(}-??~;}SLWTC4>ZIDYLgF~|E{Pj>IOp+*daMid2~{1J(wjt1i)lhUuGEH z)f0sRnEkxg&ib;_6WK5lD^~*+iw~?pCL*ua6>Gvw!{1j#;FLUD+*2}ED+Pg&w$Lk< zgi6}daJiwFN&Jyw)bDAk{;;P+ARZuVrof|yrrzNPZ5ta~V$`OnYo%ni!KZF<9KNS) z5L;%pau5ili_7|i>+Fg}2B2KfOS`fS>bU-cuPd6a8nJ|7bvT+#9(*VhJG1F+XMUp^ zf|z>9+41pG27(Dj#hij=bdiDeFLKmz1E;T*+RcO)^Ts6ok=v1r;3%w9gekFDb@7n@g?DzwoN+G`v-2P7noKbTBDN>7W0yB zogd7k5lgTa9)a&L1He22mtYvxh>&E89u!GQrLPIe3$vVA$4l(wGRBm4zsvh@X#ODl z*CF+%M&&t^dL7{0x$0a8eYk&S#qt>8IF@?a=aG+d@SZ^Boc1%8NWiBW#_+@X)M<=_ z*hg2O{18@go37u``$JvKkr^X_*X_-eyaA4~?n{DG@q_Qa7(mRY$nUk92C8gxrs+D) zx#6>wr41QmFSlrR++AHW96H(X#p7i?>zoCAvse-O>3>8a0&V z%#+|O!3p?ROQ$+TG*wqT;lb8}6K6dXv%?Vm)k8od>l?5hkla!zXDE42^Eu=3!9|AK z3X3nvdOb}wOOsHc`abthvaB_fBsqE@O*{HP!*4EI+4=IWSFh8U>iUpiD(*hG88;0o;~Jkg$Igqv zx`Br_V?<(6RCtTf3`ncaQ5829Xk>HQ+5lE54a!#|Ji#-LKU-i)YN|aytx0e5=_DM}6UAOa zJoZ7Nochxbv4yC3F*7A@_^UJAOscnZ76Ae03iM7hYPj5D;SDmo-?IqyQls?>ae zUw?A!^(Q@Y2P`QkL-CdCmKrLLA&B(uTNZwbG$wrRKQE@OM5Iei{5tGvh;plbY#|+E zg4s0Hd%<4Hs#Y_3!y>zu(CHoNt0x9xMYOi--qpEge+lGlo{ROaZ=n*lK>Kbi90P`1 z9Ia2Or8gfv`P19}ncJ2)0#PSEQZNp`NN zF+zn9aAnD88mEVhUWJ$fI4%p2H*EYuNJNEKEm8levyZ~xZ?0zA7x<|}j9SprC2zay zHKk&W2v$;N4=27k684+{FB#i&hyYjA#@SDaYCCIQf62-e#tvUx1PEs~3I&^9ub7+4 z;$~B-y|+(sHFmJp&$fyrS1xSjz4brP*5=i)5>4Q9Fs%ZKo!uJD2YvgGL~LUd&=rt= zP+@GqCNyI43fh^Q*UdkjFsZ)3i!4489|{+eW<~uTvYJ^7I3xk^m%}Liz{+ zqIH}anLi+rSzR_7gD-N}M61?fxBOU-VtpUWX-_t-OF4V$ImSyrUYu0T2uvZJdEjH!3aB06b+Dg0O>i}N*`A5k&c=N70MpM8r08~Tqpne6evk+l zE+kx}1i41$nf~#^?h7`fJ3!ZcZLLmCsB02BEPi+MV?!LAG}jLQ=GwUG+O+$ShTA)Y zX*;(Y3o@b{b!`GaDd<03&nW)Fa%be#J4>1p6lx+_l}F@EYbnHlul<}UqF~$0u4@W~ zI~Og!pGSx8VM=2#-5bz1z@)FKT;qj{cn4-1m80*|{kQFYMyt_j6TpJ9eTRtwG__!<2cP2$gVhW*45rTEK)9wE>?vDt*lZy`t z+@G$4%=& zX?flv)X5Rz(4f3|eEaUZyTL45`;&H4K!){KnwlhVmK>!PibH9nbh3F7sJQG&Mbg>I`TWLXG08nXi>0ffs!|`D4<<>9vIWMxZrEDN-GiJ zKN;<-%FyHj^>H*}Mb$NLf8GMVaF1(s=POsfRcw!&QCxLg=+}bdk&Ix!Je#Vb{R|@H z^bq?=Bfb}Wi1cu%8ahhb-NDc_`8o>O9Fotid%C14!KNObPl_5dszoG)HOOrgC>ZUb zSM9TpJhm*+3RtWXhAm;B@yG5F@9kjpshz-cp*&~uX*yQ$!NtAyFxa8;-5O6$^z{w= z@a>*hl-2kgz~3FXIDY&xK5fM}e^T2J#N51C7zb6JK&BwPNCa7z@AZV{vsICCdS}L| zmDuODGvjZtNGSEf_1cP^warg#v8nyKo$cM0Hb`tDp_z*l@a!aL8FJW}I9b?r=jXq_ zI3oVPMo3j;UD(r%vdHh!?2KVWwePy@a{Zt-{4NheRSZzZ1vKm^4OzOTr^iii>Gis8 zS*7Drr^WzRiko=vIl-~+lyL9-_xtDfnYjuIR$aUc#G43(W{tC?6r7ZT=O4Q1M(aFyu$^;99yLfKDlbOFBYSc3TSl2YOdy)B&Z3U3&WHN>hjH z&_uLOhXuiM7>nQHJ{gm@vd1-2RpTrbPeek7MeS6BAw3!}51W5Si-W|yN<5%ef7#~( z#?(?|tM|m=EhB;GX-*llC7HqZA^K!=RMFKK*9&Opu*KIw;u;{cgLN}kLE;Ko zROA1dp{)R*C2E8Uq;|YFt-{gdINOLlVgmE(pfz#GUSvjXnt8kgOCO-p!SMgeYU=K$ zqzi%4n16Frn)Kexop{0)6VgI`Zp5VgvmQMhrpiVqHbm)x1P_tf>($sV8eAnNdHQhg zR4W6V_+#ejbpn~jOhoH+)tx2&^I%!g2ZI�e=yI979*1De`Zw%3NnJZ_mN9yOTP| zaB*l6CzskFCDpjGF*T#~$W~4++bB7UAyO73k!JvG0&+!HZdBtg5k`w9doY2vtcxOS zG-9pd0v#`$VZgyjIQ$jYWaB z+?|$1W&R~jw@T0r32Fo_0D@CW3nr)!;h+H6x~$<=BzqbNU*J-{fINfOUJMZ|r`!5X z>vm1p6@^b??kvqz%{3e#!`A3<$gWV^$2n(XJT()n_haG-#w z)Y->+&sh=qYT3xwpmQvPHWdVl#!SpEGJT?+;3Fhb4Y;4+xlp8h>0ym-4QwS=MP)d1 z2wNf3zt3UeUU*Byd}!g}_Xo6ua-__K(br!1MF%m&wS10J#n_6$zyup#6X8Jc@+l;_ ztR{iwmZLDU9BFU$S)7eQ0og;z>f-e`n6HWAK=x8u1KXOQl2c)qY0q;*Mxvf63j?s%px(DIZXN`bW+Y#h&d*J)U0hr^&XAEBHGV9u8SF)R(+8f8*{<%4m&XGRv<`yN z52s#*?1qMM*xf5y>3hRnUHwnC+nuf0T)r#?v^c*BPQ3U()^vYZUIJ;sM&aa4yjH!eno5qx>Z#Chyexk9 z3fzq>U-=;S#D6zV@3)tBqf<7=$dw=yD*QWwvR_SSL>!5-&FJ2jm=yzrT*+yblM;kf zWl%rLFT7&38pq9kWo2Hl#0)_rG-;%W*$}}$e~lBmhcq;0()JQuTyR9zN4ixpj5n#c 
zmr=K~`a23}Y`E3!E1r99azqYD0a_JdF1CzXK9E|`9+c&`M7`(F35lNKk@W*&^icmg z`gI#@jlP6Y?=Nt9arOWVc4I}%Bc8&*t232sKBIPxv<&__qc4{$8?SPNZ%VZJlS}@O zUi$4?`PQxeYg$nWHVV?|850yW%gs82+O$ z!nQ?*WAZGOPe1qaoFoUyenOS3O6J_Emo2-Rsz8PQgaoh^)RCH4@B7M71^ez_o>Zd* znby6;Onqbc%P8pc14GUMj<=WeYUpp^~PX1*)^RHVgELUn` zOjAUkfClgp%j^?gOWqZk%>5P$M?@L2aE253g!{LgPW+XIvo~WRRJ6|33;lQ{{iJJi z#x<>SdDwCxt=fsXFV=rMp_->X%!hZWsw2;B;=YSNGC4}U)iX$bfU=8VB%yp8-vZXA zB`qBaXBwi}L=~^h{CUWQH$p4lhJ%;Bksra|RTyBo8bUc!T8gP~+eAi;;xIc?$KPM3 zsX{W7c&AHAatMj&LaB&AoOVph$EbhMgO3@eNUuA-(>vSbsGSKo!L zeHvi9Y&V*Bs-iMI*xe&Qj{BB$HyRIr zS2mXzeI|JAiO1PLOl;tt<*4~Dm$VhU)^~BgEsn^qicm3>C(+l@jXltxXq?T)E90N z^szH=dHkJ8I9ufVd(jY%k6Lkt2z`I@y$NLqHA1(PBqPa2gqR1w2M6yWWt{z$A^Qzq z`$kY(4>?fYVlGq%X=T%d;`#C0P<*B_&ExG?^4dfiWa{iZIP}lfk?YON4*OPsqIH%b z-yY7=N}p?{nAg+ed-`%5wc63&38BH>HJp!M5fV_&kkXPLD3Qv)%;&T>hZrCAa0(q#=+GQCkO(o z=0Asz8TD_#ZC{iWNa>{7_=-H95*lv}*dvT1b6b$k1X{R>TBZHLRyQJ4Ly(21s%+*k zAh?6yilDf&>)195sz%4wcK;t$=MC*tRvXZQIsFXZAkl z;oj$d?y9cpwbrMj^Xs^_XYT#6=Af0$fwFQk?3p(ulV80pKb<=TI8r%$dE)r);o-uV z4=aQkJTdq$*y-|$!7n$5>pH03+2EkuF_G@kn#!ZVuTuVd=&L{1=e+R%jw?9 zI;1qeBC_Rb;(r@ayV>FJM5b%jMvOCAxX z&?K1XQ?(zmC;-ega=aO``2&uw2qI-nctREAx%Fo-D9MQBY5Z^9tp@ugSlnwkp|a7H z)wz%ZFd-JrYH)Oa7cRnzIyAfS@e%)*BMLC##%V@M9H0!Rypxp4@)W?G=cMt7k<3k?>7P6xUn6feN)Zqr zi7-ZPE5Fw+-SsP28xvH>u$Ta-V9QpH_=&m&lH>ZLx)sMbb``@y*oOHOLPj$iScsL$ z(&#)Q8CK9Ho$}p--Q45F>r@gos%-d(uTPVnppW6UNHOpNi($n56v(8dZCXa~BFUj8 z13b&qh#^{+vin1c&|T3#;|ol4pz=-8$TFj`?^B7@Qj9OJWVv8eOfms>ni*s+Zf~F= zcAwwJ@tc?EZK%nZ%89IjnXV-UnB}@#CR&OV;`r-MOdvh$w^fZ@BMS9yrvXrT-qT7C6kc;_j|gIlvl~B`#8ZzuyA<6ux}QVbu5?;WrpI zEho*;5Fx_{Tf8L@17p1O_~)-t&87pFp^){TI`@nFRIqWXFk5qU5PbVe9~U>)npz)= zGF3Q9`PyVNJ^QWU4$MLMzxLp~+7_tg` zVV~RZHTRKMY=#^qWepEL2Joyk$II1T>nu?6vD39X-tleKUU+Mw>zqkPI^Xt;G2&C` zK;lVipz2u6mxx5mHC-dj`F&K1#|c@LGBd20rmCeV&3ORG##n+RWK?Ac7AhUJe1fe) zz+pe}%HYgu>mX%j=2L;bUBBMjpa5?#)^#QD^n3H{ zA}z*)Bm97XJ%`ezIgG5zgA@R<@GpYo&?cR4kT=UrliXWWz42f&CXDlmkER)vATuaW z?p=(6TxWW27BC9*P;8}UZvRdVVtRYlKLoaxoy#T%%0KsKAPZ{+f6I>ArnBu#dt|ox z$he#Fhj*^-3}_-PGCLF#kfn4-*Z0d!P%+(Tu?@9!w%iDU^wq)oa4%mpp8w+~;p`0H z>pg^=A$7k<^zZOVh3X5HRL&#5Lpv#W6=5niFG7DWOZ+M$j<#p$Jt$ zn0?N1=3~Ie31O=>FN{Rrs1$d{i<_w$LwRhEO=uLjQyJ7|1H!3zMPbvF^d5A#|0|QK9*H#}P zTcBf)CDP0UACvV88ih9Z{4pfo$=WuvKQF{xUQZ% zd-SU?Lv(ClpM&r0a_6ce2Hck6gYNp=L_8<I`g_`TYUb0Bv<%M zF!60}9}H7?{`654%WtuC&jRQxIfMk(sfRvt8%y0)>*@xukVS)ggWaB#D!Gz5&pLOL z0mFfQfZf0W-G{!zv76BkjdrtFfi^79pU}pxwgL@z+-{K-2W9GAJLTlIY@{WT-)Z;W zJN^jC8_Hh4HY1D%UCtTYPD0e(?aF=NuY+ee7OW`9GG>$hTJDecDe%N|0NtLcKu&SNjMu6vQt$pUL(yFh!$QH>mG&bd!np;7dn9#d{GJ?YUEqJ!JQLKDze z^cphQ6;Wuu^^iLjoh52{<_Aahv+x_?r2{S-LQJpX@Vrwgo6`)@%>_K=bsh^e`C?Pi(yp=AjqL}Lor~`@b6u}R;bkVoTW>g=d+p& zbj~^qVTE9QRw1vL?<-Kd3I*h6Qv01)>FKD%_Gwxl+-0De*1EHo{$Fy&oNuP+xm(-8T zSJm^zun3Q?wCP>cY1#ox_c#1WQgsJkijFX@>2=FEZ`;uM!@3WZ&j@HRyB%rNe$mXc ziZ&%D#hZk&sDCy55s{tzGr62PSfm+vj6Q!ah9?VS=%MM^vZJ{mEFND}YQ@k|;44u? 
zPfSGvTQ?!CS$n#JSEHx`jAx|uED5ku4bi_>&=YnH#)Orj>-kZE*n(japQo*4RFk}f zVsyVB`=HVKJ<;I!FNgw{0oj;w-7$~3N%VMnC#|RFpGbc9@^qtaRq;K zg2_u00em*EV^Ln$k1gdv7QgZ;-e={`F8tPY-u;E>;8*S-H#YdK z7d--~$Q_VA@0zjbxzIfIKWtq$c(mg2lSYrMT-+#y=ZL%!UIaMmqAe8q2j$Pgc&{q* zH-s8(4^2!LrP_<8q_C!zjcKi=O%tJ@A{!BEUhRFrl1E)g39`hddUeIROv1%DfOz<% z-+E)+Y5r}HCZ?G^hIz&qRpkWDA#q66Ehz5+-?xm%XV%X;%a*@x)bESG6iB&*BI|_E zp;o%=dY zmhj@ig@4`n19E?uJL#8HUBB%q#dtIj>DXx@*xBO+Q32L8>=r?9G}CRm?bi z^EIDsnv}Lq*sjxN`a59ng`}!!A62xu+41)5?)DLuw8vV)SB~k*;%Z!-0+=5kc41z} zZtCchm!h@j(7zR#SC|0YO`pAg-a5iIk~>I4^H@K+n4?zk)N=zl-cu;&)G_HepTC9V zCECSR@=0u9LQ5{5wN42C&){MAkEn(8?cZlSLpjeI2_~`^kZxE69Kd_{GVoFKvcL1~ zRCMgNyR!at6B_h8gzjlp+2`dVpk4!EunOh)GeYXXhDL1{GfabgVIAU*cG-p+<2oI; z7+4VS;WlUs5bZdLDn?J-W2>^A7RHA{pfS&q#>#mFG7-`~pyi1Mr|-wa+qJ?%0vvrC zh<7xW_gmHr8@>JzqPG+s_(7B3HPa^*hi)*Ud!D)Vp;eAMNBRttbZw>vWXaepE8W@P zXMBr#?UgGKa8+s}*6?GlqEAc5*6LTr%U%JLq>^SO&XVIrlCf-Izle5SVS0YAE$p~p zChIWwajxGtPvD%wuntXt!u0gu9#HtN z(d`G%BSl{MpqYW|9i}M~jmc>VXQ2p!{DYJY#bcEvh>MxbCMtrPMnDCkhurK5P+O06-1aMYWbO?D5RzZg>#GbDeBh4Yy68sEa*4y zm&f2HE{92ie^yLnasmRyLx=E$qag&~RL;Rv)$5Hj+uuGnL6j=PIo^BjYqmHlEE+v_ z&Ie$*wQ>J`9L6R3Q{*plM1_R?%A<|*7ab>dd{9<~EL==#3F3a4M=duuH^i92NciyA zGF-zDQ!oYUV!D0RW9jo%3evk|9j;80gyZ|=IBmdVHf$V+$3HdfI9lGX*f4rPV$Y6z zJWiy*l~OJ=_rxxsy@O-pS|wlM1iHD^&gwPk;udX?3aqtYOqT=_jyr5|Z7P!axF8O1 z|MUEg!we6uL@I|@_rAWP8T!=eGodH2er6yS_!F3{0k~bL#*0|2V|OZurNAo$&gDa= z1CfFjq*8NCsQ^fHNMwFW3vdI-z=Oi1ERQ*!YE_H;BO@fC7zmmBGI(bt$P620gEc_4 znsMmll4r^{a;KHSpNn{t?KP>&$41h|COPwfeX7T!#^ynn&!rydcYsn72ya9ON0m?X zKFaZcPcQ+@(tuI)H7G$4bcy zucf$yguNR;xY^d8O^t`H+-6LI?|Lr^2+Vs$4nM`w+9roF096BudTEtkCw0xp*4pL+ z-m{=@CZHBWdwr2@VT#N1vL@`+?{g%?N$3R4M3vTPr<7$C7gK1JJYhSQ3ZLstPC0v2 zv2>fnX?6Z}(T)?tE}0iuaaspl+H6{$?$p*qf*nF9qS>vrh2q+c-xw2fy zj$|^5T$@Y7ZHUNt&|!zJlk=myV>Sn?Zi+aVjla1+U>DBiB0eg@QeR9^v~&CJD(?Vv zajH=d?M$zXKDqusove6V7UXrnvES9Xu~z^O3=wUIU~dG2f(%yeloo5;XZuStc1upk zT+>}2TF-NY(!gdF`ck@@>yF;-=%$)!$-y8|^7t!>@LE8LE)TWTc2havNtK7pyXv2Q zC}$p}TOKFm&FCy+Mn?SJN89RN85OW0*b^@K>r9lgd%4~FcQ1diZpP)Ga))2RN>ciI z`8dyZy!YnkEb?N*HuryAWd2&LQ59e6cQ6^Qke?sJh~obw%j9=JM=nU3S07nGdm~$D1Q7#7*|kz* zSPQQ*IkQVp56w!h{o&Nfc?Jmt0#$q_ zIW>+~dvCC>8p}Q^>KRwgr)3;>W!S?A)6G}Es)S!VSMi^6u+XyK*VARr5Yw)gxa8~* z^b66Zt1Yp#?tKW&V_%-}^o9Xs$QE%Y7uy-h*S{VpK(G&5Jpg8sc- z&^RQix`AC^>Bm&$1~od`uU z*juH9y_Tj+c~`C*O;)x)gU>T_Sg(l=!ddS3Fdf(NLACg=Skop=hd#5W#_G6iCgqq= zLP72y#SXGNH2jxUq;&ujgrS2@q=#GOqI!m7?LEO&Chy$;y>$Q2FPPeVE%<>=GMh9i zu-94?h@Q3~bt`9ZK^#2;N=1^YoyR6%w}8w>fp#MC1bkTER_K8HbLOkC49P0BVk=0g zUQ9EJ47h@Bi%5eyNDYQ$N>PG}o(h^ml;I=@5d24%)@nwQ!i0s-qpu&;d`i3?Zcq|P z-a!@V``y;az^5F?iVSgZGBt_7e@G&WsQSp-I~yE{c@hG)=E0VzFnaIlZh8L5m4`bE zfg+j6cSp=j4$0cQA=AWjMZDCNAmnhr8M7U8TXiP#2wHa(syH?VF1Mcjg19?fa~xK=AjNs*raqmL1P|! zthv*OlLXoQ>x&0X=8)Mxd$^RqgCwq{b2_F<*V~u$K$O%$KnDf-6r=e@^yu z!$>j*?iaxH?T#-mi1#09GwIXT&;(Bk&@5Zx5d7ErNFho%hOUsc1d}O(eTFfJvZlC! zL<5&hWCuf4jFO|SnJN!)0Vo5*7AZQy_VV`;780Kl4=(L@D|t(Mqp+u*9;_?9>Z&@5 z01x4?%HQch8*uRMYf(tIplSG^$9_V%ZWWr~NkoV6yOkg&G-%6foPX-xKWQQ^fN@tb zfy~f9FO*TLqs&Sp{e0=6-gr6+RQRhda-s^-)&XH+adBZ* z#alBdl#|MuFh5a9@O*vQi26X0h0c1%3BDEr& zG%aZ77CaDBXz|lXwJ9Z?sH|NBfHQ<;EbOVw-=8U+0n~W6{p1)%#AY`2S80|~{4;z( z7a9zxWNI~P2t4UGSm)6L*@%R+x-;mxIxCr(Tu0c3B}{^?6Yd6D=;V!SA!Ateiqr`xe0}~ zwJt~LgSXe!7ujgDO0~Axu8Bz#n+a7#wo9>lcU|RTTWyiE(ec$JGYwkCa)=gd6LYUU zD)Hkf{R}xE{xJBs=Y$e&s8k$Z(Je41`}*JoSOrgy%hX|;L?p+keVmt2zA&fE?q!#7T? 
zy%cRR($cgi!U$?kNbhg;C5g|$sb4Q!tB*lxK$6ptF3@Rpu^vgZ>L#MyRvezT zDRd3uyitMoH5t%j1p2JIt9}PGd6{T!uG)Hk$fpqIE5sWpP$n0taH@H9_0*JID^-5l zc_NxK=$fO^0`Ob7EzgbI{lVWT#5k*3$&+6ce3L{w{^b20HkM4!9>FMb%Q>Sqn%r7= zrD3$%EltQ`oXptefFUquEyGC z|0|^9bkJ0`eMkU@+gB*|NfW>1D$isxO{#YoS?@8X1fWG~3we8b(xp8&ohXqynhX3_ z9cg*AADcTcg3n-`M;%jXd`lqa2mU{z&n>P0jTm?}a54!r7A<2_w=UB{1@>7R3UK$) z4CIFko|LkPZV{^_TjpAO>ANN43|zc8Q)FE*6e8kHyICFWI`m6nw@WTR5Sf}?c$enM zodL`wb&U1$0&Z|BnjUXpyvTFAkYWQwyfdq>0_I|nJ5kn6L6}@+Pf0$c+n(q6NgIoV zu@klEHt>q@`Ow$@@c~*;)>B-J=EC(4-aOZWuY4dvjiAIzn;&4q2@wkH@j>=V#?p8s zHpw#$@Tg=cgcO46f92JF(lQhQWu(9yVgr~wkXeRt4zIzE#x`~X_Ykyb^=l7lk7FuQ z?3Fz6w3oOUXqIRV;+V(X`NV!UYvp(_j_7kA%@pihtA;3rW$?ILdJACVNw`I$?>37f(D@|i$ z)i2@rwP)^$dQ3l#{6ZJ+i4)gd+A#YgvPZC;G^p*H5>s$a7qmBJ?{<;)q>V*ED4eg< ze>7$YXBAF#?~sy)xy}LfQxPscN&?@Vd1ZggSU`(UuH|8YvdvWxRkhXM{vCkqCS;?S z6{R_}jhdQFU4JGHqr4M*4a1yuN^kM*?=&|tA+QrmP%Vyhz& zK38M?pUNPK&YSb1@pOOj8f@Tpkyv$aUAX+;j^bET!zm`=Ka-iX@bw$PFZcO?*xXE= z+25!z;U-N!CHBIed5a{Akkf#xb+1g3SKDoMee%>tH|24ggQ0`gZl>3XvtltV#Twf- zLFkg<<516kFOU<`jgSc&KNs6ldwZ?rpS&fIVl@_OHKpIZNIR`mW6Ukv#%4baxJ9F} zW$+bE&~*Tx4~SL{L2wCn)lyfx#$9CRBxN^~aag9Bo@=Z0)xGYiRXxD^>x_!e=Cb&3 zuLzBu4_NWBpKNNi(|7A%x)-7YTnc%{7<*8OakA{l%u9K%&?7 zAT2iX{`>aihGD8_{eGLwvs-*C_QRF{Ea*Ug$^Ao;Gnml$qa2Vg*OmO&7Zfh)t|FOoqwnC|p$tpW-wfy)=4$maT zrqE3Tpria6jMhDT^Lzxgl@hy|qhOMpt*6YIBa=`?cAmDI{ta+c?@z{N1|Pkm)O9gZ zMMoFvQ31Vq8Wb|^4WYNR=!r4fICaf*YEzAqXUnGU9Q=gvrA=>qGQBU*=yWm?$yS(t zyI}1Eiy}(KE_XQP7~54qn{K&tj!5LPw_3yi&laq(A*y#)A(^z5I!3SclTUD%DPG`@ ztrM&#-K_x^;3VQmiQRy)POw3j5UVyZjJ9h(&I`2l!`8&uv+lh(JQ933M-1}5w)8Bp zV%>M#kgb#FY!jRXHaMuKj~cR#w7``F9J^;p%0ADM&XSLIozAYOH|IzGMP=}R&8G@r zMRXn;i6T!A7Ian!UuGPcIfv^8@(P}f7XqD87Pm-e=(v<|qPn;g#PO(_c0lE~^^WY; zh-;mhZ>;0M6N@RqU?N7FIl6k{7uTs~<%amNlx(r;g^pS-}- zP?^_Hk3tfL@us3GR89=5DHA)RSUw7%uFAVDimxqD1>@>6<8jUk-N#zDbbh)aJ)@3e zEIV4gAd`_3=7yBVSSsaaz9LP4L1l%S2U=&E8IRiTC(Qu|Ay%fYu%ph{MxljwCiKI|(!TX`m)xFw_u5wH6ngs#EA{>8EumJ! 
zKF|iW!^;9GJQ@yH-dJ5-P*!l?ciF#%t-b-v*;n|YrA_)&NZGLo$H`)cZ-f{wrq;AZ zDVA_ zSmwFqRCPdxSG+rMMh@)tFF!KNIT*9OKVrYeGbC0{f;un}*28SC^56mbG~vIA%7O0_ zy^L0ap!A9e)FXA;u20^}pzVmfH;5|;FCmm3QrjdsqP)7K($$bt*oFXw-WtJFdMT;0w&R(EACvw%a_~oJ3eGApz1enMJ`XYxp zX4nyvV;3lnS~pEygOXKk;i}seN)KNMU`~XQpbeX`oF8AF7O-p@0)wkRjWs6Px6-p; z8gm6%&kl_F@iP=n1D-s1OZ0&)hqB?7s)Al_ieq3+E}(>+2?&7D7p0Fa)LK@=wGj6~ zih{Z6rBq@at)w9PNMKLt4DxD4oJA_q6h|+@iz(Fx{b8sAlFe-mEu_*;O6Od7l{U%R z?O#rEpk+4nMX`pO=k3sZ6F(KbbMz?CbwyE*EG;%H;VaG<2WV6Y=GZiS+n7Q6)b?ON^!8iLLy$m|v9$H!vYhp&t&sD+y^h`_PJL7=JvT-+67jHazM5$jk%l#~GA|KGCFkG0e_p8nGV z%neZbZ=}?0-JWK(W}r-CF;^>!j?hZ6P5{QXD6b(_SU{p>bhKC{iQ4Ac%dSU?E#tZv zx$y!K;$$>_qDS|~+xBP?-^a<-PT7MfcJuv!kcp(^*^5ZH4r-h5F2fxy`s_fS>LS7mKQW{c@U|5fCXocO>VWN zIRjG(CBvdDU3t>34r>vW*oLnvcg)tir>y2C++pk+=?rW?7gYflXg7=!iRaphf#-+a zAz3TI82c~K7Yc`YGSX-+523a&1dKpbiv)sHOW-1R+Um8mrss& zU>Nie{Cr#a!oSP(x-Nd(6B0eXuFukobPZ0*ky@*?13@@UKsDU(p7s5+@I}Pe3)`_W zHR)?8qBZZC7LjtZLN&%-BfE402u`eoO6OJzfW|~BrGws;KZcImZDJ2WKy*MHP;I;9 z9%T+blgjgmO~Wb?L$=V0>~wWi9#A@q9~C~5jEg_Kg}?>p`fi!DTxM}D??M^{xMB~O zkB)+^tz9$w+eH+xg4{hnxX>?+PnPgx7A5k!OJ0L!d;YNeTS*){aCE%@tSyeL@&+V_ zxKZ!LY9_RwQR@PAbUpq~2bJPTWr4is(jVfJ<2jDC`>&vZ?R*qQ8VB)q?in^8gRif= zM*DP)|H0Je))u~NJzKXgH=cGW>yJ>4;lh?xnGiOq%(@t~phuT}bYhjK7vU1H6IrXu zL$SmZ(b>>JGZbtR!gT!v7y=NKXEX^HiU7F`k{O;9w%y%cTk5wJ`nCvD>m~4LNZt7^G7uR> zrBG6#(!yBw*dIs{8MI>36|-!NtU;xP8Qda_&#O&4*Z6kLZ493Sve3*FbTq(q%&qe^ zvsHwlJ;qYeylSWl)xaf?@`KQc^cLbKXW1((JMmY92|%-_@Osj@4%&ex6>?eC1l}?b zRcPr4${@DSmkrAsd{!C-d14AH09aV)o{X9st_jN94`0@r7YRH0+Qi{Nw$PX;zEB1b=v+}ZJ$9jxB2U`BBz7b<+%N;XYjO3dz z6X{BuT%uT^q$A4Kv&0=3h?KwG3eoFYYlHoQ<@-)O?E}!pQ7%RjiW{-5OfSNMc7Cb% z%VWXLWEgHv|V=vZjOqME#eYA$?m%EENgVZY8y~i-2$B`tl%l~5o!bXNN4PD zKH=bNHvm`ftaNw4ZdO~8h&BWajeC4C<^0`$Vq#fpx8PYD6h57nF|ezCI_=3!^ZSyU zeqMe}>Ib-`(ecJV^&Z`LS^vOVfev^pguUb*?+uTri{S_cNFDl0?^diCSfx$@g26 z;-~5cwc_R|QSH^J=ESZPXZKbI%HL4!uoRAUa1oHBZqWCSM$+PqWJK~bX(A5aRwN0> z%<_rgmY%o|i3vL%>S{%uboFqa^di~Q*Q@nR*6Tnz<8SC83PV6NyV3qDKRD8jbFspZ z|KXGKe}8ec^#0>5@Y}5;;;LQ?5KANnP|HHu_Wx~uF7TlWGeXjgY>ZGW6jF|kZTU&B z83NVa+!R#+d>O{Q+|?ddW{dU zvSA9%Qfe3I6f9f}dQl1K9r+yvp1W_UX&<9l#KrVu87@UQU`^I5$Q6EE1Ms4bxzvn*#K~0`=wK{_5&C(N&|F87N1wcu>oz542danfzOHoblz# zx$h`YL+DhZ=~ZP^8k#U=)= zts8)UD;!7&5%?X2?$GZiAC1G-o-E|_23MmK+X2DEsNv8AFW$Y8;A zE*S&dy$;nl5M?=7&oc$|=|BPj0lNb$EpX56Nus0t1TjjGi;~ewLBQ&HwQ)xRc)}Mc zANcE3F+}6dJM~CO!13T6KOnV#ZqD`=_vCFS(WWY7vLI8cXJvQM{i9nQJh}xnS5+wN zdg;aBD*mAd9fUjUT&hLP^Ec^Q;P&p%qiwH35*I$g?5l}{2LoxY)<|?|aI~z>`y;LC zuRgA9i?{4eRc3DqMX2XtmJXQ7 z-$J=Q5xnm`L|xL4J;jvNYR=6wJ19M36iv-?9ZBAx_KZ6-%pc8oepxWo zeEk=yxuvxkg)52VH>(i=y;D}L_gYKR^$6`Xp#vp+KVg2at*nlM5c26`T}Vg#JFmff z(-`e07AFA_FVVf?#0UYV^H-=?`PWB=^|fm)!@s0 z?W(U&r^choVDGKy;IKH{Zgp3`M{w1g3r(&)*GX*`_sTKO5-Y#nhaRY+2{}+}@T<&tZKezdpR#B&*8tRyT>Q zfNX8_6U=l&oKb(K9wUV_XgruEf+ES}WHc9MIAbP6nVw)uWjj_jzO0=So)IbYf~NUYz1LM-~gl9TBOV$QhDrmu0aY>J7XSAL+lsaW+hwXr;{h zhRvC2Z8mq)uBxh{&V4+6r+ddHK`;Q4J_y11;!(w0psOXEI!XRA^iyh;B-r18QI%`A z@yltT;fRL=ML-r#d`ml%fGUw@wL4e%|tk0ShMHyppR$N60w{q)H9OWve0&25Vy%1cNQ>Gq6MCASDwlIsCgIPlg zGI%r?;IxgAbV6vPL6D9NW#!X+JA*`GsRq>Nu$&?6PB($1rE=&(F@&aQluC+5mF(3; z=h^|zWO9CzdA12~V8HSy>`JdGK1bv+D%FMQ6vceK#hqIBXpK|CLaABP3-vq^1OHN0 z4Qw^Xcf|r3))2T534=7NEt%!FtG4dk*_O+A&=r4Q3C_dH37U@#=YW{COWLm?KqG{r z68@HgdFBGt;#{~4*#FkrPJond4j{s=Y8x<)DWfGuQs9zN?!R+}W)peQWlxM31L`4$ zH3~4JQb=MeW0%RLng!1;pm~jpL;=M;kXr8Ff)J&YNe&+u5u$H{w9_Y*A1C=mhAB^< zCZxgJORte~5RnorW4;`&p~LC$4U(=k2`mbXpPoAjj0dTVJB>CPlb_x{39Jl2!%%gM zV^@!9(h|x+a^DZtqq2sBgo;_palv;ezzW4w$LlZAbve*1=dPqq)3q9Bwun-cE<(!N6-q{YAcD;v)=NIow@YoEcP?E7Ios%%8ZvV2= z9~GyYa}lt!;OUUPfHC`<3oBC%FL>?q0IT?yHeQT()X+c~$rXh&9q7YuLi;#6xhzfHsX$;l= 
z@9BLg^8ghTOw{~(pFt`>3nl9~h)EF|-rwRN&*BW^X|(WMtsV+%`yN}@jrgohrb>} zn}MdtjcLCRXBCrxt_8l%ducHHRMoB(I~`q3?*tjIF&BVJ$C*ZzMd0ecnI@6~DD>bi zeYXA2nnr4C?H0k>EhW~{6J~(_)!HLLa&#Ver!=dbb&AV}N?Uv!#vpSW(9sdB*}y~Q z;k*}u(OUgyy={+gJ->+C(@t%BlhBh0THobeIDD2g4GFQws`3pkuREsuKT+fJF2ydm zcbvv$hrM<#VW(n1RVOUo68`$2GE!mS1=hQtz9vGq6o3r#gCFZ9Cp7Rcz%Bx7e2}4m zK&=-II#-Ps&FA$$anDOmyt|R46W_w4O|&xzuktyeg<=6(T9`>8*8tmrs1!*mnH&@J zs0{;j+*@%1QUQ9A4D#E--rMo}?tfpyTUwL$|2k&(DQyV+fr{zt2e8fuH1{zE7_iBB z7*c4GI5srW(e9!R@)&?PhrAt!A?8j939QV=r?j-Ps>5f^cGRQGyU!EQwtih37H;(U zy`8-c=>0LlVnp!!`3N@(Sw)+ur~o3c67fAND+}u-9GT2a*^7|5p~zG4{EiXor`EhJ z#glUWPT@+GO1dx(E8ZYI_W$8ZB8CDViam2YioTp)Jb5@g`~sYYPi|gT4J>1!jXx`g z-=9isI51qeISUbtXb|S_Z6~Y~H zK&0R36>Pi+7c}_sY}(!yWRQNpsMQ)w@8RwC*);m>FAE+Fzb^~U=)xZvLC=kMM>1}T zHa&WJt&ZE9SJGrWRB9O7A$|bZUh_!$~&ZjdB96$S73CkzX zgbl@t$1yMQ$D5vvtnSiJPfyh zXrtNn@-RwjpG=isXA$5uyEotwAUNo(HB5|#bmroz*2gd#M(|Jg+_~rtLX-8s!NJLm z{)mJEyhd4BlT%zVF|$U@@LQ-ea_DKiz?HWmePBdL%A6#YYE17LBC_g6nmZw4srP5uL%Bhvr) z+GfCOk$1|Q7*^HZe&WJ{l{C4+NT(=?t{PED9j;DJxhho0XW(Mhap8 zm6%PsbUQ0|o@4#+9pTGQVcE+&wlZv4qDKl9NR^gypMV*=+Cn`bJHYI-&w@9^z4-U+<;AFWlbC%@o3 zX<=W0e7#^dv`@|bx{?_Kdn;g*ajPWrUN?K&RZO?jJ3i{cZoO4pVO0Wpn4g&i)6q5mHU<` zt1jrxi#KnL9qn-vvU-d|7pmlj98=7-<2p4pAE9)D4a4U)1W!7RM7&M7WRy-kAAYH0 zx^&+4i|TGm!50pJD%fo{0R}X8nqA?x>}5PXfcH3aJ8ivT)14_)<76w%Ux0#S%mBa2 zZ6yP3Kf>y-RSWUAa+D#xheA?2JJ{Ap&p}^WDXM2!W7$}xO*Ou& zulx~|!lVKo?Ey$L(4(SJ_ztysxrG;9owp+%Fc!OvZm9;tEYl_$?W?t@M>tk< zJ%AC}mLe7^aOeTS>Wm1+(z93?*I<%(cSG=4E3E`|*;H)+cUiI-=5EAQ|JzKQ( zF=}o;WmINt)_IAkx8_VTXb)y5N#}pzK^m4Q$6%?ZL}0efvturW6HwqHl|k+BnfJ_% zI8j*^G+YxIJ$w=zaHPNz=#!XK_Uc1TwG12(|dX#@6~2dSVz?rm>~o?^P8X4o@rnAuZl60lcjeF^Q> znY+X}i3SXN6WRisks5xu_mL?Wf_}s_*dT6#=)GGcxRet~dY~Mn9yB&=eg0(ceD`I7 zCy=avM|WRG_czL%*vFh$ekesJdlq0#@x3pEcPD5s3;rUYIStVpp}56%;Zav=a+6OO z%H&*#2GTG@j!iKF64@+*+ge%rKJYAsoDO!7da9Vo11hRcv<9pq{XkMfwIwdf3VKdd zaV6F5;wcXqtr~1Rt#p0OJe`_|x(@l+Xks=3t~#u6^Vr60jKx`wnQs|8p#Xr2OFiWv8Bh!u3!@8-5_Yc!8q{fuGa)H378 zRf@46D48FBq4iqO&}p!*5ySOfLQVGzLuPl+aTOl(T`UgX*WhV;e2y%?c2w2B122HE z@~a(08XvcExpq|#5ZSUOr`9XAy`Xys=isl7alQBEiQpE7JvIONCkjGX_TmE;bJ8qW zAMWD$kn#40rVn&X-oLJYFW;5!j9rFPsZ@iJa_Z+dd_GU4%BP&k}oK&OUnYAGgJLq1U`w`9}vn7PB`4Z6kC@yfqj zjZ{5EogMowd`O;&0o8?QobiM>^lD66pDLk9M>QTM)#k;dOj=<)=`bp)EOyWYTX9B^ zh$K*nF5$pJTWlIr(GsN%hCtPRLKEb`FkbYQfbxid3frG|HdTT=nFo!4_1*KHgSS<> zp^K;UVX!y~PtMQXr!N_h&YYO#e6nDcEe8i%7I>UF5nduqGjR+gxFPJu7?Sx5Vkc~5 z{D|fX10|_X^LbEXgb@tlvl3HCArj3Md)9j&vzyRy=P}Y*C@c^uvg7V!J?kJivKmW> zEla@*v8qB;7OD;AMB|v59*FDL#QZM}&j1-L@~PnW!RJ9~dVzbu^uWP%5|U^lhJN>- z7_!6$Ocy@k3SzL|S~Rjz)VCGi->RSQl$-CgzQX&y(6d1i-)nF7$jKLHTxP} zIkvNuo7nsk&noLY0o2D&{fdjD)o5MYP0Mw8G}cxl z;iU7ZVzsSx@0#JT7LcoDmd+=k5uf?iFCDrh4z-Y&HJ<|b`5{F#7>0?IQqoDQr_y!B zG=F`060D(*oTT#&W?=#iZ~4*^#`oR-B0~nNpvD&+3^2hSFp^rk%9R=-El_*>?(=zw zRfOT8Q_vEe6~DGC7`TmtUDhlakB`H{Nw3l^nqwg)j%7L3MJ7!$Bo$i#)dksN;-U=C z-z@^=l>`Pna>lIzmV(5_LL%dCg{Pt9C8!*AZV}dM1u@Dc(c%#_#Ysw5Lzs|HHk7#J zxOSN33m&F`mkH}bI_4DfHk%9wehr~YzOyOxX4V@j7j??0dZcm4PWIHV+Oc-g>zW)h zo4hBQQA=Mhh}2^|RAr%g^lgWo?^v}}2^<7*@1+Awx>ioKW7LjHuv&D~JZ zRj;z;e1Q%t6zmooYQ%4^x%ZKTiq=A{#oNXd7od6q?SJJAyG0^!#gWS>UCK~m$Geyl z;22}d%0toiUk0K$!3wWY)_)>g8*;9%-&{J}*m=BkdUEe#n(4S*x0)xN$4M|N;5g&< zbE^YvLad8Aa^CgT)?TZA1mMa&wq+FD1)Nd3_2!1So=roys`p&@Z?ONE2mgO|gQ1SRheSfvxv_r~c=E$PQ%>cSnC8VjY_%IgqS{9P-(Qu8_8>C6@PY^O_f%FCMDB&dqhnNHu0SIsrmWr0<>^0)D zG$!pZsV9Y!QR!FV2-Z@4^4FE35RXZc!-|Bo7Apq^hBcsewtvylnEGwLSCSGXkov!fg^~>en>&NI`Kl5VR$%XpUE%*wTuC&=)<~4JO)e ze^j3wIUCzk2hy8%22`R)=tk}>l_T>fF*fggCTOKOf?ObYPfYXgQ;KJg$tLOKml#<^ zq(gy@FjC2Hlf!SzDL8lfHxl-*^Qi#+n4M;j++dMlM=Xdem9nw8s+Ly79YMf!o3;M& 
zI>(AH?oUh{Ir^Z&2c34ZqXt>t{87$i@kk6CI+tM(`c{27`YE8_wDiWx$~hL#${Ogs zYtZGJ0aHqe{0f_-QKo5tEL2@d^eel?2Ah{4fH8r7bYj=RY+rBxLpQ&5cuZ?+l~W?O z9aZLe_C-3`{nt%mWGHK8E&A(an|^C~g$;&oCTb(O(Y}u(a(OojE_O^Ab28Zerl&6S zjKy|+@~;ub1;9G0#17)Ts-9PLE+6(Hrnoh`I=m!TyH|du)#&fVKZBX)C2og_i^Jn% zk^%3Y?W=_v220rwaF{s0u>=!xS@_?b(C(e|o9A|*-=J9f%{-FBzN7Dq)krrLWz9N8 zONn^`Zrl;{>L1pmd1gkNq3s=#JdaDX^ov(H3l`yl|4vLakg~QBnuphUN z{4&O2{5{Yq&J zwzRPIfo`nkQ7fO^{!_0~R)$vZ4mp|1X%k&0PASa}?3;@RH4p5b5PG_9l3+z@Fp?4HdBEVg@ zBiJG}=2lDeCVjztcXkTY5FRTc%BW?hB${o#B$XEJ?e5%K(63xPtc}rP@OXo}LtR;3 zD?7!wVODjTTWU*d4TzMy)X|2~Do6L~u|8jVXu5P^z>&{NR%XAyMcr8{NrD;`YX1?v z9iWzmf<0C;czJ{Xau1&cJWH_Uw;UXn`hXi;(OWH9KuHIR6~&SWa_r7V(2_2hq7r+j zGGNFd%|%1quzakm4GCIpl>b7*Fi&nrt(u6u&JCi!=M9QIBXSPhlx%I3)9`8Rq>%og zK;kr0A>Gawl%igNseM(N5@%`-_v$>V4Io{66Uv+_KB|G1+_cf%2*~SgCSm2PZ+PD9 z6R9y_Tn;hdPI6 z}(35DW8C8FY=#-W|EBb1x+**Kf|DJ%d2< z_{+K5>~OVN%hCduG6q+aGrDNM9`yY`9`7YBIhRF7#GV%oP^P2+tc78A`@~ix3V7Yg z0*TNG%5O>57BiEnWYqWF1eA7wl}0sV#+-O8OyhzoOX`Nk7L&3Duc)*zp=D6T3n-=q*{clModYOh9?cbu`;DW|t zhJk3tNoazMxH~LnC6RY%NbBOKrV_e`5Q#7!+)#%%Y8t?I^gy;HLTEylgDAf? zT_OZ`DB|AGygDvXjcG#GJd{=u3&FK^r7ks~;sq0%IYR?}8Ge1>+AS$UB^!G{Ctr0Q8`5~xMQj}v{s7ui zFU(Zk|CiPM7|`^eP57tRYHs%hX*VSl>y`sZ#4M^#y9_G92Tj@W)6Wqs5UeB=^YnAi zoOjTy<|3SIk2smkKQ2xNcZl{Indc})Zl3Sn=It93#-I!n0VQpkl!>V{okv-||i7C+r%eCbqcnYNV} z`_(w*o1OV2;PupisF~NeyR_N`j;_h^iX8lZcY!i|hI3N&9OBQ+h3{r{4U!g>HQ3y6 z4H_m)?8hi~$bqSVzx;F4Fsmm*k=1$r&upso#jXe8L|MOY*S8f_>AU^CT#sJ~$|uv? zgpi$-_2{}?TSQ&K#sl4k5Uzh=Cx=zLxfZ5bN=?islaBINLk6874y)cSG%Uz)*TU6g^uwu&i-$}{QtyuK(N zAh`T@i&1q;&F}iS;i4R5o++FiL;yw!wTa^*c&(VM<9+i1@u4$%XJ80fbd=!~ODcD* zsU~A2i**zw73|cXKnWgnbm&0}RNYyQ@%x$h{qLXJj9_f}p+F0)BnzO@z+b9|Y_?0g zwOMCtlVB4F7jjUx(x0U^o4+!NL#yO>`on`KhPg|j+DO@=xpj-ERqD=?(g4V${pqT) zaw(ZF?7e%{22qhqpLVO??I+ zAi! z)$N=By~>S!)Vfso0|~}RLV#JnQ2Xt@VX{3K#doi#=$c(CVarr4MWP;~wEVQ`U!Y(L zFrv|&G=JN$lTPHSAeXfrLz!LZae2pS&a6O){S%sm)mm8d{&^2WU*kzmBo0|QKyhdw z>L|`g77-f_O26?Q7P8=y@IkQ0_69c5Kt3dao8t1NE~5nYMSx3FHTkc z0l47UTd?1t@A}Z{5B#8P=FY;(dx$Iw1Bcz&NH)rlY$we*qDm7nje+zs+NC$ti>t)i z=ZOA}_2iZP?wRY9_<-RdOU6j>u`jGYM4%x&FoBTe`_SE(|EgAuD;uX;nAhda{Ek?H zO*nx<4W~mEr{>0X0ym+_`-*bmb0t@Ko1S3;S59AS?DOJZitw>~*O1rEbnI-{ir9kl zFqO#k^ejt6QEwtEtF+(}LU6Q>;K*@jVD-j;M^SS`@?WRLGyvVz(vA6r#-tpT!6~ho zflah~LMo6wwllLl5F-{R+3eI+=g3Np(TlyIgzV@b*ldB9D&k}=A;8C9vKo-s{(&?XrJOg>-(Q4Y#_XqUC1DBFwt20Wsw~t7$`^k zPEgV`o#=QaI;4f~s;5g^d44ix0Dn`erug501V6!nk$7NILAr{o^=o#M_+7X^j&WuM zpqt0%iPm|&SQ%KA1GKs$54a26vRLZ=WFTVffrwX|g8-s!5M*?(G^3C*Q5S=EqZY-f zf8>8(m~JZ@;n_MPLk|ZCvHkv8f4e`e9=vcc0J_p+W8?fV5kjUc!s0sThfOy8i+g5* z!6gw;Eb5B2?KMr(4Pl^lw^_?fp~ZaQT&6H zWbS?+Ir8--D=t#YrSH+$1HL@u2tvZI!;brK6a;*>DqmM%==|edn%Hfdd~tp3YlJ-?|7cxhPvm%atZVvb zcncPZeFLI#=rJ7EU@`U)F5;)NNkYgt@78MBd~tDjS3tZce)Ic}6Qg}Z5FSJA^$pw_ zTnn@IYwK8*sO3He|1trvTry_>_tvYgF^AXp&v5$BY|^tCWc-iUkkShiM|L%9rV5;FT$7F1ytc*=&B$a=ux)e)`_yd_$AUR0Q@xnf~gChG$LQesYDtU4CT) za0Rx06M2!&2TQh41!bk6&gT*rq^ovaZ?q|9N;we@DZL*_5vV#f4v3lRCd5s%a|tG@ z)4mFzU{#?6nlY>g z5iUdwU$~iM>UGh#ePd#v(~#&E#Bxg9m}ugla-I|r(*n5X%frZZQmy_LL%E~L4PV8~(Wnm)697A`jUos@^ZT-$w{l-uxX>(}8L{!Ys#2ODsj}A#!g43|yS_QNZIOVJ<&^;Gw z--0-79F{*GHb$+6Z1EMP$x1*vGAA2|QlU{TW$=bNZ_#QssOckIOW~gm;YfZG>S>n2 zj4r9!;pEUc95Sxw@Lv;aEV;-(g7mRJDb%L8BlK>i(%Ta|wHQ<8>I9&#v{LA{=SSAyUX&(>tpXkKpe8HCG0 zK-uVwS>WK&KFy(FSjs(VvfhFZ(xMF?*(`2M>HiZ%I_1K{S${|$TirasuAb-BDN7bq z#BC)&yy+@TInVQjP;``qusQwSUd47gqLGCPX%HILHRx7{;QZ7%xqAU2I$oj*My8NSoI5vB`p(l6&LU}EyXVVfHH zO>h@#H*LJppoS5ZQ`a$TXD-(hdt+Qonv)F5yW zvKKkn6IneXSqlK#@JyAs0=$!X<4Itt{na=~wX__|vu3(+-?3*gC(l9?$uss^ zKhCX;MYG$Z^8Cc{8^Y>bo4L2VbjK6!AJ5~Hx3&(~GwvyJVXckPm7-pqj_=CEhNjM{ 
[base85-encoded git binary patch data omitted — opaque binary delta, not human-readable]
    9XL* zTeX~|_tPU-t* zCOVEqwS`M3n31sFUS!F&Oj?*o>aXT2x1nlCGb%bVM__zOGj4m6k5FAV4n^%ZTjQHF z?~RMu>`|z2WT&}A%R0O-z4=?yq*hnGdZZ^Es2|cH1U1zA4Jm5@G&D0>)Ln?I)}|M< zR58Ws(4_O1F?B7Iri!vnC*P1IfbQS*};KF3LFo!2yEp>%r$0fqX+3{1{pDovCh6|0~hyJ zJ2_^^Ga{&FWU^(Vb%?G|+4G|`%eH1W_$JW zbWPvCW6kH-Zo57PXr^LOLsW|HfGukD} z9sih!N#F53epO&6&b0`^&4^a^iJiSp2`*t2z4(9zz+zySZ}9ZW_%BnH$Fn?Cxz}5`gk;2UAVtig)llBoJt^IQ*@jS2of+dl3|KzJkM2lGFObip=XKw z669azJPB2DwiHMA`=PI>S**4D))LgBY4$?7qNdRKk?O%gHt~Xx-JR-pnF^MSn3Wv~ zKb`RaNaha4;QnlBd%ud+-+|@e$?IzgDX6tU+dK9pYc=y(F-wTbN@^#}oj2stNZp7x z3f~b24_ECv@TpjdshB??(}tqAS6P0#je)Ecc3%~4XzZtoDuHAUbHrdirgADBN_+@m zt=HV^%FB6kK<9-U<{O^5k0k+{HzRB%Y;BeRE&?1D-h2{+%pzeC+QXhC_g3%|>#z0t zG{a00B#*C$TKc%sRj};>2L8zwFU zF_n~>dVD&t?O@sI$+uJC@g|#x2jw9;kL2kQF;GL_gUSVu9o03jlpp{?Ob0p?rLgHyb zhvniJ`P}yw3-Ucn@&cJr1vOMLaRcN5Z_*s&b}15?W9WiN9Nigfg^oEEwk1=KLQV!1 zoRzL<$gIIJ9e`ZM*>PSo7~B{2Cs3HEb^q^&8mHjf6RIPNoVkLiJyRrmA_fm8aD&nQ zTXLO;yt1YTB1Y{+%Yi!Q*vf;3(X9ZSE=QpuAlawg3s0<)72+7pHq2UDW$8n}pZ#cf z@LK(TQOy32y|>Tkl)Wu$x95q^i)i^}400Bx{*xD!qKNl`kE=)ghy3Z`miBT`ZBVmV z7-12sthbfmQ;u0nwz2nC<5C2nnA%FtrILgTpC(h6qSD^v_Elh~r3*Y>qZ`#;dDEE- zz$Ar>aV2ISIBS~nsgGE$MbQ#qkXo^hI|Ellk8QD z^XjcXcU{H`c7*909QozHm*bo2m$r*67~k1F{ZPPuPPEoB3gjzd04p;jWJA1QQfb3- zud##()sbndk}m(3o#)l*4ef-2GV%pJ)bXLG`#VpaPvVd@hiUc3%oHvt2=!^^!C-Mj z&@54{oA2cNKy4BJPea#%dJldz2ecywAl&#=#j2Tp5Ex@ES@s3eNDcor{jLHI&M99~ zsmg&9bV)t-892@jK%95i&RoqYb7sqGkW8%dm>UBx6eWF1dv1P%9Y+4sQr}Q8Gl$13{lCN zVaRphXusLr-tX7@8nSJ)CSqoUH2NT(teKTbqkk4?6aRslrKX2ZW!e;`*0gF((ZZX| zn1q~IQa(F?Wq{kF&tx~h)L}%)atr)GamITiz{YnlwSJ&DWd>%SrqF#DpxI}CDe@Vi z$-E#ZftWY}ne7-J2w5>N!73Q+Wz=(N|xnCNzL-w zZ1x4RQgn{l;gEk(h^d-qa#9>SnETnN35j33^OUhQ5-^(7fPfl*m0BcOEc$34;m`Uc z?^Sm(tg6|Y3iF5x{+X090l#<6ShExxqGDthEqyZrR5mLquuEz17uGw_+5e@*CxBFh zfbYpx`vNUhD$o4TwqP_iuuzL`G}fVW;^M-Qi$hrD(^}VMydx!9sV7!d!tmU@1_3Vu zzokcB*E2&(vqAJmsw@?`GDAJH3q%2x4IN6BpXT9k5i)J`g`E49m0I08nFB|@Q|Y?+ z96S~cXm2yoz+}LYvf-DdXtSqCNXhjQDeHID8ZPbknV5pLn)G!+eHUT9e#dwem6vnh zrk>Wn81|(J)o~@!#LHw`pqEcHlO2W5uz^^|xwLVF@fZ9t74lclMpzog`BS6^00p_j zW<@MmzK6;J>lS#?pD5yCKs6_gpMoZW-j-euRoPB;(#oyN1u^5X%CL+HxYardev_2~;0t&q>e)Xq;ZsfQMT{yyOX zIB;*iGlS;#R_s2x1D_(6&hHG{u8r@k_*|<7PMAJ5#%ZIk2v>cd-lul3!F5sV6X-lv zW7ILY-%1jXNRt1;9)o|4uyxO^6+E^wV$l5Xtcl-f&LY7*=78F(i>i4pnINr)@z%&5 za6X2s1X|RhF3@anwl?VWwp_L}Hy~O8B6Bo*+IfR$eN8CO^PrtA-^bQ`qG=sXKm~ivD;pi7EX@V0w#yr=sDJ)~Wk4M^lWjx=kAkjh7j`Hk zJ#_WprUALiZt1?gFFCG{*^@Jx2I(vvqED}PXB%`{0J zraJ`=wA$&e$P?Z-WVIA4)K65Gw8G1v<=V)dt`0J-S_w`0!X}cS?TN#{hUZgQAmqFy zy%dEQX97C*)R$Ec;pHHYB8}95@zDu;S4F77m|4m2(i%39CooTHNg>JvtXtdJ218^l zU0=RKT~D*e-B|J>EkSIB7QO+Ad4K&W3h-a-#)P1tgf>qFW}lqe`qie2hb1&HU&*+w zWl?kz4)L{(`4v?FC+;{b-2-bm{05O<2Bz!{yAgJq2(K=wm2Fz`K375j?iA<96%2U? 
zpcPdGlELW|YON)-{Q{sO$a3|-F}dGZBXSt|eTs-M`8{8%ZQ6@%GkkvyA12v6=5}@`%WmG3x7)3b zy$;CcTzf|P)W1}ZobY-83VWXLyAD8k-d6-oG~!l32W^ZtgG&x@U+9am2`p9dCYgDr zLJPtGYUB&?9dRUGiIXf0SB;knR)rv`@~wj@SyH9e6jX_WcL~VKP$}P|c=g2YWn1GU zg3^|(_Y7v0hg8#IGO%B^>b+n=EaW%UPAn11rlpYS;vF*$U8$>piG+nA+@?nBXXE3G z1L%(c8^SfkGt;T=tH7+zgwooHMwV|4rk2L%e?uy0C5=@?D#q~?Y44gwpja-9xWk7c zxHq`Dd>h@tYqsEZ{Sd3^SFCGRMk^d%SDMdN!VksODm>}6{2|Twr`gvTpHJl~t~~*p zWdcP|yF?G(c~w9CqqneTf{#yByc!K{&)DrYrDLA8HF!!jbD6)#x|Ph|!2gBlUD|B0 zpnTSp;4A4_18F*B7K$^9@`dCHwp9O_bs6}H`>Ey80o z(jODZCgIF4I*Vo8o<~E@PS7~c;+F#5C)kiHADaL&Z{%UWKd)fgJaiHC+Qav@RepLX} zY~6U~pmO10(!PTkDI4lQ9If+2@?WB~ZKvbQNxZ{dX%1o``tH=d_HtVES(MuQgcW5> z>}u$d7Lc~U6}#?zOj!X$kpff*1@G%m@JZ9;yZI*m1kDH`fnjrJ7Q&wLby)BLItjWJKEz*gG&#R;Iqy&BFC6HTLv^6`vt?Q; zs|sdkg&Z8uM9av?#ze!1fa~8G;pLTRe!w=roZorLrnXL1uZ&ag+9HA|0(_WTbgvWr znWXGeHuGuM_jB=bIDhkOlSu*&uBZWC%V=hj>?%*ZCZ@;>-K?z_DGD8iU7#}9nXk zQ>osPbA*Z9W+*>UPZ&S^4%ny+h0+Wc9WJ-vSYe9TE1vV*=_YI79G>I%g{Mf!SXIT;6I#9lfQXdxMJVOJ1n#|(nB>XCx zmn>&YEf)iTbHH)f36bIc0UdMOK5^dc*jd;d@|8PR7n?-IILGm=rc_7itroNd`u(eM zYsxrf1LL+8G9hy6>=g^88#ETS+GF<_H}9ymNEx&p)}2MEwLF87EvzFJc(r`@#ftC_ z`1RfkIsIC1*Hka#+D65AfGMF#|1=b}q^x&G7s{0(=_n{(AMVN>qwdxKsHRB!q0K8#e{`VyM|3;DkwH4sFL$;`e z_Gn)q>?i$87OT}@Zw9ll`cMU8Nrk#C>u+z-GZGg<_M|gsVQi2vqTB6k&(RJ;g}Ob* zL3DXS1*8x&eLz8=epQY4IOe~>nB&wskD!#l0^5jDyNEvo0e_5qrhJ^V%v|8*+6Eiy zqE6;OiEZ+F15D+0CrM@i67B$}WohC`7>gR__zJmVK3_glJ(KJV!jXAP^aH*a(|rAs z!wSp4WvA9qqfXyz8fNu1ESeiQy9MAXh<-7O_<0a)yZpnjJpTe|nFJink6~ah=$fcW z#lghza~s_#3UR|A6YzQtbP~xb@rX7zQ8uJ0v07e0%`kBdE(mYGcx?& zIbupoHu!Qzxj?D0OA4*Iyl4wD3EC0X!C`Mf;j%TjrLWaN@qC6nsCdExx)n^fX$TdW zxLtxb#Y8k-+kiHHkwy>!4HS^DN#g{4Z2PJv8Em=+`@#NIHnB=RzeZxnDSX(;_TRm} z8NMxvlf_^T3^n-x)kC@t&_jq)F^nyRf2us1=PNwuKAp3zu~Q0HBap`vsLiYE=pi+% zK9TL&0qN%i=Y5musU&Vt(|oa~cd5#!1t{1!l}Y5rd<&KU4}1DFsTtD+bGxe%*!eVAZO#4lr<$`!4CGtl8>6fDue1nW*OuK z&Pnz}AluHzw%2CO0BA6jRJGZ@9?$mU`;ZZzX>Z@}}}^!06^p2V;TQY?cOAkItxri?$OG}&BdtPxY4C9S?}v~`#M zYS{V-@=O9VLFs6eI_YT3bgdCjvSI{pUD_4gM)_yKZ8@{YimQeW>p*YS4yRx!f9(md zWEV4{mz80cZ1LztaU3N z_O5+^d>>z{er;+UZ2fquYpn#9e-H zNmyW8r<5-R2Ea^LQ=4-JwYgE1`7$)*Ub$R=A%2;K#2d>UI4r)^&}Kr7eHNT5;J&r# z7|toaem(0;j@XqBXbx>h7$&0Ux6_TAY&|xF9FlHEBD>6P;T-o_4Scz;2Ffib@*-E$BfMZ0;9wN8YfMLucB<+zb%T%2gLo9 zo_dP|b1UswCU~pi@Q4_2WkQKwR3Xnpys}s=h+nEwJ}g}ygsCu{L-TN7OIEbGPn=p? zD6(`YL-7kOBz~+y?ERjesz4Q4)bdJzJmxL}Zf9$_wtYF!j0t*IXA0M&U(M=$g%=!onI`nq_ z@m1C?@1%E%)1M_c{aZjUK9`68E?zGzI6B(fFS@AxRGx1)6jD=UtRHWcxErSdbzEbc zPD~6Kd*5e)P;`fli~)iH_S7F3BVU*aCEanC-`$^npYPo(PM$!I+7zwO8T9L5(v1Tw zpKMxi(PMpxkJ@}VoPM|pY!LYc<9bk^<&LgvKcHS>EPBG6vqg4+g#>qJoVsc49nXRn z<$o)sxx2F^NgSy|10Sp8!j}PR3iG+vh+CNiP zDBeF)R!WVMexv1C9Mkney|~R?C|k3rp6cKM$<@?^NPlA!FRe}hJ6(xXl41WQb$&2O zf=HV4pB!ArI^koj^ef$W1%1k9ZaPA$4;e#g9WY&aByW!L2WPtLAB=W z*n<@C1jv?ozuVx(Xflm0&}epGrhg6)$c(P`zs}LGr+*&oU7<>&UP`mnU>I&e6()=? 
z0=*$0)uQM;{%YtyaRhWgDtZe2h`0iE;KM3~EN-YC(S!sJg@hB@Hf5`7)QA49PRc@f zzv7aD?MdkYZPzf!?r>pzp=rph6}LlIAx<>;&rsK^i5nKM$e>`C|rD7qzBa7ZbqO>8`Z`a3p;&pCTlPi z^gHg;5UT`0FZTS;!90u0Em2PkT{Y%{R9b3-z261_Umxdx58deBL=I1adC%$u(%B2w zv?@0Lni3@AQ;u>&yYpAve>3@G{bN@{Ik9&vHvHxi@1edOE1t@*4cdM#o_ zTua}=W};e^NnbW}Eshdx!LwdJ>`YZK4;C5KC$z}m;r(P1Uw|R zuB*ia)Yy2Qp4i|))@tG0=wX?x5Zgx;?u*Fcwb`aq9Lg@qTHN9;(^|~AhT7;`f1Z>8 zI2MEV$*<&vGC!5~+e^7b^(2K9raGvDRIu5}M%hY^w7Jg-jOiG(_pa(NS5bGfORg>{ zFW_344Ul~4@X;F-j35bO90Hu%Ipq$kEhG_cy+M{L{U{>qnE zI?!F7T#GbT>z*o$uC0D>HSq2_!#+Z1C$~$k9Lu%0JvjxDBMA`^;&fbRdKa||`5E4j zyC6B&RIdM4G6Dvm`q5`nxJR#A2v@y8Wsp0c;f_UWfbM;A!yK2tY&c>n4HK0{f+ zqP@X~A$6!P;6EvQJTGHVAX?DCcbJsVj`SdQo+OQIPeaB*763uIt^)pb>CJ4`&i%N& zdwwN7?-keF$5(SdE7st^tqblL?_vrc>Bz$ZSoKpKQ}hX*y=Q{Iiy&?Br3`Zdb88gt z)okVLV=S=8OgVVEs33-`^mHqLQxtP*E_0_IbtG>4E@)tGV9ISR`ydkT;v71L0n;A* z#tLlz(2MK!Yk%MBp$TlC#cu>NR1**aGZ?LmcL6W_j?@r4>Mdm>%MHCHIh=ChPJj0R z`VM+=blK=V7*KT>irc<&V;SDYgL)xua~*%3w$&y17(1|S-wLSWS?mI=bNbPf9!St;qj~{8^rzDn2Iq8^9A0j)z_xa9nx7Q z$R$}PB#Z+QXMYO*yzU?p)^xZac1b|L&MOnXjUKta;_)%?LNAdQ8dctzl_*kuNiiZ} zKH@;L=U7q#pk{3Qt%+b`mW;J1z7L>btC`m{mQri!qf3*DI%B#?lt8+eEbjBSE9Lle`fR&>q3t^-~kK>Vr*N>=&MA`|ZlL zYH@WDPeH2Wn2y3VlhzttRjm%gc_@ioaN3qxZ{EP{jPc0E4RPRY^|M z5^TMJI4F{Tl}8?>)Z-6pntTt}S-fS z(|MXyw~IOh^0NNk#}_yz8#Q;&F}}dWJ&V4DaRKjABGz&_L^S>qmLs2wlxZS*?p{(3 za5VPHuXtoCZ7PLr!ZpDT%oGSs$gG6USE1DAds;Z)q1Ae#*xOm=Dn(bIj>ryX?C4vj zh47bw_#oIE^06&bOdGU1D5;e>j>3F6deYKsTWUGaPE5GXR|9Mt%xITv!9STepb4{$ z(oF?{6b2F{gFDp#HKUF&eof^HQo7y3C12C{40Qf2{ z=huRX;fN7|?>_yR-E6$eFnAv zfj*^_{`G;*8iuvJ!|-64k|us9U=vi}!3pwMwoBFuJ7^PE0txE@lNXRb<877N6$iDG{$;1?zRAqP<-EDBOB4;-F@Sgvyzl1Z zvHA>|lM0%2sbA5Iw?juT4?+sk8(|K}w#FIuiCDqWRd$S09*3vKZY@NtB+7!`X1 z5)M36fy^OO3zFl2^tT){GSLZW{9~0;Rcl1nT<9t_3CXApokEYY>YVsarEK~>^nsj8+YoW;r%IGHh~-)6OBga(ujLzVo~ z`cZ=al)6gCD0NL|=XS?600;|80>N&r=8^L$WbLzpx*4Juwi^NzhUO~;rB*A zD+(C*>MOa!85n;o97tnfSq;B`MN@5;(FYockor7iw0F(Nlu-T1$eL60o#l`S9~*a> zjB0N<*aeSFe0uoNDLmBmeU1+lqoX`g2D}mg@*P}FGxokaf&MlRs2@VKj0hISCB(I$ zo(j(p7~inNL#MF)d_i`%qJ^E2g-cJ_ zX6M6nK=xlxBD`{P3Bo1cHBB|tYH>7s)Y^t@Yz=YhJ8G4aRrdCMDRkZDJOyMvZ#RUU ze2zE%3JIzjSIt8Oux!UwVYjwPuE{F85)?OP-e$Jt?sfPI=M9^UUHVJO@wp)y=xOwm zA`kw?O+jbX2@mA{7q@f$w2{}(|BFQn19bXhOCMi~DkkmO69@BU*j|@0YF+>SD0z;K z`v8+|m|97pf!RyHnn;;`Pthy@=W;~sk=k zA9>E4hwOcF<{4q?*t%ls+}dx7iS557%ebAT{Drg4N|W0-h_c>i+=nhHjK00L1o+=g zz8}#BfCb^}O4nY_xB9mMrOQ6s7!J^YTrN;J>`5@t00&B=(|^F7A_{c@|H;85RQ?G` zlvCkl>gigg9ddWFZaXF#CgBhIV>)8m{cRn@Z`Tu;97r`Gtc5zVpTHuaEhi~(PZhn_ zGHh(1Gh9(S^!2xGh3mB-jnYG-{axm7W}^m3woMsl3+>(^XNTW9xx%UX(-bv}3TW1Gh1}!Vgl_ z3Sy;@YzV$8j8_XzodGhZTUAYnCqa`vkQxa-_wqOV^FEQydqb3&;qIEX;wja6jEf#{ zo7zMr!&@Su{mgRSO7v;g!TQjk>|JuS^s^#_jM3Y0;}N4iMcRC4+CE$C1iFeDv!7ib zb9j$)2pf+tm2$n22#eW`LFoh8%4zHtca3UOCFw9`DM5trH3$#?gR)TuvJ}aO(Zc$b z!g_|4;1{n4sZ*=D2u5K)Dqc;IFlP%OPOH-lP13qk&YSy3oSsi45VAJBVwUuv;s@nEXB0AT+9sIP>E6-|u*B5k|lr+c%y92by zqu1z8m_6nliEYoy!^6kP$;rXvZk;Vpf6d3fzuG`|46?)CgE@1S?i?UDZP4nhDqm2#>$A#echZ?*$B6I#s@9tdGAkyd zYGbJ~pATxBfmyJdg*HwNq4vm*#V(ZTOcK>`o0R1+@cE3LWeQEMl5oLn&aJ;`GnHlW z3V-$}+1GH~kQc+V>pe9j`Tp#PxyCJjfj4}%(8Y^DX+FL*RG(Wi_BsWipAGbJML5E? 
z?GK(fQBpH96lmTg0Ae64E)1{=RG5nfO{B~ka=WP02V@u0s3tJegsx>B#N@R_3s+j_Q}z0l`* zJNE;|mQO1tmd(Ml;CcbCnM(K~_XHJq6(!Uj{>k=;z5^r*5x4jLLn~KHB4Vq*f862; z3;%kIL5m>y7YcRRWM8IeF6e|sRPXR9^h5obX=noiJh8VBk+l@A_1jPfYw%7&3am36 zeoqK^JW`4Q|I<{T>e{4(E)ChWO1&fIKXsy+Jsq<|w4F|#d6)%soxtTUTQ@Je*TpKs zpSC6+QI1*V-Rg+eTJCG^7~%Mn89G9+FHjZO=D~QEZ(_YXK>B&?`~#sR z4S^}FiYlz%6AFT;!APec{pqO2yd*6X}28|7nTiD8Ptz{@V4%H+gJ#)2` z7FZGU4?v{@`Ap&KL()pFLRs$|i_W16CaoBR20P)BTAczTH3_odTHM7l?XW(ew07cL zCsh-h-9?u9s-$o-!0geO>tyzC+>e54jG6RwF@L)Cet5sR4dv`!{wz!8N;&So$BGMe z&!21wfbhMO<1aTy14{VUC7tCSN53x^_)j>TS;Wus3(_d0DMTJmRJN)!ciqi-rvJLM z=&q-&l51ua|J?n)7006Pg+y`L8qb1Xmb;t{nq1V1a1?2xB@<96IvN_kSv#hFGm4pB zA3-loYr1GfG_!=tn74a3uLw+S+YIx3u8)WSuwa9A%r=(F2Axi%5)+|+&Nn#wR+Bas zJXzfY9ji$yl#kRDw?#Sr{nCRnCU#?#SRToI+y=tl>yTP!&;DnqH6=Fo26D{<4z*PF zPdp~NKLj(0qM)-(uO8i~0)Oe`Xrj0Y`Pm$0DkE1DB0IWDTo}U%LNa2>C~eJl0Tpc) z0EIkOQ=o<%-HA#KemXvg0#@A77*nkbY6_GQWO=SZ$^`K>P1@{lrm&A@Qns8vc9kf7 zJ3g4M8~ieX5FRz}at;;?rem%wpIb0cZ|*Yb;0(T<@}UHCRxQi zBG2Jxa=I*ubCEufNl2qV$E+HKRAF=wIEK5uSnFa@Lx_-~AoL3~s^M4&{9z1ljsV=V zrKu}U5@*RJU3AIjzkh80520zhjy3}9F{fH%Zd8kG-5lZBqTZn&tF$6kUnh^ZfQY{5 zDqVLdV=2lR!~qaH6M+_*uc!u0cX0RA`sKQc?4eC4YY4D)t_OA3s03HHHlnigjqx^~ z5CgG%ij3t2r6RPm$pbmt!i1c1jk<0u*Z^`d^0>^nzdim6cQoJAEdN%68%K`T=`7>! zVK&zZ%7Cb_hB$zLuJsG{p_?}I0R2$Ohf|#$EEDeO2`I}Wm8!o%LlN z4Xq!|Thzj-AOdl5NcGic(?o&cmnx5fZ&=)k&~;_J?Fg86{Vt`qk<3(yM{N3C5uzp2 z+rVHw6F}RN143q}o)bPgK_dX&?yL5=i@X@hC=nYeYtfActZY~>>LKvw7EswcrHpYN zASVVI;ju#a7(C81ja$!qobWAsKkvHGi!Wf~`h3?BkhI)%Fnjr?Sv4eRZDXdhu!cj) zbj8{Q+p&HSlXKNE^oRR>c^{#$^_}*KHS42VLSa_83KP|UKQ>ktZJQj*#e;;BuQyNMZY;WrE;-wygf<_PhI2Ws8lN8 z(CbPPTt)SH*Q(K4+vVJN14wK>k^@U7io!V*f{D$0j+cX-!`%n#Dd12KTov!@5+-55 z`n&<3z?1S9+w02#3k5c@>=#ASJ5c#ReMG_8gpWf)1KGl-S?`sE4j_n;mHv1^+W)4J zz=9LCSYBC(Qe1_3o1O@iQIXuY8SFgp2r!TjuWg&Qu6fi|?!IZZy21Z;$@AsBJ(A%v z&lG$#@#fd8`9iF2X0jpz4b9(Zv$n00{_3&aDGH`6X%pjmggw)<_q^t3d9q)ixAQuR z?0u^jIKFu=7f5JB4?y^{`RT;S=G{9mWzw&|)52eZrH4)D((uQ1F&_>iS`xhcdleGj z&n|W^mTOq$Q{Mm7r4=(ib4tEQjFlkFd%6Hi{tD{WOv_xTN^BEUwM?#MQ2jB2$DhX)rjU8|mWjU)UejIZu=md!C&{+*hVtpq zRf)YL#js+QtnaKr4RG(^5XMo?;P>xAt6@IW8iEk z$S98MWjdsuoS0-d?)qmy!_krU?psO6Giy2g3cKMZC@xymPx%*8K1VQrhLf+|hE{qP z;sqG2l!E#t4Oy)#a`wAerM1%jVhWzyllw7P$gR+3IQScQ?`B zF$Y32=lgn^iIh>|>&rbyQCv~oKOYbKQ?|T#@sof zkm-N{wp71g!cj^<;)Qhha-Vzqe55LNY$x!^CTz%LgPEx0#y@@lp>tpI-2=W1MmUb& zbk4bH{--hOsR-49m7VXK`fsv+ z`tXnZ)|G})A!eq6FsT`_{<7h$GGK_b-c$gBKn*uGLNZGNn-?y557juBPMa$v(f|TC zW!gmJVeB|@DePVgOL!Qj1vYBkf^cX3@9V zCvoI)c6!X+LS=$q3SwhQ<+vnm3)aMc`S!$QelCN6B{S&)-TCS&S@J-^b2X3Owpw3`67HiF!c8d$2`wp^2Cz%LMU(?M*XgUR<7tL{&! 
zhe|oI&(~v0v4%)InQ6!5%D_NzI`FY6)2XDO%4()Cb9n1t#uooOtA=f3Z*bPmNU0|&*nsA&^xP9uO^h*VcS zzXHwXn;kj#z5)I1=sFHrDtve7pkRisOf=WX-@v^fS$3^lA??XqaOzl+x_YG4@@gqk z$10mFLb^&4Nd}ktDzaO1_D{_W&v;z3{DC%cd5}-eH-lmRo}%tKH1~ zm8GR-f&uNOs4X_P%XY@FRD%k6&Za`x3S(492YpAUAJmoLT+8e2>UR!N?F|A;t2oo2_CjIKC>NmoO{^}I+{Jz_ za68XH2Kqk1)gRS53|KXss2@=naz+vP19hnN;TB8%EoD zUmGd%w0oap^-%QOnpF&W$@hmFOoY+a{gpBwi=u7H(fU)O_Hn`3R@JOFV`V(V0E;K# zBEb$3fZBQ-z0=-(fT6e>@$GEGkFNWhwfowMi+p>aVg~R}HowKa2k&b8JY$+UJFsTp z^^O1R{$!iWbW63^wQG2bbS#JGwI}WaIh&9-V&^OGi$UZ2_kV_<|EWql{)fGNuX;hJ zD9d3O9;ZFxEOn25nAm)pcWQ?3@Yeta80MFT1HKPrZFMtO!K*7)+xjVKs`k27zC%HmC`f1n4?cT9~{FLQ0Zm2(haGJ@Rd~>6d4kVns zhhfjn=`AkusK5)E!Xz_}iL7}`u)*d?_kDwlQco%(q7vcnbXhP7gI885j+5;n4m@i2 zNk+|cVr&0Nxsn-@DWo7*CNkBZ9FbD)nSQ{?&#jV{miMI5s+-AE4#{N{B7Lk{q7KLn zQSrA5rN&;*7Ec~5F`&9{LlXbU(S{XLF>Q1@vvj+0TMyFW5$`X2MfmyU7Jo-wvsdr2 zE}g)D-?^+!jJg{U0_#hNl3WqpA(D#54S%=;|3MbdyzfX(7}U$SvHPZARB>#}M+qnd z$m|frQNq&U^W+J`Q+vEEOPl1)mgiFns@-xX$}vt+G?oU0FEtQXFR*B#lfVwg=!V!7 zp##tSQ5YuCgwle6gDqPuk!k~wtRSgJ!xw#qrW`E1q0FWbmGL;jW@ddOFz+hy&~+)d z1JQ`0(v`=R%mU6&JPUU?*-VyNFup|recukZzoE8UK1)_Sh@hihAP*X9JLuubpj;e<&I`F(Nsscp+4v8tcR;|#=K

    Qk|AU?&ydIYPR)5ikxlFhN27Ec0~lWt zIyS|2OYCZ}MmLMd*JLArSvQ%H!PCtkElAvk=Q?i#&7@rYl>#1(D`=|$Cct9}BGpA_V-^#mF( z!`J;Rr5BHH3uyJ8RXt@n(lDOJVeLXrJChk zMkD!9<9$|aR=>93PGRzTe;ucdTvi-pQ)&oIwL~s?M|(GZjXe3v8kgO9nyRf!6!!gp zd%<$G4G@fIQMwd&8W1YL|J~S`^Q{2ap{|7`xYhes(OUXBQ`Wi+{8#g=`kzVz?fKh} z=9zlPh8SFBv!4=J2`y%3i`$hW%yo}d*2nwfZAMjAzMK)wS>PT|!z8kzF2y1%#tTrV z_J9_U9m~ku+TMPbA~MVM@>(3MCX%hQfSb)|&9FNcw zOhw8RaxqOsjFQhZC=12)49a$)|GPD(8i&KL>%~P|JW-E1T_aZd-UsSzga3IkZEj+Pp+qNgRZA@%i9cMDJ zZQHi(Ofs>ZNhWsk<=*@1tM`6(RiEnWsy?UpUTf{O8l8~ug7DO%ZjhvEo&OY%tGw2$ z5GI5~3lsyIQDsTU`n;JktOPzV>T6tt9x#Qnw4Lvz4*tOH-YAtoXDhfyeNnrF%t)rL zfuKLj({t(ZtTzBck$)9ZpfFmupL!R97GI?_RTp$pMR1~m_#Gq~Q2+s1zCyfBUor*l zezA|n(L7!&lcSLjO}bx-)VNj+az#dV!Xg0sXdb|m$N8N^&5nUOoM41tL(OKbWN^zFQqX(i zJShOTYQDpAL85oMmZ=e2c?h%O8H9Mgup@(J!X0i^o35LcrHDy12e@%OZVa(ud@@;v zRHzy)Y)T%CFl++h+h|21I%SNMtQ?Qtol@a|ouIpAmonI{oD2fGijp~bK5;ZYCMQja zTps)i=fw6ZzK#)0KgeIcsa503MO%3Y{Wai_Gv)0&RYHKu6pHp9y#;6Vr5rk*Sk|$v znf@XHLsXSAn{!`+n6A0S6Bv#2fpwt8)6uWMa+6p%&moKxM)7tmZF0}c-OGoyfk>rI zzdmfPvGoimpU}}9hHM1O?ll1ev39ff2c65W(D9Aze$@;Q`Auy2+2GyPVQt4Pgf8IZ z$0lW&J<v+Czd;zpbFr6QBT1Mc zj@bb#wA{bEmT(Mm%QA2wY3zRIwSflq^)q8qqr`scHI{(H(L-gltqNbgzb|jx z0(&?4Lto{`>d5X0wmWW_+h_$?wx|2ZTjS+u4vmpw*g?nY#wO89DMI^P))pmzdZ_&Q z;k0R|Kj)ZIzsfAIq-;LtV5U8&T%!Dm_`OnLB$cx1)X`H*B=`s2B%+k%YUJ~RFJcU* zFcg9>?v_q+L^qvfiXjkPGk>|Ygyku@U2CvTsDJLf>3Xp7JYaqMKslLqV7Lnsw79xH z0(C-2v;z6$N9SM3?-9K**P0Eet~@gGHF%O94YlY_NA+{Mo_99r5VKM_hhiI`u!)>( zxB>*jCzAW*FA!NnQ6DCPX<2t2D_s&gN+U{9$y+D69&(E-oV3aFYU{}#+!;A}Mo;Az z37yg-{@GVCr@nDIYyOn&#C)U}PF+Fru&-TS+X zb(y)yoaEH}Sp86zJ%t{LQ1r-&wGKCN3;c7D1o>Rr?k{TJu~OKVUSyhl(~=FfrMyhp0JB=>rA!b0uh^}W!At(CSpEiff6 zVj8NRWCyN$HZ-o)?arx}drkcI zKPclu%Pw-A8~G#0kVLJfsV-(k!QG+J4g9dp;jc3Foh(hH@$85(!Fck)lI$rU$P+Wj z*nH4<-D3z%!jv)3Hp^d#BVJCSAf%e&g|n}TZB@diwTE;B_9e$mqH)#!r^VLKlAgCo zW=uU(#@YfT{JrQxkWgbiE2ftYV5(1nFhYI4Fe-4Kz@wuO=x?-<(`k)(v$nItes=w& zL_l1&oi1l2$8Jh+PFA)t&mlcWZLxh)Xw#?Uq3QVo3!Lj>aR&mC=vleCO;k6%Uag@o7U0AMXMbPTXmrQ z2L(A+dq^4$+A#WrCQ(o#guM|P*kBF{5G?y3Tna~Dda4M+$S+xWC zdGa(ZhHhMQ*(2;3)mrzBy>eOpQrcVhb_L0788v$?Oq%c zkpeUcrJx=1S4P!3(fz9bATx|Y__tS%khxY&+r*YDpUH)cqtC4^^^HQ3Y!A|I&~cJZqB8aBPt~t^-j42vn71L7MTJ6Dxt^Bgnd0*7 z(`#xoOTEzj_Ka1%_QE&LklE#g%LK#EK@)QpZ{F}xDS>Ht{O{hIzas`^`7-r;KtcOPpa!z;N4PW)mRt=UWys_$%V z`EoArZW0%kZ4ytygh&9K@zI~J_+FxtrDV#-?7WrfBiIJLe0?*1B*__vL(z4m$uX5~ zxz?v~qah^<*67K5u(Zq6j$x^tWAuz1RGmI^N=O%_8};Pd^fT{X{4=)M$}8!Qj~x#e zUBrtRFPjHNb3tn?(PWVT;aFO&DB&+j>#}ufDtAVa1#%`7cb(v ztkZ?a*7#z^J(q2)TZ@y=Cj!+vc9Cq!SWJK!D1Xl)HX0ijcX;05d0g2kU!8MZAiuOa|Dv&XIleKuJhAh zVTr=Wx4xj*U(b4g3yyZenkGEz$J+PJa9Q^3>7%VDUIr%V9DEs#>ptYbs{D;jRM^)UTL$RUP zuGuqb#5T^j{_392q-LHnqM;p&u%C*_4rBHriy+K8O_l0ExUT?%llI3cyO&u4g(_pN zvnjbfvlYB5WomkR(-TSzolQ@4nKZhOlHdxfFO5mz{PFC`A{MbJH=~k(2su0Dvh2#H zGaBW2Jd=mLK^otDe{o*^rOsG$XD{}&go)-qw-3iu0QP*PO>G&4FRM4x3nObIHD1)E zFU}Zrj~PXC>|=D5<3*El!U7}5O-PlOXDzVHHLYNhaIghpqflxz?kLTC$tjSJC*rbE z^<|Bk&`y<6<#3xBSe3AOiQTa}JRNWP%+A$E zrmvvj&PyCy|NSHo3WYeAErL2KHCfmeFY{3YUcAD=dn*7#IjqQfF5RvN-q*((nTzuG z*Ln0D%};||E!j_d!{wYC`UA^}Etlbi*WJcAUY=a<8LwmbwY}RE{XcfU>%=9AUjK6b zdk{(EvXf7j?}gdtnm(VKFMn=9=^Al~ih*`c!zUDXT94Xu(;!BezkIj;VQ;gwn5~zr zk3pr8xXSiAD`aQ>;B4$8Hg{^fC97~0JTdO8#Z$j^(c;|DqDgNfV5P99j2(M-5I%!6 zp;2qQHj&v0;w9Od(l4)}osms!@d@^S^9>bR!B|69g{gb|;1qyQ?0Or@r(tf8I-(t@ z)`GW7EM+o;ZK9q-gGk%O1|w zrvzf8mlbvFA?Rv2WlBDcG;SB-LUt6Led7JIQ>O&LvBB25)KZTHz}bNNeMLco&wI18 zWyq0|rLN|o6X&jj&B*i8+Rum0*3;Sm@f@!L(qG6a^g9VmYRweId5{|mcyW#}oFM)= zJ^~b7xRgIUkth@sf2+=}o)5}Qg7E4smsOxushHsx^|b~zJ4006wET-2Ye=+@m%goH 
z2y+rGGcRJRBGrfwQ(gr69dy|iSi99a@*7M;CSd0rbn>D{_N}FbzN3XXN*n#`8)4l{OX|`NGK)V%U_&F-E-eobbg#aP7G4Dpv2HF%LCKF=J%vRxBTxogVxDwx z@=u2L2fM6DcII`_c0vgF$)Lcllhhpp)FPANWrb0uSOu^CZX34-O+$ZCI-o$#qBCAO=&=QTT$e68kOulF=!Jp2J z?4&}1D`YHOQViD*-3j5jys=Ddi@;`QAq`IiC;?@`J*jMd(`=H>@#* zp|ctwEA$Z%*op^4?K~(NH6MzAg>8z4MM z6{nThtXb<6BT;)q^8j-72=5^t|REv4$9B2`~Ho1N@v>k z+nL4hc8VpPUpI%C%@3HDB5-L?E-2pLkf{r@9*X-Zc5RA*wI$7IQYMlG(HJL~;jrOh zp=1kg1hW9lgR^iOW@Hqe;lJ^zSpt4U>+HxePf%>NebHMr@#-`1_CJ~67`HqZVIliX zNk+>fOjm?oR)!`a5HL>v_j zhGzJ~=a;`*(BZ23SZhuwQ=Qka1C)$le3VUm>d|BOq`w0+H$N`){@(h(h?M8QJFIH2 zWTZ_xNNa?dT)q_Lp&k4!R&jXFa6vF^O1L0yu9&hI0M?JQYj=CMQA$(3aQ<>iEoy}4 zL#&%xK+uU6(KGmNcLm0*1(g$s(KXQR`c~?O>5LleD0TClT2l#!LO9U3>o&^t!Vh3I zIzq&tH(E`lB3VNr2xwlaNFVGOWJsFp$;cJ?yQtF?@*XigXkZ!R`Gz>`g^z?`*3aIt z8L@2#ER$__;(|_N+pBN}Rs1a+ROvQ;*ofa%wocxC+~-!}hmR3{vxp>XigZ@Ml7OlE zF_Ra*xOxbZB3X91^Bj+@0#f}Afx3VjbqIwDk%Zs}b~KAC!(4D46mBv;Ul-Xm*Hkvu z4Wtv5P3slNNE(6~QZS(-(_DHsFtM z`e~Yn?4B+<3iDSWe5MOtOJ!k_qI-jo;!h?Ad(975ROuE<>mO-7ZsBx58LXNk_k#UX zpc2HH4|3>gV<8OuB{2+HF3m5<*v`65C!eZhg6SNpW=T-RqsLu|o4#j)nr{k5ZJ7dS zKp0sO}Ofxc4aW#D}nS2w5-m2i66P2 z?L$9Yx9d%FcZ7xB1PNsx?!C9oW-t8@$!dHgxY#^*UEt$|6?XV6>!~>1Wuj$7nP-~R zE@$;SjzO^9$xXC#l2vB~pfAD)+b%DdfWA~jXhdI@^x@jg#_*d*f8kes^qV>jmsUnA z<1Ysi>FLT};=_$Pk%lNgaF!wTk# z|JDwej3V4c{_|GA;O-|n5%PfUH}`Zn#(Y6-soJs@?6;zW-rk=TOjkwPpd7`6z~sfyQj=Myv18oX{IweIc%Fvi*uV7O;n$xjJi9d> z7ka*7QIVAfq+aLbkXJ9dyIpAFj8E5e&bfH+96ZuvSh1DQ!y#%e|HLy&o+xsC<1N}g z*pcq;fl)G1Gei~(yk8E`MBg6zCltePDh-whJo}&g^l-fgJ5rt!oRm_WmBI8tPb(<0 z+2xo&6|k_f>^|BWfc|dSv4;pVoU3jJqCyam)A{C3Z%-5Uc$VD7cQ@6yxX#sHg;Ojq`BvAb)XKNVT+hQv(1VcEO#)y&F z60PeUZ156;I^a>{G~oMgR9wj9TE-UB+U-u3tl@#-i1_?q;SzC3h1=D_fgnk$*d&>j zz?txT-sPhYcHnYk6drY~Z9(_d_G_#&6mA2M#?QuWoSB9sZ9!3)&Ali;JtMmV}>>OfN! zGtXbD&+^dcFY~hu!d$*#H|RDTl`wOLd&Sn_T9%aE&^=^QC^1e1g@HM^M3Su=^1kU? 
zi`no$zWA(>g2SjGCp~9;!KT?M0085Jg5Ye#O@|j^n=nSQ=NhChjrZcZat={gi=BH| zZEa=E&_r6zY9X z_deH-M)#aG8!Jz(xmu!$*WbXUNj)yzT*l*$VX};whAVN2Th&4|4y!=e?fl?7NsZVA zd4syIr{{kcS$yeEWBJT7ufoEq*2{Ld|D&=3_-K|su3vs#9iP2@I=lJL*4S^9>nYl0 z;WdVo#UfDoz)n;uYg~u`L`aX4;TWi-EYrku^++u?Bs`2mzpghBnh4bDQN17WcZKo=SSxsag6*f`f<1^>Y?oAB5&Pi4cHst1%O>r9s z`jevq66J=eR*%XanPe{eku+}7@YVep8yU$YxoLIdt*6Inh7l43wNK0Ek{3EFoYl3} z@9!IEwdvKIhj9sOY;`B;fGn5>>M>kdZ^@AJ!R||pl@3|{Y_;LW{DaT`j-p+B#e)5( zx&Dd;gI8)2B7Y`x?==MxU)OE*`7>;=Hjr0cFP=mRk>H#K^omZR(n~s%X>LxNXNww5 zW{q;ivMuq;7ZUdO#>I;bI?L3i$YG~RYq3mn83mzG!1YI^F96u|`vYit{-s0CbVf3Q zyI95;^)Otf>Eoe?GM6kU$yoInS9@I?6ml*{6#Oy53gy3X=BVL4ag0?cBdH4oD2555 zty^KhTHR*_jb4$)^P>KvCN2T_FBgi5BP(85h@ZPXjUQ{{%q7b)v?5l#99L^Np6b9f zK#U{S9i#{3s~Fo^Z-Mv!S|`tZg=`p2-6vjJ&aY08XcO=$b_F+>C&GDr&U8RPwWVWY(;Zph;KhmM7sz_U0iM5sl?(rGKem}7ZOG^zL9s^ zuMmEAbi<1=Kzx*$O_(1@u-m}9G+^ZJ9E$Gq1GgK{JZEn-`ZpCYW-9ULDOnq!qTEc;TfinWt z3A+v0Cfkd@LD8m-KFrQ{#iL|(#3lN9_V~Ro!3^{q2ctUl>UCd$(fWcLJVvf?q0a1F zt?yZ78|=wV2iS0MBj-F*Rrj9m5lu3~)8hlut=n==X z*LGvs3VHsm^0?0sZ5`}5YL$8KEL=Fd4oMF52Btt*CDpv5)Mlq?A5}L^sLcNEbNtF{ zdI)DSyzd5y&S^!&T{w}|pUYUZREovaZ&mICx$gu$i<(DMs1BSz#eH&DXzsSmTdwrk z>fzdye^&E$aaC~GV!uCH=*Bj1Q(@*SllD;J+7w3U3_eq;MAYUG%C7nOkj?v2oc#-s zze60g_-ys{wO>_`M3Xw-7(wuJyE)u?Xj0gc5G+`cc}X-v!`0`&?>{Y@)%~2ktzm%o zr@UA%L6=I@TP=DZw2X0Ls!>jrX%Ax9y8*!!fQ?Cc{&-gF5@i%5)Gf)*FM$enuYhN= zecW<8GO^oC$j=P8?O(Qw7`YfL|C&a|cMTpC+&KICJ+O`fqq~X*p8$bB$-5%YTQIJ` zDiB7*Pp8hG*G9f=J?NvSksYi$*&VYd+oKkWXS?_RngJO@m zXQ7j+dps^b5PEl{mNYEaaaPt~4Kcx6y7 z9q;_h#N3gx2ahgFY6|#?!n|E-{7XGc>4F#2#fSQxhn1AHHK85_)<1SL)+kc#r&=Gm z4*7s8J$g*>ig{R!cnembzE)!~F>M5m1TCeAbHNFEByuO6%uv*n*%+F*1$)fk(}jPD zU%Sv_BX!9~y9q?g`eZgK_0Ln|UtqXs;#QR^p^{yza`gc?ReXC8N`HDF1n{+eGn`_Lu@(z{Pn3K0`VYEourY^Qjr) zQ-U*-n@vtF7?It*k7i%6#&TD&#@OZkC}si$?Y5)x7n||IinFbz!8ha7hjP<{B#D_u z--*_%3F8ox)K+!kuh=7$mXQdX@g+fJdR%ex#iS-k99fTdzzZ`yWbX;P<|Zp5hkK=c z-~J(XHcJ3?`%RPHyFVUpOF+hL9c7hCzDjPAJLX(!<1Z_(2zY%sG@qs37OKJXz#k@$ zRgLrCIt}^o^f|GsG5`F{eD~&mEP!*uov;tG_V~L_^5C#(>Y4R~m%pZO^Ru@b8c`%8 zNvZ2m`7a)X78jIkA9SZ+27a41eh3h@Ioba`=6ww~9u{!;SFP#M>ytUz!{$&p9sOwK zqWRj?ER15wL= zrcn(g=v1EeYU87mm#kZEG+#+-cE6)~KTis9XXob3VmIA`X`IEotrb9OH+qk$eEbTp zqnrRR@71?#6Prusg-;iTGx5wDkkC1ybEZP&YK!NF4+qP?sfRdaO$agHrG+J+pPSL6MBpGRv{yv-9;QwR3wyj6gr&Tv z$gMGBa{cOSHlx%)J|Kj!Vju6X)Dqnry%RIorLoAxKj5fi;i<3_)(3<8x- z`(=NJ-5kfm`)7FDOqaOZ!EhY0g`tEVY(T83tR(}r<)!~#cih5!{xoCXD2dx@CrK<43LBN|V>!uJ;T}$p8>7?tdP?!9n$??6U2G62*+|!jOOYY*Dq@1s8BG>_a zw~O}r^hRHdfj!nA#)kz=NQQY#U@D3a_sxbsufT`EoOc0cUw`TcwUHDh?d3P zRQ_?B9qB>ikxCEfcdWsEkGfRFEoP}NMo1qQyOkw0ZVlezi@!|JY0xPffmls?i7)!L@sl)~+;nv1 zn-j9>?$s&fBu0FlI(RaFp{u%H^=ZmiI#)?3T!ekVXdp4b3YL5Luw03bX;V;y)~1b3 zzTfGc^XDJ^Hgb(^`cBkX*et9L$qA}*W)9zUqN}VJ&6SL-`As!@n`AvHpIKD)*##Q zHEd78RgN=xMgjJ^qET6u6COy-rr}&TY%Bi+Iv3+#Eildk^odP!*APC0Z{U+5t_{W! 
z$UVzbYuJ)pq_GcvrNRp-HSif$#s-Kmts72}c?RYIQM^F1S(6mVPPvcc2U+?q$^lI;&=7O+X;?FI+yTn zW^vxXa|x8biw2lu_d7Onolxd02Nie+43+_V`081jh1yAQ`4;JCmyUfPY8^>pw)GEe zVriq(v=v6lJ>7y61hmU-DE@!?@h*Q>BcFtBtWb{Pg=FCogjZUJdHJrgPZyw!O5dLF zF6YuldP@lE!%|&gT6`Q>^9L_FFYr9w;YRay3>y@tW~sgWaAS8hpZ5utGpy^r40i)w zHVmDcPQvfUB>WAZjwDK=yS#*H1)sX3gARyP<7#4Qq#9SW&S)S|SYF_&0a_U*@UaD<{;25E2l!xr$rR{)3J0!1v>) zDpdAkv2q#(X4nYI8I=*My*kHVR}vp}02gkwZ|U*=&g77)X)-w4?Xx~DaB`sw41uh* zGCKCLCyO;|U97Aei4#FN;8p3b+TiW%c-O|>iW&nB8=qxLU(Q)42rIoD;(-}Tx=((5 z>Ayjt^ga0b=?u6guWEd5xr_cKOX)lb9h>X2{XR6gZfZtWk;ne}08`45<>-mjI&Qpo z;+0Vw+}_ox3doCC;m|$+D(VDRmf;n*T>_k$Axf$&rV&r5BLj!TYXqH)vvG+`if67SV#3d-uKcOK$WPp(GYz}zDq9ZBSCYj3$ zyVBBl(nTm`S^m=+;w~HV=q%7_;9(X#Go$_;_p?c1iEmbym7tgn@F1+^pI8l(mbB$> zhzRA{zn;?d*oN{v!zUxBfdWgSqDcE>d-?f&q&#a8 zb$~Z43?s&w7##yg=#+#T?_ZTxMM2tNn$s{?!$H+5g*Lw?a<&$(ihRP3AuKXns8l=C z35)S9G>DdTMfr0n(1!=IWjVf#o>Z)&l0PkTEEB+XEvMW*qoN6gf|BBpgfktwh%8s| z;o{Bb3Kkxwst&6Ely%ML(Hy@u8(QcoZ>De?r0UMj*rp|FuoykkpBq}va3s|6-8Vz@ zJBzMv3D!+=3Cu+KLsWNjRfC)oE&3rcbGnmb(0sHsxqLYyV8r3Ho`k{7)9eKdJI>k2 zmAx+81zT1-AH(k#DDw+ouj#RZQ@1E7;IX$O*jq*JtrNw1@B6P1!4pukwl0gzx%gTS z^!UW2Q19=xqwB8~y655Q3_UQ^pPgL5z^BvsGJroQlKjgH?X%e5v?2btgAV$NMfs0Z zIo15i=LBjK=)LiuOBhY&9L8<`0hZpsf$+BBBkrBw2{(Y@CuKa$-sCC2kfUBMfWAL# zB#8!I-4lcRP}oh@D3DRj7=@>zZbRif0A0{di|>3zg&=&kem8(4CBC|OO%~UYbO>I= z=)3SuEE+Ne3|STY%|*m*`7|8#Nev(5ZCtXLILEnNl+|pEh?Zb28t02x3ou7Qp_YHZYZa&Kla8W)(O)L+J}S zQ3w5X8@<(29HFODZH<K)~Rl~JA?s^eKv6o@`pC6Z7fI}9N{@X9Z>wMr2qnGsHQ-!TUmd+aL zY>dv|PxirmW7+N#sc3!&9Tx!@DmqEsHQ`fwV8>-&C=;OQ{@0?Z%<1jF{tEdYwfu$V z|E%j~Yl)Kzq`t}idi%SC$!zOGJ?DnR@ZX|J8S_WRlGuNWs+#Zgvh_t;HHpBC?(NQ4 z%4K_axFk7JigSsRq$s0?mSGqvtn1lK)BG0_a4tWRnDc$`Y&MGuZq~zvUKg)3S2jQmO5e;b#HA*n90bcOE;sT!*+{p%|W6S|dov-=1^3w@J-Oe5O3w4S5xN#y5n zJ1v1Pdj;<#mrDwK#eOEaHeYjVA5O%Tc$N|UMJr*x2K5w_Bs5Wq>V=}HW#Jhh<tMlh_w2EodljiU8!8mClMvAJAh$=C93c zN3D!LX_tkIeug}gmJxHJ8r&=<*h5R!Z?X5sYa$U-Mt%3!C`Rk{k=b>WPGaAQt}pd} z{D(2O8)zxtwon`kM(Se5mJl(nYTry)JN$1rETsI94QOt3Jpv{Z_I}wWn|~&_iRC85 z@QwGo=s!iJ379SAj3j3Ag?`mPr7_c(s6~ZbT$gV(!5i(rg~NUfX}FGZc7vl!pU z4l<7-xWG5|3YNk&D;ElgWl!;fZ^br8{R?_) znyDba+Q{J{qp;wmlp3NR7tX4;15;r&boOIii>tjMaj`i@!-DKb?XEX3H$B-;w3o$N z*0HXh}}ZugkVCX z#`j8HGS&GbBo(o+3BA}N#(6~s8mX}>(Fr>M*W$3L4M07Zz1uc6&^cOi*Ag_b*<|U} z7$H++930OR#8!AIBuX=W#MdeM<2P?s{HKw{+X~1tAC8@7DQ@qM2RW#Dr$Jj8+TYH77biZHW9oDEmNr(te#Aee;+O8j_k<$W;7}Zu}1b z4$0Pd?ux1Ey7<-gEux}ip#P`q^RFRv|10K9s?L%@87UU;Y8ZZOK)B{=yh14lr4r9kz@VdBC5GG2PA!?Ln zbpv|rQx_{Vr{M?KLWxPRe`N~6l9L?_-THZ$U3;alN6kp}1i$b;;x=U3AZK>kbz>;T zX<1LkB<>1J4Gma976+7B$f?(KlJ~&He#v9`p&ID9$HKdTCInEClbOc& zn(X*{9lZ#n(T;}`Cj ztfOQIM{+Y-*d<}1kJB~;D;ES*n|(UcGkM8J`!Cv6>1_$4U$3?1;HoaLa;Ow2nc6dbVcPDAS1L(j>U^@C4CK*-M zf$AH`hp++}7F=XX`NSfVevoQ?GY4twtp+v=SsGU4a8M=bF}-3v+xX-vXTo(;!eg37 z*s4Zq=5tlo&*1FlpXe^-yK&V&CS;IXw%1R6BCf71R-@ru1%rQU>k(&jNA~!O8}Nw*5Y|wW8r>nI{kLUyPT9j_ieVz!O1@~>KQca*2)57;Q zhz_1e-#q31(r~rk`JrcONkpcoFQo<)<@=Z^QDn(AM;pZ5V%h+uWhDQ^4rV)EMhEhNB9)pAS`s}DX|UhxgT?)EaeyANXv9C^@ELa3{;-+^DO>?tg-hQnL$Ssr5W>W z2YVnxF%I}8pP2D2e?Z%z-xMgarG%+gX)mesZ_J9g5F1+RQzD&%h>`x<=NEJ06nxvr zkp71GZ7aCR;{gjWY)}JD2bq<5d>)#72OZEx^iPo^nDGTGl)G6L5^Ml?iar@d1p;_8 zd!U4>{xB$_skvSi@?3*aImtzqFPN5w|6I&HJ8{O)OEg-d8VtnZe?4fh5Ht9V1)Xhn z2bcOVrO1lN^Y06xAz8ViG4`vOrdlaIAD6(HG3hkcM1DozUJuHqv@O8+NlF zwG3u6N&l3EN3#Ej)?Qm$dn9$^3;DmD)W4#v-hU%{r}3^E5b{>=OiUw!&{c8gr~}*A z=p`EH71Fkwae5Z%;1Z>YBk7npG|re-P{ITZ6KHBzmKkMSVV^>E|2#>W!Bx{9T-JM8 z6BO;V#U**3$P|UP6rfs#+_Q_~i#0%K`q3SsZ$1lFhE}RwDE_k=Ae0Q)>9=c?0R zth9*9Nt4e1uDlnLSXbh34ds;4rl-}wppM}W7Lixb%u&z)=bq$9<{1~s4xv7Jv5yX9 zOD8Lsn91iQ`xaHL$Y<)CVO}4R#PZq1(_G4cVOQ@?$XqfT7iR`tsrYJZo^q7KU1XdL 
zXy1GYcs+u^QN`jF#Bxt$(_NfXPlQZP%BP*^gR9_&z$}sMc_NGBm2=I1Q0j#_kq?*n zw^u5bA|q-#>=Oy1(VG@k$e(^LI6lDHdeJCPjae|g$$PHskPdzi8-rpSV#EJOo#@CpKYP(jxY)~C zlcV%kaT|@){S7S%qP+5PmFF6OAQ=C%Dd;*{IC@`{Lwx;swq$NGz|a+Sb$NBQpLt07 z6DlK1nXD^lOTNRZ)LoqLj+~zq%4x=Zb@`1MMHs zwgY;AHHTA;AjgOO(F@fN#q9I-^2+SoHKJUsrNQ44rLpR@XYL8pA0Yp)__FSt=AJ0E z#~hp;L_YP_9Gnuf)Y8CM_K`5>wcm!&dprC6<^Rs8&I~tvOxs@CeV7dW z>)G_5w5T7gP^2wKNMb$xLQGM-i#o)Ti$y!p`$F25&-kp;E(S^AFCGoA9H=vYY$!T zX>K(huca>nzas*JZ)=+q&0+t!`cR?VEys^+GQm0ullf?wMowdiX~B#_i9@ zEhdxq5*k=bnN{0dLI?h%y_{7f`|~HOFY?ggk>>7ccfk9m#)c|)UDuGzcy>#lSHMk|PDkyLn4M`Qe z>jcN3@6yqmQo}=#k+N0vAQe8fMjBpH%vHX58`r)HKD!gAP3Ehmm%Ej4WH5{45Q^+>k9J zQlM3fBxIrmE=I4ES}<1gO(z?x4oIBF25+-(v+=CXwsi*2uXi?2tl4qag98t{+D~cl zhi9TcqWQy;JDLd@N=c!z;*ztJB04x|t|l2}l`>4e5&T&;ntKhL5?V}tndL?L8~V)aVkEcxge z#T&9^J7)sW!DT5c7A~UKSw$?QQH*6xN~A2B@KoR%n9D7JF5Aw&tosxS>Fr>I1;v$3 zTLEorMT3AZk)p)P`?Wg2T2FGtD6`k#ik6PrC~@47lCOhPw@+kZpskG^f22M^Jk?W{ zopZ>ZL==4Tp*vL;EOkSFY)|6(NVq2z6%XFLiAqNn)L!|az*2XCN`u6(11eBhp~g+? zUHGfyLFH$rO3dNHJvqKRjAGHx)Mf%dALp~JoM@nfDKaCfoW0`jOj}5vU%2)3kl(PP zCt-Lt=BK(rfs|Cix(_+Gu0g?J$N3;^(U|?9WGp+3vK+TL3`HG@sbm7NnbrbOhiTrL{^PgY&-x+dh!HY#5m_+v^_5Q^BiM-;JZ3W%S~ifIgNL*a z&k`U)Wv9@&po!@9WnWrTNaJ<{#i=1cSOI<$HczIO)?8WrUc;tpQnk>f*hptpubjHl z@5Q|{!O`RVJ zLuY=qSP7v_{J*0M2@UUFHl*bjYr!!NV8N1e471i`4v-OQ+*u=it%5XklpFFwfL^URI zKaD{@tTd#Z78P`0HhAk~^?i`qqyFr-BgMqp1qG^8!QA83$!zbI>Bw>Xx6YeSnJGPI z6gt?C^8;{pKd*^@N-^Q6@f5Z~_rU<)x|;S-po_ZOzL*m*StD8C*0hqRJ9S*HOCov@Ie zLn&$>rKSvfv%?kJaX;JcsR37+qH%}rt*mVwAG}az3&t!GR^kn|P#>FKmPbGlC)3!$ zejAN96p!9}z?K8Yy*6af;_bo!vbmX2%ZJzgI~>thU^p0DY7Z&J%lEbcLqkYq`tmpe zt+4*XVx!6)j+QuGIdDey`8gm?BeQyh^~?agBQXr+{vviVZLHjAN%UM{-CQA1N~j&q zcWSye$}TXAM2jGYxsnyR3DPPR`CT42hqiwqq^d`Tt^P)@qW<6+Uh4t!9Aawkc$*^y zrHNvRUeKn@{N(xn&>40%>r($QeXTFsm1>h}$oeDg6O;D{845TG)F^Rh7%wlZ>2$39 z{##?*S^Om&(PT-dj(t1y@F$ggHx@>FMB~?}9*=2|OWV|DzOcIIAlFWd;lz*qUtypWfSEd=Rh^hR2 zuyvBGb%Ha-V1)-7-3leTi||J%G#ZMu-p2Th&iHUw`o7FJ$kO*v?JP;P9R)3#MwPKp zhD%sFXl;J4D-#)Cwa0Zpkd8uC5N2R`$C@vBYb}zXF$QphwZK`Q!p=eCR4qs51!0Q| zpjAo!(Ie>yKK3jzGQrhN9i=rIK_9$s-Z^=Zz4$fLuLP&YRu*j2Cagmow|Qi)>(sG14Y`PR7{=Yjp}c!>RDcx&*dijE#{A*Q>(j)&8(WqN6uV z-k1)HZU{iFX1gDFOjV+t=%&oMWW710jx-BB9su<IBtcfinbMNTN2am*7|yX9Qq$7IX(9Q!7qR=(~EzU~xvjjj~!G zPf6NVTcJ{9eWao_$yg>4Crjy)2%(Er24d3Do47C@yONmxGPd5xI8Nc6)Bpc?I;ZeJ zqHSBpw$-uGvC&D#W``ZyMn@IfcG9uav2EM7ZQty(&$%z_TQ9ZVs%p+P#y=){>~RrW z{7zC1hMD!IP6^m=7GbW)5Vu5HITZLqysLx&tPqCV~7ehs9+&#S^pp z7hA?FZ-Udk6NDHbW)&R*3ruZ*+4Y$&z$htCSvn$1eO<&-9F)@o_AJd!NbE zG#0`k4Yjghh{Kw!=+{LxdQtCDG^gDP*t^*6K`Fn2$1u|d!&Z3erU1SZ>vx#6v&;<-)W z92P@`HbLy)a?jZ)W+*Cs zp`XV^^iXtpnW9UFf54ZrKBp&l7Y2eN%!T5wcvm%~zq^@ZfQFYY zfx3QC5~>e{o1@t-Sh#^FL?;;Xonwn=A(0v3jhiY+4|5Mfh@K$Lr#DE?gx@GPMtHDK z%iT=$hy^r8ZOWF~e{GtZOZeH(LC+9^p@W=iV+)hfjI;-?CDp-Sy2Mlz7u+8(1~V|U zeZReH3RAC)Ez@Arbsp7B3#m){$u^ZZSRR72~xaXEGu25k3|_45r}A4V`vOsuYj3C z?M|zQOPM$EmG?DB|8!Ld)zij~xlFJ%eObU#^_`DPqi$CJpQoL@ldr@s*;k1knfkeD z@FCxF`?TsR<0B7EBynUQq444gT(BuU1_L1e6BMR>zhT4oE2jWW+vIAv62^as*-Ai8 zV)&(~;G^Gj+GnYfY185DJ(-TmiXQ^2>SgG52=-Ic)$E36jkdJloPvIdd0|OgD%P~J zB&hZvdwY25e`c;7BI@z}Sq z?eMw8E4X&Kux&^LWLHP?8gA-7y(0^SXFBz0NAq%FzKBuqY^YTNE5({aR$23tKU#l!gVVu-9YXyk>xK^rZmB$&hzOA=~H@`XXpe_BZnfR>KGW zds|;P07_sgIJ!{j-(5un<=JusWrUR&`amY~A=6JX0S|4JMWK_kXmmM51)AyejU2Y@G_Eqj zf>%Y%t{6rWsYimhVbdCNCcJEqTGzS%2FFRl0*11`D8jIteXl&Jx1ZA#!8N|LZQNf3 znz-|LCblY;cPV1?5zB~5A}FwuJ~7z1g*o+Zmq=zQM#^REy>;WRc`XfFYEFVnZIIDV z3KSdm?mj$~;6}EZGP=ATt%+S{cpmQO>!9ziTK?8z=*;I=>%|w5k1zx%%(iWY4d!-d z08(+(6$np?zz#1Sv`D!?0Ex-x(rl>h3MlF<)?Ftzu|`EjAO0jr?9d3vJd+2wN*=PQ z&lEW+>t?tKD@*^-ZSDdvsZsVg2ihzd%knO)w*H-T0YgT(y>m^QKP!es^beHKc&0*s 
zQs5Tsc-CWzw1qI_-C`iU`G#XHA@PeK;HB%|Eb;t)+na%zM}r<|^>T?(4$+$48(+Em zsW;^IfezOHYvABY=@U!^bS_NVPHkV-o7ZK0lBn8`lgktl6f=W|E~}6}ySHWkH0GIb zhWvSnwuk2C40NDkkdJbu>vv15`d|JuM#oaxZl^lG1D09t6N4GniInqW7nn?6;QWmX zT(pF|sRK%lv*lLR>`{(S(WXF!W>VXAc)nCqCeGlzCc?JK?j})-e=MO1EkA0LfFu#G5$pK)5Y@xIPEQN}yiW|$PYRn@g)tTXP%|H#vO z8ml&|VklmRY6v)F5+Ly{D{+l%9uO0tz=M0QPSuaJh#`3WqXM}!Sn z8H++)I46z9#f#*fKhlVwYh36O2e$^z3WVZS@ia9{l#Y8A0Qj>XotXnPGLfsS#@yKR z8k#NtvOcA#k`om)qvYb>w8}?{C6l6&_m2S3{o-LO0uDvIGsL+#8e%%81mX*S02T3a zXrYZ*>lShO1cA@9>yXC#UPXqbbnRO$y<{%7GFsvqN-CMJ`b__r*Fv8GeTRZM*M(^# zd69BMJB5b*x^&r+t&lO%m9DJX3ZjlJU(aCAZW4$*uRYP{fh3cxy53*4SF17qTI}gI z52WcfSEP>1bon<52h}{=#%lR#pw!w)hKWi=)ClU0qx5=eX7|^&@bR#pw+l`iUBGun zhgNVqS6HRConC02n3>)e8Vap>$8eZR3SQXX6iD#)-Hl#byq6;kKXJXif4AjqwPICM zXnJOEDm6sZw@5b}w);agBESSDs2KaDgW(Sr6c1;_F}aMaLu2QPW?|690zD4+(*|^8 zBqz4)8fma4u*mRhaH2#4n7{jJsJ+wjdh2rD_~dPi*M=1X;U$?ZFy;jaAahL#7mdGD zQvP1ah|EOSU)GAe5NwX5snkeP`lcNkCEiXPTB;#nWl?f8OmY#O=updMFYrl+{qoDa z^sO!Y*)%Ghf)btd%4E<92bgq_@s$X}?7ofc=!hW~=iPJ;(~x}RtoMN7az^2D7MR8q zu!>V9#NJGGZ-c#`9hmn=y|-;RG=}0MAl`aAwV}o7dU8^`HgSY z?6kC=cBkQW9;<7<+to>Ss*(O~-rmcSMaZHZzVwIf=PhH@!&HkEi>>vlOTkQ&gXpDB zU8B1EO7*47ILKpWMyoD+bm36R4*Zr;=aE=`vW@x?N ze1B-L=rBjeZM1Hn1|SXPUC)>PtY^Q)PbeS|pWCQ%6U>1Ar9o{$My3nF0 z3SAEqt*umFY0afjt#S4HueNfgAsd~}gwp<~MqE{AQGgl5&Y`;@OMkC(UeZ_^MtvkJe}JH{1qZV< zE#vp&H=p+l%`mqa-~FF4a+oPD<#@TFMfamZ;XIIXaSd1KRJ3ROmF_$jXAl|-LqI|4 z26kyJxcc6D_dZdzV5IfTv?PRn?!$YPKQf#&HrRQ#G*b2jyP^#&uPwqLAsI5ps0|_W zVs=Qf;#+k9K7GPR8k>AoXM}7r;0u8XXl1DI1XX_2RMCA}R@CFo&xbxt)XG_(HVoA| zWHy5LX#UdH;)M3F;hMS@w?17V#N$>ygb!F+aq% z!c>0YV~`5#vGKZT7iXJ$IeGy*(&LYG7s%R*GkfPtL|(n+7wC7xhQ;hUl4Z^bz)Vdy z1#9jwM0sHS9pE=gEzFzPLHWr0npY}p$9*(Q$Eisv)EDBq?=Wdnm(rw=XUt(@TNjbA z7O_xvyn~U~Mo=R&t2n!{r5S-0-0myyzy>4A#N+-zOp$$}?K&X>t|Zik54-d&=t?y}ccXK9t_ z^|6QYor{1Gz!kwL5)b~fW2LSD=A_4Sz1N~*O63Aa5IL*e z0n!cSnuHN=l(G8=NrkyFA&5NpqLatXySZFu26XP7eSK7uJ1k8SHGrg-srM*^2pH17OR%MmGEubCVMb5b)k zi~qRkQZ<#$QMBL84vIK%O5YSC{8VGW1?PeLAn%$Go#%?TE4OmN$f{x|sf_mp_bL%E zw|KXzTx#)ebGf&TI`CZXbaYLZkG}E^0MqxHWW#eQ1=Pu;mvV~`;-;C3m63V!>xxlO z^_p8l9Otp7tKCK{5(B@`v`x<-T#gW^>H@^MCnuY)3ui!f?AUUf766)%iiH?w&6`g1t0Ic}C z@A{r4xb(qX*K8HJ7GboEPPL`VO~@`w0>T&~nAHY;$jp*usZF^eWM?oQnGU=E*isyd z(#W~NT6NMJ-%UhAz0*q{c^G59=fKM^Iww=*9`rzhflK%^1bxf0ES@Srk4nx zD+)H#+t(q-!}>FNkd3qrQ7h02(c0UsDmm~DR8W%)R7Rp85AT&J=QDH5WcCzH=F_b} zDbxgN+8iE*uTn=Mu8;(AzUjf}(Y`rN5I#gQp-COERUHK!LQrsPk6j%*j=PgG7{AF! 
zU?|gMZqt3G7W%66kZWSJ0ceY6VA@vx;~IgisyMM3sAT%L-21G*8v@hd=Sod>ogix^9Amk(*t8sS!!rEpxwlyYv7lk0Wv?`{vSU4uGxAziGst3TLInV^ z><+<9N;&khZvyBAEPwRBK(4r*jfnn{TpF#~wJpnVLBoa%WqVtGpo^y4sXSqQk#n|G zE8WSdsBA66HNfts^_?sIfPyGj zSFA)xjfogxNR0di{Q5RGa7PVZaVNqz)m9ls3E67w72o2qN7tgtG>#v6R5;V5Pc$9aU<2O?dJ^pl#!$ zYHl*f^|PeAf9l$U_I`iop?YDCLa)zz$P_e7J>m$I{uy?#EFiM@4jfZfNKjNiHb;H7Kfx?Fun> zI2apSW*yysnuBCSi(`uQBOmsYk~rSP#Uzwva1(C>v4NSSCTEi~lM0ma^%;~*YXHgR zR*(~JgO?Hf+2>d4u@}T7XOe*x#1@Fuws!+G!mb^3ZjbqGa-(L`*#?X-_-;pExg99$ zl8k`Z?)|e)F;3A$F*26EpC|X;eo62BrvRCd36c3^Sm!m;5Y`K*@zZXJ=c4Ge@*~lG=*s28V5&n2crM3GNdbi>(fb zvyrh5q#6xj%&5h>Km{1;4}ppD?GNz{liCwK&ETz8m-#KzjAu7j+>hRrj`S2YO|5|( z$RjXHG3S|&{QO-SFs${OxZQvP*FBR06REgp#|$DcX+0>=x(vYyF6EhqW8*7AEOF#% z(6)qWGm4J;V&!8LHydgn1r!?(VKM=eB0A!sVNK;Xvg#M6K~Ti>%-{#FDl!NZ5Q(%} z&3mGWI_3sqew%m~6HLE+o6}`S!n-v7)CLl>NK>1H@5pe+rc{R@JPIWa29mET(0k(N z;@}^>;8hA_Mx1xi;OZLE#<0lATYGdH#f;N4!dZ_j$59ljuNdLNdJN>Hdd&jWFG+@u z9CS7}5|RVCqu7VNil(9ry$2kULWB)W;+`YBn4-L^OeY*egp-X%GiOeVH8h!Lomh9KfNhP_Sfpu)O!X}9rR512SZ6k^G^tf0x!^3z zGXz%5{eL*G=xHRipivEF-Ygng*|<65G)NViVE%KBIlI&c@VAfEKLK?Si>?Qnn}ozy z`Tm6ytN^z`zi3jR^Wp)SrsVf39{QPYBBaZh4H;^v#dO62Zf1PMZdQQ@g-yz$<#qyn z;z#Mu!%toA><-kF0a?3mWy0ehly?H>(-g3VAKDe4AUnr%kCh1Z6hEx`Wx<4RQcajK z7*n8aYwg3R6OT!QWPp4(o!fk#W!&$1qsj1mqxuMJt87@0k)K!F*uCq=0jN~6XMv_o zL`it$((y_8q>*vj$k!D|=#pciXvWngPE2R*gwA6A6MAMmK^|*Mu#-}1k(oN?89jf# zudTb;w8GlMmFD@E%Ad~~_EjO?ybR7&1(wRoDg*5>X}o{eE_djf= zmoW!tL^B!wc=i^&Cxao5S7d39Pn*^g+az8KKl(}@^T$KIZlQi0mEXSkSL%cFbA9m) zHu=W|E3$dVc#DL{4+V=MuP@pShE0rY;r|BDduLksuE>2iETrq+b)D0p9i0$I z5gwVEGT(FM+WGm1%H^OVQ*2v zth0C15fbp!m+571=6|m`+f13bZ<(mgnrK(+voN(^ybW;E&1|i*y{r_j$*3`e?p(Sw zb?7Wj{f*BV!uRI z{qd!r{l{3ZA#1rR_@90$1Tg^1!nN~9oZH{WIAz-cjCoZIUj|s|LZpJvcFn_54d-O% zYC~O5Am-M@RL5j%zY?9Z}EYYW{%f6XKSRKgpT830-jk9p2>Lmh8sjs3L$D6mNY zUB+0iHSpL2K}|13*Kv*%hS0NDy_la5T1*n)IpgRxZWEA)IF4EQ%SfoW+i=o|PTDde zB?$dNVB9a#@}w{u!V$^3Be*2LQ=F`D1_}t|5MUalZOsgMs+%6m~lwZewLiV)|n#+cI!s|5RY>(^(7H zQ8n=?>q7MW{-lJQhN4}BiloCi0h9KCG6|o_FA`D{H4-$z=Z96jR|h$J0D z+-F(RBa*12lr9mIN)ETav@(r|%prkrv;R0bVHLx9MotI4L)&ur6TFEitgQ3Zv}Ng3E2#6o2|618b*TIo?EsY~b*@cZ0fVyYBJ{ie+pDuesz<&; zN-K>gNpaChu#x>PeDN70{p9%e)ziiPbu*5rEqS60rHW2-l z#-@RT@~pg>4F$@gZ6u<5z_FO%fT46+fGRY4%a8nXk~a!pHfkMUAC%qx_V#bKEqkqL1H;``~EYFC@VU8>fTSVRDg&S@FTe)-=p5dM92==Lsgmz z5uo;L<#7Ri`OzMdlT~*i<1&*?)GpEQNS4V>COf*~vXsi^GxCM1|ogO5I{k zKMt|x;zRC4dvw!6=5L^J>QRslb9ZrZlEVJ#_rq!iyHdrzN>V#j+%pgCWlFOZSqy99 zB6%1bc!}Qw+Vt`gIwt$X6R`#Bv!oZN<4a}?W33&kB@%N|8DA(78yi>`4Rbed8L2MDiCtjHxJ7_B zxBq^sw7V>;#Y&kY`v^-d)!n^VwKd9_N=m=ca#;2lN8ZwXn8pF~?80nizmB3l6juvM7N0*@U%l@)lg7m9R(kUEC3|ho zgZhFu?rKRb{$EOyG7DJ;{TqzrfJYUlVp(#aK~Ee^ktP!>I-uTu8PZp0&AGc)Jc<~f zMC*4Es;3Q`&2sGa_Ru74_yKO@!9CAicp4}CG-p0#qc7~nI{Bj=*q3k&nq)k$_8jc$ z*qWqfSm$-bj^koI2D-|ULs8N~Pb8%swJu*l30bypuVmx;a-5X%fu%mmi}jR%u0RiT z+ds2+YfXpl#1?kD$YaP{E9mU4-q%-pQ%L_duJIlsb$+>~WPd48<4%i_*)pXPl@ioY zz>Scn(a(SW$guJH1z{hy??hqy&j69>skH~T*KeTSoSns_iV0ies-Sw%Ai~NBd9+UX zIdEw|_pDKFUMOXL-DUEg{X?!4tqv1-7aeu$E#_m(EIr^WvH#0nKX7kUz#Rl1$&Ell z1@oKJ$2Szi@F^$8LYJR9e~nfN_pjm9_qz6V#T~>mo7ny6?$);H51(l{ZP>eOoo4;@ zXcR9Jr)k-XDb=gal;-Y~vCOF9wdF06=$$%CoXZ?MmVEhTIjEYWmxl6CQ_`dbxDC>70+y zr;PjA@!;=xk`Fz+Rdi+zt`Pq`MiwlK038%-m%N@`iGh#8+{6$V@u zRmFJbK?N#|o`Z>!;_nOR5wq#6cO8Gmyr-Mjq}Q|=gc9&*{ z36N9X>WcrN+bpI8oR*~5asM+o(@ZI*ctI%UQvgt^VP9r8mabp5Q78J{f582CbG0F@ z6LuIXl*{Zy(75^7*0Ij{8y_lLTs0_3l1V9>IG=iSfXSEmCm3#cLZ=wF0D{fK-;SOw zgU!cqh&}pVj9{>F>LAeWGEVqwtTX&Ey(>0ZB?5E$Ge~=!h}8&F5`>K^HkrZ!249VV z*#*F%xsj>KCc4GHyXc{~1FAlww~He<6NViX?QVs7!^(JSt6{ePyxY~@V`;0wk|z^p z^)L@E5vh2!)}&hIpRQ^~Dl*@w-2%((j%N@rd0LfZU2c8|+h9>h$iT~768)$iamkGW 
z$^=`$qc!YhGLHEWQt=`!A>J}ybM0>nuw4M;?0w`dPpjb95vNMq{5aSVmeWP^Ig>GW zy|(`TYebvp~L{n;*EYmdSRgERfFkP2MZ&A^!vee&#e09kY?@ z+O>4i_a#wz#j(XvOQWo^b_LF)yj0+D#&uf7<1ve&%E5c>UBdLyWJPc9SEhc$pg03i z!Gs|C4WEcVLHxi`;}OOjRi8Rc5;BbZWhH%ClTdOJh{-4w1D0`015|C|;FM%c;$cd< zalQPyby(AfF`3{ihzyE;#zP8|%2(fUh|=dtNJOX7YiO~yeIJ!VPQd&N?MBMNlOx#a08J)F_r!xT6kWqM(bzvoa>pV5-2~XFUzKevlo^HB$AK45zi& zncweS*Hajl`ZcWw!@#Lle-r*>j(I(}UZ@?(^`yf*kI-0V^lraRXf}12z)( zZeja0P^jkm^L>%6{i2Hy>9vCo6L(^o-xhI?aQ%xg$*P1Vy9sd#V{IYjsYzUQIh8>) z8y`AJ3sqjji-IV4CUY> zZge)W)O9|~-Ws^@0XE&JR81YDI?-2s>)X&*g|ctt%9m3vAc|aSY@9r5MK9f0vmeUU z6F@ePTZK2BttPK82|h9YQn3zB;oix3n7ux9-JZ8jB4C+1=86;nL<^xv2H}#+Ioy$3RCa+1*o|f>`1RH#A1v z>EBWuwhK4byK)&?*o~#=t*E*DiW|{KmjLXmGetV9zURieCdI;ht}8 zddYUv;Dtm+MP3H51m5N1*hBedvXI4N<(nOb$bSho@b0#=%wf^b{_&CGm;93t(q;kg zhKW`V!rP@&*0dB2KoxAMpb|rm3D`t*w!&JNyax?8mTD8v`&JUPX3Cb}_EtM@BLwxr zoARY1WDfLQ%IGE;(sq&kQvN0PMpRb9f2JZUo>_WaP@2?bZE06-nLa;UWm=rSd$`mN zcI2b`?aK-$kzqVT*H~RTR2O&sPAIQqwiv^`&|x`*_UO6@Tp6`w2w`VBsyu$atG-^2 zeVQ+B6<>~t&+!cYY1S4#(DG<^oLgV;E%wON_IJE)wH$&Emgtq+`&MOc7nGS6G*<30 z8>8{Vbe5@F?7_`Q!~c4J--xA&kJh7=-~ZS?y9--H`xtc?oHDqQXtc-zGFw6>HHgJ3 zC^>*ZWT+VdFstJ@FTWPN;>2W*!YlG=0O4vIHdaN#OkU04WbS$`txToPqjdHJ)lMx z2;OqPyg@q9{F6#@eS*!8=ub4SEJ2tl*uI`3kFYVz{fg1?w@Z^ANIq~aCJi!XX`zSG zs4Rq9&d||X*rO_%+l`HaUr_joLocZQd-fHX;>Q%CJbLt8{)Q=gIKB4KKWknadS{coS^^9ggWrMz5@~{Ur$FNwG5m$1 zHwsuk?EWw;f=9|9nhbQGeGUavVlRWopqNaQe*3Y8@Suzo6RWv7G45}b1}>QiOQLjH zIixM4nBcA4=T0HWOTNXERr#dyg?_&6&38irz?K3-cmnNiqxMLGJ#Z;Qu`DtEu2Qb@ zjZ&l*F`LG*D1rms`4_LIgTIps@?T658hL~}Dvj*~p`aT!EyB*WI*2;OS1tX_ zDJ9H7E80ly#JP*@{P!}%6mdqwbBP(24-8OOe#mzZsZ{vhCb0`zKjroTMd2k)CBX_X zWKb^<-IHp?%NPil`Auhpe-{e zTjBtJG#tr&>Bj5a+qoxJV)3@Jc-n66geJ7+x`)p?lrhS&=7CbNd1I0iL*5#+YYixof}mL1k?PaR z2ObiQ5)byi zA@iXHoFnBeyXnqO?UHC+TQPT{t-wQ~x@ToRAHfy}6wgf1eZ~S5d?evsLzRouE_Y(q zui-(?MU)s1;dA++Ea0^tTii$CB)Gv`0M!z)1=cG1dT9oZ!Vo|0ooLgb-dubPbE5XH-NbGLuzUQ*mq095_x3pq2 zC}=(>p7Ouwb*H~v!LId<+lj0Q1W4NDc_HC}-LfapLAV~~{ zZTCR3j2xWodUwvHdb(8fGg7K0QPv=BHrTUV+}`gFe%gBvfiF<4s{8-hLbRF<{0pb_ z)#fu?>X-XR7M`Jptxa~e^W1%5``^Cgf8nA3T+QUzah!flA?_}Kzygdh-YZ){3vM|UD)9S+E-8MjmVr|s5 zUn;r9@2|ROn*~BHaGGaO@o10Ng_^c*^f_2-xmq){C8@V4EFPF`8>eIFW}&_2F@2Eo zh0p4UZ8BYl)5Fb$;Q)(0^`~EEr$tRO+^BPTuu()lfaO6v3Q)bf>+M7pR9o3g+R-wcrd=_!vmFD0v!Bz%41B^e1Mw-c13lz!6;z^w$qw=V? 
zT9cpDO>_p1_$f*;A?{wL)zjoC6esUZN7xVUD~^NV&0i!!1Q21ZoA|TuU4fP`<8LKJ z$L&={lA%;C?o?0eSs2PjRghhWTGGSKL^5wU;i9=@ae$&tVY2f=QiE^avaP;H18Pf> z7NHMHc&iQ{ElhBBA;lTAwLE~E%Y}-YHaY!@+*L4CsqY=2KrRxuo^lWrMy-o8>Sl>1 z2A_7)#$=D`2sn1{1U~EGcIc-^e1Bln$S@{nm9YPF3N7bPt3B|0&|^(O3g?K5-W=p3 zJudoal1n~0-}fG8k)`@hCitqR7$P;pPu=gDOgcJtU0ACi29~={zOd*I9wTckSc0*V zT>RA|oUY1l_{dPYHmeUfNqKWii=HdS_>^aOxG%fN3&7X2GkX!`E%xJY_eHj^7viQf zggg?Tc3$>0K`nXD;#bL^7&l~Y9E8LR6I27Wv$9IN{V3d zu2JQ_dd|&>)w@uosyuEs&}5;r>s)Q$OJ5liPrQjrJvIYQYRFE@@?M&d=c&+2UE^nO z0sjTK%0ZhHOUvwaLw~Js%y^Zq)KBSV- z(Yz;y5fu8Chl)R)t6H@GQ8dr+LzcWuX`9KrFB$%;C`;9r- zWD#qm^5#6^Q1K*}@#|+eZw$-WB0Ki-ZZZ!}V%EKxLmts1v6P#v`!x5HJ^BpDwJzF+ z?*B;<|JN5pDCYaz8*?{&T?9X5StDzo70@880iCoD!4yYS1kEzDNYtd-EQqRidkCJA%+?I?H-UFq zEgD#`YneNs-)Zwkhordv6tq1>R2Oz8Zb)bA*V`C$RCzM6h)dg)1FJ1nBSDRXyWXKO z>RPY0&E=V93UGhrLK}h+_ivXEdv^3~zgBd|TZc$1eV=LL9w9`sP18cOdm2W``8Dc` ziKOkp1=l8rQ_zkmX4nXCJ81B5c74?V=?a=XsohtoLY>WGapk<&w|E`$UONu%oz9oz z_KtoFLVXu)J#zl40z-=@j=Jhl zj@D<<0dO`*$5v&AFx6s)sGtqHE^FhS(}_r?Hr=!)Gu`ARHC-fx^tO(=s3~+kzwSa# zt8@@K*RivNd4WJlrf>u7D*e=A*|FP5uP#-Ez}wzb{fItnFf)1%dv1J=_b(R@L2sou zVRb&`wIDx^k{jXPuA3X-R02%bXi@qL=bOJ86`Cum3LeOja<3vmjZTvyVwg2Sb~wLj zH>T!$vCLf8B^tXVbCGT67-wwM`GD!lvcdIP@4yhL9<+!A+j-u!_&|yHpEKO!w`{JS z^A3VnsbLXzyKKerdZ;OXP36fRo;*S63dwVugWx{o6f$P#5p)+BpL;m;-{8gKh65(9 z0RA^WqcEc}2u;l2mnMhUe6{V?6Lp#O-64As>dfJ!Bc|&6l$i8oGIs+HJwp`5vJHSoeR#u(#s~>J#U{ddI~%07J{Z?LC3fX&-ih5eKc$*~i>*=z2;)=WMu3pp z(+3Lr^SSwiH!(a?fLG_q{oS0KUQ!UGt6BBm-ztqg!$g8%K0hA?A;Sn@!Q{S zp~Xp+6R1YCma2%URM_AbK8>TT@AEYKLztAYr>`96nM|xhf1RaRo%>iyi$AU^Ga?ma z-lf@6C0%&coo-+`PKSDi!_c&3z#>FlWJa}Y63X~7JzV+HZKOBjesmrQ?d?YK=aX;A zP=unWI|>^TUAo-Z@BxSkZbQbI`H-GhAwXcaWT(vj3>xXZ_?Ei2{hc|x-e^x!@!`&w zo!x-Kwz*G2npQ=UUCTf*m1)H7OE_LAMh#0e*}dyvleAmVvAM4p&eeKws_xpepWo-X zyP=}=7o*&nF>Y5kbXWw-DiqXAgZ(x9H_={H3>f)R^~1vQ{3MceMP+yIS0W4}y5MYss_5$$l^7Jq}#=3nWJ8wTx31N9dNbPO!}GoM>x`LS5Pdcgt4) zc4`#xS|!;<^jff&>9>p%+eZ`iUPwC>@v)1QV>S6G%W_6(FZgpXA&S)}Htr$E@q%kd z&X6B=J1&3=9uE<6HmI7o1Nh^K>xtIWoVbBxl@!7R;AN(i@qCq!W1wc51HD`AeRr;C zrE;B$sgK~;PnLPrGc>bfH9XTJjXAMt^mi12>;@0$7w}0IRIX)J2pU%JCGfylg}F+t z*9V&fiFWE41#2^Msq|SiK0tC?NAw=QVe+M3rrH1N!HBOW`zbzygSg&kq9#*y5=o^K zKPKGvyYiK2_T4k=Bw-d=DnEbnFRZq^O2e`o{~1{5|AZh_Eyf?VlGD? 
zVB+XMYND@;&c7Om7?98uIf|NO7$i^Mrx&V}Ctle+SS$am0Nmoq@AJ@e>PW55(XB^fy+$FuDX}Xw z$~Km6{H%&C#8nubnQkf!zNy?F3%*Q5U^FFG)KXs|?+If@U527XY+)KQ9eEYP#`t&ryAEJQ%Fo(^*an@sLlCAIIBl&3j) zJ=^jAgST4dLxCEe5=PzM%D(yM^NgOiIW$QSRjX8!uGr_ z(iX~2iV}V)VcH_Ox%+o%NT~78Y5SuN@PtSLt&3&xkiM_(^x==drF6&#^&g)j7Ps$f%oWAWOddPg;1hTWaMI)9!P_wuM9@ zzG-SppA73S#HY3{pe@%DQrxD4>?P%*$(OKrF?cei)o^0XUTM#dnY6;ep1e>K+{d}H ztz=zvqi#{O(410!2f&+>KD!sFiN8W2Qbm)|{+8nia)G_wdAj4&1Pr-Y_&mNZ2z7Rs zxdp0T4Tumyh<1bhTBX80EYu#4yW5*o6I@hbMDtxrSZ{lZ|1L2mR3k|K$oS)5oDn$w!y0iA+8Rs6-SP(XD##95v&{=UtEk8{@tHS8(ll9AO1Zlpq433YWdx% zmz5|m$)PmKn#zJV4qQ5;)l3;Ii69uXe!-MOea(x5gNIp9EGkB^f2k>?BCj|JvN93* z9q9-vk;lFgu|<{f%voe3i z^~^^SV^*P9BMfpwS(8a839zECwm6!OwIO|f*u8)g=&(h%3~V1BC~_th_ag-JH#P$N z`b*Y5nUeF31uOpNqa+|_S^E!M2)hGUG`)Vsp_cxoVW8edzh|e~N4=TyH*qVkzKi14+4D|>hQ%L16h!;~ zY_SGWJr-SZ2V--9(m_=J@_zfh1J}>_0He;xZO8@z1J}9v;|E=+Ra699yO#Il?uE50 zu&)P7gIv98f?U@Ju4EuTd*0Rbcw^r;Ul1164%0kF<>YG`IU@iW%CHk^8p1oscWfvQm_}0g|5O^xTO(SktA1O5>T$; zR6)!hYchM}34JP91YKN0q4~9_0-sd($dg48pOLX9JOyNLAfLnxF>)zCYcsGV%6`X0 zRv1vw?PNfTiLmUD(?`(|=_^-25tj*vL6)n0|JFTAMPQR@QeRFAjFHWY&Q#?sXP{?F z!5Ou0n}0KRCFFPKG^<>Z>CCmXj1GE~oVk$pXR zG+HdjR|Vo-813e?g&T&G?Jlo6BZd3pb#_w8-zSMKDxui_(xa`BP#8^Jra;B?BN+sPM(i$5<{WKQ#w%CTy$1vv|RyjAi-5|Sb&!*!8s zt_&p^Sfndm;dG#K9_Y`UCT3uT$=I?a(fA*Avo%cA64fRoJrWN?bx<$CllMYZQD-X*tTs=oQZAQ&ct>y!JqHj|327NebfitRekVO zuXW>+AI5W!($mj#;}$C#+C}RXIpQ@?tiw-Dh^fljgRvn7+@Tz(hw2=n-btEKze?*( zgXvZNhyrryIQj+4F{dibhf!{dxrl%VR=l#G5dCNE@IW+t-L7M(8<0V6wCBP_k>|n< z$*S8ldM{X>Y!{!zoAEymE?&@M%8^3lfVE>;hY`R3IU)YfkNWv5X~TANt6@D@ckFX% z`mYW!N)m?m}>HzTRwZHovaojj^gXyouJSV*;zHpo<+d?+jV|_^3UV#s;%!m zXJLG0HeeER+?&G+Dwz+t>?L8esLS>TCU;!*0@Iw5qK6NDw?hRbUAu1UMp2clo0ve% zZ%blg+w2bBAUy>Q#Z*S}=?J?nQ}fE98?lAlHD3E@3MK4A@LY7*(n?%=al#h0pM>7E zABsGVV4(>s}~Q1|mrS zZ;*x>+m9p4t1ypNJGxcsz^c?b>u9aYn^D2WCw1vAOMw@kXQd~`FPT50H7BfySl6Lq z^Y5ghl~l_3o{4=y;WM$P;KEqtds5uWqiV1f)%e6Qf->X><}ahZ0*Zb6m-!|TYb;r)s)T<==q9pKHxU4nk_o&_j*nET)l zCgi->nOyths2>a5LxSXkBbP|i7cXrbrr-KQo6cL^KW4Wb$4?t1t*758=DWSpj|rdE zI8DWqZ*|Zyd;nw#Cp`G9?p1;*I4RqTPlc*xc!PC}52hidD0HFwYScgyby&Dfaip*GgaDsS;-YQ&Bp30TTR{ zd?H_qK_F7mOV;TO!nOegkx9^yDaDTd-e6`*AYs*F)HkvM!?}&50h=^vo5Cqekl(LQ0G zbdF*#aS#+oqT>eg8=W|-yApr8Jq(*MW4ms8m!~H+<*8HCf6ffp^`f0q*WEi?nO@>= zz6GNASKmtue+PNv2Y?6JX5>WFk9tK7IWdSS^y13QttHb)o~(=+tlgm>^WHvmk=!gT zY3J8p(RO5L^H~B~Jw^HDWaA}I`Q9sTR+69-GWr6gS>_1k#*Ds)ZR%%RA(z>%Q1YJk{CR0SVWthxS|I?`T>F=^2Qml0|PCcv41Yr(iVn@jWe2% zh?{eoqGe!eU|H>lEZdmEm`-g-Hd2kgcT!olPOuVGBY;vl;0(?8tB8im zna0=VR8*PbV52cjReZgh>yS2j$W=}yxR$;qWT7Met@~-39dxn5*xO(y2KI?{{QZOC zaf`mfbtE%T2Cyb5ZTvTXXdGi#fa(o_|Bfyy}F93Ob@9F0MpaW8TLSv?NhDH?#N(J z=Jm-L@%1_j$OnFVn9mT`)jFH+u_gE1L&d=ki2-2V_EMnp&3WpqMP>L zv9Qw*=dX)@;7ZAqX*64p9!16RNXj&(A^Fi+YzWp+9aqiLi63SZIrU`dnAaP#&AVOY zDqBJ_^*v)~R`O81jzR6gIW9cMmfr6iO*d0*e;_v@UZ73lrAU+_FmmBd~<3!f+lPEf-d49Gg;czesaHXqp_Jsia?ZMBJMSpY+%q zq{*EVPhW{KLoAE|-HUC(I@HhtTN*8rcF()+QzOTpEW{u74iQIKfQ5x3)yCHHXymUE zf)YM04J7%SPxOqaL-Ytu6CN${P#W<<*vhZz%S)vvT^+uh- z*D{Sr%yE3xRao&5fDi-hi9?S1&h7`~7s|Q9-!6VO8^gDz%LJ$He_p0Cimk(d&HT{> zkzS|3*((YxpF1Faf#+BPF|`bFApi+7g95!~00VuwC;(F_09%|;@0s#}7x04X@`j@8 z{oMJvFct0wK!vquW>n^qpU}|$QtzdD!+77JjV7te1(N7HaLWier3HN@mKOYw|2*Z< zyywm-;QJA=>J62j{aer`Mazmlt*fsk#ETd+_#Yo;pZo8c^vl*s^Z9l^e2rF22u;0z zj7F}B59=QPw@CGnPc_2tWP5WPl~dG`}d!7v~i)s89OrW9T0it#ZXu!U{VA@WB893ns;uUrQp5MnO8j-740djR-cKxTm75CSFvj>|D{QYu$M{H#m zh*HE7Wn?$Z%vn^1ah!$V?PYNVr^rV_u;EVsLV33uyF{hZ z;Pk_!CeI-7<5sup60McY?5MUFq8qD}n+Q$|5qaCd8SL zbD>Xbh+-x(nW1wQy6`x$?`n|>Otbc9HpERh(GW~F_c~yw0%vf~noQ(_R3eP}Yg;%KP#-+RM3?as>rxBe1nu-0tiC-n{(Y?vIb_uVTC;>$`t$ 
zsCsJ2q{8;s9@@8Bw39DK^={J1g83}@AzvU@$9y14hEGN=<6qv6?oXwDDVp&y>62A+U$x%HUg*nI;H<0r9V0&zpQc3K^c!FD&6#ZeP)(Ihzva;wwT1;v zTans=VCf$b4~RUh-J4zqhg{%ubEGYEJXN9-bhuTnYNKP16b$9{T%{Y0f}z7$&+f7O z$lu*NG3f!LKbDJ4Amp;cl0D3BQ_YBfT0prij*i%E#od!f#rg@RzdI$;UHn|wo21qVz(#M2ngu+RC?$* z7%c#erxxF$HK39^pN{0X^)~6n+c_Y$oS?YQfu^XODUlhFwWJ8R8$MUV zAlbK?z1W74{G*A~JrgM1l=>`Es!O89;*_dSDLWiU(j-?dSBpEPBz3LP+_^7BDR{~&qkF6+cpem7kmANozj)`u8q%cb?ZsC^cv7SbZ z+YH4@GcUB^e#D<3phTw78G#pC(|<6TEwgSs!~P>isDvo$o{i2FE1T%RrJwDgJz{O* zmlAfDp20p-JFAFEJ_Pzpbw*-}v!cx;QC&pX%^^}u)#VAul!^Uqh5JGItO-P&cd}-z6AQBYYtM8}06UlO>UnF=2~}xg zyTC%Fzf440WA$e-ae?fb)dOJM4$#EPlH`W?EtZY=ZqI}YX>C6Bv7)|VX$arZJN4v% zffGyxpVVVxn5zu}%SkdHk3qB-V6fLRUrJ;l?S^F_vT=f8yqhNa`Oot@($4G#byHUK zB`{Pw90dV*6=~_d09jy?oZbo55o06@txwOwbfTWu7yOD@$LQ+rehI)RG(S2QvcM;| z2C;(+zCBa*R0JHT+!P+-q0N%j>cnVq>6nMia4(E&U?&Z%AulTimWo!5LEb(kT!ywX zu0<>>b;_EadBw!S!W&dd!)TgEt-hQHHj(gP_IDy83`h7hsZL5e03&;RF@{q1SW`9v zS&M28Ryuzo30IZ6)Cd4KkQ<^zeG7_q9SYL_N(7^&BnGy`kaZhx*08Jkf)HusL*_zb zjjZMbP_&_ccAvKn@;5xDxOE|CdeqG<275@i+tQtf1FOA@B$ct_Yb|C ze3N4$t>Vv-lyC-=RoxVOCRQ6&xv~8{cmYdQ65PTx(=C*B5sc&zZd`kJ_Sip?*yMY! z8e|;|o$91p@PB|C)4jbjM^AQ~TzJS8I9u|dr!P}zCeZuQjaF-#lc|wfz24bfLT>l& zcfr#&V|Q;(*Jhd*w2kJGzdtn23Y!!=_*q2P0wd3q0Kup8oL15`K37)VP0m)ICgv4w z_OWErXSO)}i<2broXF!3t|D)$6T>#XCXKH{F*t^+M^FI4)>i$6xn>KSZS4f2?ZkuJ z%i`MIuE;+^5IIN;rcN8b+UZq3CL7D&oPX1fHRVxky*`mRvHX2FE0}LB?|jV_MtJ<3 z>ciu+c51Oa(x+E|T1;rd!nXrR8Ca$tO#QZTHt9!Vdr&+01$#wucYAiZ-|Bp_o~d2| zmo{^y>|}t=CXub}!400FPaH-U``}dwbFg?joHPE+taW0umTE>~=ZR7lJ7R+8kkFI> z95a(Or`rdLlxfFxk)1I3h@miMRP*K$JQA%jLmTA8%qitXcodp)JZ;`~_w!r{KhfZD zRo&&^X0)`hf#tHxb`f07=SX#T&(k?~TY{j`#zFv+^r=dQm|N|Nd0>D#MLGwR>4YH- zRX?~~65DA_wA?1FtQ;+ZvP?9G-+eS&Aj4Agsl3AlmHoIsxQ+1mEfg?o-9OJI>@_91 z`e==8TE&>jgYX80_fEXc|X%OmvvVk!Y~;FJ@Rd*m!obGk#pRN^K)n)_rHeK z@&o`CUM;a$9IbtFbZ&9`{9zOq$_Wu?BW*8b1r~`2*$UYz1f!iT&`u+Gw?s-f{?j?1 z``f0#-^p&QIPl@07UiHWQy&$`paVP=J&x=CR1K3w$nIC0yrP}0GWjCRaj?_J(>{gj z)Bk)3eJ@e?u)qJOhW;nyd+>vuoc&!7P`J1b-5%9WiKR4KAVC?x@~{uNxxasrNJqW=J6#DERf#rtn9?v((C!~zhJ-< zKKECe&>M1XGp&UCQC$n}TZPPO2Zr=+MVr_r&s!1`tj}wIWtI~l-fX}5^5@0@C{{U4 zZZ`Bjw!}`;A$oZP``IQdj#=Jj6k|FCnXl1drlJVJxm4WtVv1;gNW z7U1;vu?c0!d6DZSJnjVDkm!+zjJf7r2hsMS=aET|ErgTjc=uV1>nUhwL@oWtGG>}Y zk)^HS@kU0musAm(f|cV)Y#;<50#pl7+iB?Lh{g_e`y}d%p^K?hl5P_KzF+{BGFF%Z zhY{kxX#P6rec)aN(9>Asi*wc!Eipo3f}v~4kQjH)4#0nk!U8aiLpMTGs3TpNfHzp= z)H7oulbAsr{~W90?J)!4x05u4EN4~4HU_CuNiE3o-zRXf60^c_bs#o|B7BWshS%Ja zF%i0w8`@!S_F0mpDsGtq$eU=;;+Xl!dIvn1`#@ij`z_Pjssu7Wci5a zkcgZY2!++*3gA8d#9-YODM5lfB&1Rrmb-m`rJ3LvjRI=qP7rw{dv=vXS{kc7>Oa!&I*&|7P!hl?9ib@P(T-r~>db_xtZy(hUEu+l^iGzu5W<%7Dt z*sVC|Kjy4&^=8QfEWWlz^O~ygqZM4&N8hl9(D#6+o4p2%nsin` zeQp_GvKm2cl@&6#d6)C@)>y)yF;mkT_bFx7acH(u)4y|o8ZJjk;CH{z@u^1+YU*61 zi1A&xVrTHaOQC}Y)<^Ms@A)gx>#FoGBK{9Tkl)#3X%skG%n43epo16$APv;mjxHLZ zVgWWylu5PMWXp4JRCngEL*N?7JC)QfiDnUiBj8f&U#(@)<^z!Udy%H0rK#d|r z!{6H?_*?w&d%gw!t6NNI z$v7OcBlY~Cth8Az1J@!6vpLsA`xq1p!ZG+RE2&_D6Gt#?qEw|O;@YkNHwoTDn}7)C zQmW1AeknBP<7|hWiRtOT&in>3b)05mYWo| z1R)KNlDLWDUbTA|La%bg|C$j7VgNK*LBM|>_U$ADSuY=C%wXj% zu}5CnvX5jbR;i2%rSH-4LJ82(+BZ2j17>%eC^ksN@M2k8!*D5#F#{gO{~E zFLPZX0DnPegKpsf01kYpPYorx-@L<@DF8q8dWGqZXOA4-K^rDCDUj#tbU;e*#VRAD z8$2FeUiz0Q@3@#n7^K|I3XaEwxkJFaO%*(1FEA)~ra87ijkZbW@Ski(RXu}%-?VSY zGpY__qoUW~S~6m+GQVa>%n?qGh+I&VgsP^9D2%dr@otIZo#o+Cag(Sc{Pb;>rP_xs ze9Tc&U`t@yDfb&2nqG&s83D}!*fb%^*r0@sS8|NKikdAg-Keh}VQQUyl@6*5Ih%9~ zGnL_~N^gpXRyK#vNj@O?v3-T&z6MM$VjQn13;M1920jeZ67xV);LUaoSXcbO0dI_W-2Yetr^aLB>%D>X|Z6Wz3CTIk0-QpjLdUiz~S#sJ`T${y;f;DOLH zU|{?IB&aC_q%-BJf?K4m&%~hQpYFC(?1^Wg9G}Nj1!IbYO==-ZuxxfuawH^ZI?!ZX zFQATdcrY|>5%6{pR79e{!QBRSS$gLk$g^C4&g)3=ZG3ZYW{98e 
[base85-encoded binary delta data omitted]
delta 358514
[base85-encoded binary delta data omitted]
zk}BqOQ_k|zUH>FZBQ)V`{j4s=tfHtbX?0W$;hkji%S_J(T$v*?!PYv8qng2cP{!4) zPqtDEAGT~Iw(bc#+c*Rj+SiRpjQG;OWjVjVu!*LZWYEwVvZ_DlXA&>vXN17o2Fhxc zlu}))wknT2h6P$1Z8o@;9Vg?y5UUVK2&a9|Utmam%p1lJE5V%@lsGHV; z1dZ5h8!|m@cgfsSaNEQ%R4j{A!9Y`wp5k!Bl<6y|fI4p}?QEJ_GjML+=sj@8zBf-M zl~y-TVCuTwe-N*2di9eZOxqZTtXy@rZrmT5M82K%>ITmd|Btc(^`Ej)sg=4!whe;< z1auG#1VoV*pay}KcB%$}4p`Q<|3CIdgH{_zg4u>wgTAhi;bM*G8iFowK2}&jf>m_1 zSS13MAi5ZAZ9=q}Suj6=I!$|IZ0r3C~<%aOAtl6yK3e zlxj`%A0#>qSOD{VXg1`56&p;d1O#ruABlgyQPg8rpKHLC0D+W9X8Or!Tz4&%X&TvY zgF7~)&_QG>ITe}2^2O|9HB_`RF`CM<_GMPb2<3W%WjRZ>4?3Ah3ptwDbMy(B!i;?? z)IuQw?}m`RI`5xILWLk5|1l;??wmvJ6EzfiGKq%$Oy~6ryLX*oi9bjm2QP8hM`==Z zF!PJ96~S86fEcowQmqSh33Zu@-QjE*o~*+VYW(zm{>%pQB!l1UrMtY{Uf&zdPTV1y z)arSu2~W+FqpG%G55cd0qrb3!Gr(GuT%C-BU^uAc8N?bz{z#yaS>p$caSel6t*$O1 zHJEil#owf_zzE}ryuwNsNzR^<|fsxWf12nN00w6H=^J$>=A|A0M1)IpZ z;1L}e0?YTFvyUSFlt`zt*I;0gi=jF)h--UNWcxd!5#Wb@FO0+fdG2XaA(RBLQPbOC zbY|l2pWjiCOS!O7^bc7?FBFV~OCa{IE-{wO&*ja&JW*f1kNMa6&yVZ@><>H!9_JXe zeOi@aB;e-us}8ecJqwq`E4Csv2k04aZ%V&E@T6cu&RpW1=Ez371VD4SK|O7wM^{8$ zmW6sx!L{?QDLpunqG)OOReA3}+M8yQF3E5JxTVyBT;472zyl)cmkv3n>@a`lnjH~b zp99CxYQ!Q{oW+rurC1bDlR;%Rh)q6tL%CurRK#lkVo0f$BlDDSk|VEcl9atPPShLT zAa1H3kYmCe)7~SgmexXhci4{9F?nxoWIHiAhz9&8>xmU?Dbw zHE6V}<#b$R)Tph+(}QHBoD|@WhOapQYjDBt<4>c{g+n&53c{zU&+T!)wjaoGG_njM z7=85pG_E(Cu~`bS^O+0V_|RTvv~J}#0jg39a#t`?)M)sdV!mD55X zP*_1tLm3ilXN3|p3`5>@ji+MS?O8geRRaIYcljaDbl$bJ!Rch_R%ZzlE8~9itVJlv zrqQ9jpwKlHvsFe>Jbgr;R8C+(z_`i|WFHaX86HExS=utb_EGN5;a(sUM+BVpDAQlc z!4!6K%hSVsf^>T#4sZ$zMj(_OEzwur8U z4GAOFIdvJBslQ0E`XII*2jeQdIu6thEk#?NCSNvmI6lBFZ>+W_{R@?V2D#lcv8Uaa zm1dJv%y#>SEsK*>h59kQs|%Ml&&TN71L!0f{y=r4!1F`5c`68Jvk zP6+aNAjm#EFirqfq;fjCW*+b|SYR@^3K3i6DK=FgcLk)XC7zz0v{Q1h8JZIO__SE< z9lTEa;ILf!JnJlzx2K7zD-?yy#^Z`(vTkGyFt6oIg2sj3~Ci}NV}P_*~XfjdNW0aKO zIg28t2PKc6Lxg1l#EY=#48ai=lX(HV4neG&v|vKwExjQ`?ut`>k~x{F!@SHs_N%iJ zuxuR12kT7mc<_?RBw?N8FNjhN&$XOrmULGMz1O;87t| zv!-AHxVqae#vW-tBB||Dft#+Ta+}U6>w`$6k};H2U(9Mg<~NSeg)SlnluvHOs0XR5 zqo9*8Uyo*AaR;(#Q-8HAHXevdG@c(3g``fglv-`$0YOegb{OX%%dAozDxpdtr&qE~ zr#~mQW_{!$FGkLQQ9q+f*y{J?p&fBN9N@wLAma@m1_l3LraY}R_g@S^>%FTZQmbAM z*ra%uO0Y8p?zv{WR(jTe3#4Fe(9m8atDIoE2HYhbQQT6#YQ3BqIz`lV*~SMVF`HJGWgXfGPlJXr~G_;IL~!O9~<8~ zY5$6k23apXvJfTD7^mUAa0md}RmIC);Lo2G{qs?5vml!5ylk6L| zaG}SlUD;Cu)@E~_s3^~DA7zB=j*Ito#|zp?7MT~?smhTrM?kezItq|80x%EFGk>m~ zcw*}#*H%tu?~vWe)f+K0+{m)Of9)M#>pL9%@V`UYfYltacBq1PAFtAw!rZdXJ@7{2 z&jIkgTQS5*nd_^but!b|y~g1=F_N}K=a74RvTY>wMTe0^R94Xc0dL4VthV;})8oz6 zOK}IAh~{-uN%044E*F4MBJMBta9MslSL$9)J@w_J;4I!^92_t^n=8umaplSBdUi>x zRktv5zP~zkR*Co^xs^O@ap9)}UPJlFBO1aL+_3QNLB*@vb%n7!E_vZT?Nv;sD8&!6 zMzDH+p5{0uO_5w6g1c|8@N=x}{k(@3D(FGkK?xy#G&%Y*wh#El5LTKM#b#;28F=VT zy?lB7JK-O;CLNzZNLXF9So|6jwl+uFYl(T>TXyqzS#Qp7xHJAq-=bsqv`waJ5-4aSon$M{|m&gwf=t>PS}pfUSD97 zZDUpH+E}rlP>AXgrLN_4bKuV_@qV8vrD-!@L;;tvf+JF(StZA(ZmwsIa}4r~l@hC~ zmry~((fis(@fj=L(#v>SQO zBe9K#Hq)N9tJ;c!sYrtvF@oUeaZ`FwnF-#6^M16GGCe8VioPM~U)ZzN)8H}4>g_JT zjMyd7x7*V7IkI7}zAa&OUb>b>?(rE1HPF+$27W7Sx@oGhA!!p%pYnZW)iQ^FRN--^ zw7|{QoVtp_1xo#@q$EHue|!I{S@zGC3kuhSCT*2L;2r$!I?U}lP+Q>2hN02<(Can! 
z?4DIIf5v1f@BB-WTVey=L|)L;9mzW&=yn9z(`bd5IDjM>bO5NdWPb%kxj=&psT?g} zoEhY{UJIx}$S4wQOj}#`G+-aM81BG#b^#BDtV&nvjV3JWjSX)fz7i$?s&{M~I7n0n z<-Hw@{%pbmLv1}7t2FJ863;~SQhIaz(-K=l7}kXRphuNITjaMBj#oiljP4Ew$iZHHtX-X7sP)u+yQ>AH1;KDw10nkkuL;)js;-NYM1T z2APK=W^4us2{v#RQGUZ!9c1(`>?0`ki^^2>q) z41^jHhZEort&oO;4T3b>3Aco9KP$V+9xHhP9`%zp`O)7$vyCd-&GWy2Nt+$N4-PTe z32+WJK2L}hl~H@WsXU$NKRO(bEW=!br#(nxHx>9gllrlilFL13Om2>_>c5O9uLFxc z8kxQ32ron@4sug#1{&Wy~VA$-fu z4CBSJUZRd*+d5t5&B)GxluzV#A?#`yH9;T93Z+N;o^7gLJd-Hr`eb0Kqx*O+Jty~Q zmt{W9I>J;SRXQdzq1VXr6r&Pk!HH4+@`1VZ02bIT@=e6DK*Ek%LH5bgt>wd11m>6U zO)X(LiLp{ z%=6L|yF%Bc6wIT$86Q?uxP^hN{XkFJaml_PqUHXw8O* zD&;ln+&hqrZHqegeXzfok-!4OL@RMhbB;}z71^x->x{Pvs5aqI$qJyi>g-wg AX zjPAQurdB`#6<$-=hm>ncwP@ws+dftt`(}d!AEm{lNRPJAfGCVy#=L> z!o#a;YP^%^_uIl6i#T4J z=6PE-X2Mdq-k|Q=?Oe76=40uC&2yNQfFQF(t@@Mzm0;uNE(ov=&=`q18Tk$*=p9ef zSju0+g=ASX00UWrE{YzPF|eFgLZMW%wm3B~PTh%=40FfX}RN>oO0Zv8#BIr4BLyOE?fZn#BYe@7>+Sl<4sxHBAUl z)=Q!e>-dN@xB_mio1`-ouvpdGBTc3{+V(fWuw2qgz7nc#_g~Kdzh66?oV_VEXjfVf z+ekHsehFP|d!fHypMt$F07#`;P!Fokb|K#&nEWfBj`hD>B{+|&5c4M>CLlyPuf>Z0 zOg@nu2t%iG;1dOo`84S|;|?53xQY;~$0K0}NhOpEQ~@?XP{aeAIjYL|=na^DbA$ti z?Oy+&;y%W1VHpoaa5GECa!(1gl|;K}$)mfY6ZFE47Oc2%3=lf~7(Y5&=KG`lxhb06 zTh=u6&&3JjpoPPVu)zc3ZDV-Y1@K;gTzftgiDy=-QaUn|as{7#Zbzwq{Ov9lsq6JP z1|IuEzW^lla*AOmjSv`czsV?{FUcnU`q1G2J_h3b(64qSwbQ$qX6}kYbp`NNvf%72 z*M$VnH~r^ZYWlY(8`qtvJJ;>nwK{n6%^iKTX;a7Ff2FJ}PLiIJj?RVmI@?nd%R`KVx?@>578B$N-?%x(PRydZ;og{&nS>e@oQJG-7a| z0+bqr>f8Ha2Vlfi|1sNE5U?EtZEr%HCFw#LnWFH4d(V_GF7?7>2L zl>n@A&lTOYHSNHK%CSd6Hn`F%^ond4@+It6kV%2l!I$(J#I^YZ-Xn#Nc3h!~lZN*p z!@dK8w5<*9?6lz`!}8ET(y*zVAb^j}iS1hc9nbhP;oql04-#nWVYQwkfwolT2&$#+ z7W74Bh(Pzig8No0r1Yk?cS|qvgW@oi!T^X)DH;xhMbs|T(R%`dHz_@!#f%@srjwIT zQ28QOe325xyG-??OM?EnM0%xZq>h#bY;Fu;9O$abg_8lL@JA}lW-rYds})PuFO@O| z#d&2tjc+>IlvgIS+J1d>&%H4CBn?=h#C^0B_t>9OZI(t8UJ3vZzqb#a*lyp#4**tn zU9VF(QduRJ_dlezb&E&s%TH|10rDxe2iw|NI@ZsK5qDKb&A-jZtGKbJ;}11aIFD5S zP{SvOuxuy;y#w@#bHESEOjq{fZ|u(QDUJ-50lQ|;(!h5o5BK=7w_4dy1f-70#B#8sf<<3$rH^r*hIhY0${*Y=!AoytZI_ zM^8Vb4~!Rq$Z*cisBR)$I9yJklX9R=6H_zKeV(xRpaIuOKYmDi^%i8F^aiH^e?Nhk z=QwwyIiO)3p?|g)ZQkFM8o;(Dk#;rV@Ymni#|+`8t95**HxNUAYK5C~)4(>9+2cHS z6cNjOZ1!St_6rv&GzSL44}l8MICO5aS%=eTfrZ}YgVT})(Xp2t%+;<70UNsxafnf2 z&)LWLpI>GTdPjC}>ev|G&511@*A0W#`>>PQUaF@mA%pw-)IEid{m12I#*RH8A;Y7~-2((o}3yuH~yXNf|O zh-R4)`eBig>??rCv4lmdySUIZ#$B3$3l2TfiNDJ$^R0iC%N8-3H1u|C{M8 zzoifuZ4N82@nmzahchIJ#rllS1WDh)LQhloOZWSri{)+X6&Rt0Y1h}X6jz{dL5WT0 zJI>*pyaX;~KSP>x zL}Ue-!(a}LOCd+E zZ!2O=4T|fKi0muZ4)LK<|0h9&4&Cd_q=xuJQENdn21dn9X8y&NXcaCAg{Qbib)3W~ zzL5zHXoQ&B9sDXtlB*J`(aWcoe0>N(ouOnhw+fp!Bd5c>)z$VVW3r=eorum4+7Ym8 z=w$V8j%Ppdk^X}9VatukH7MMkkn?0-<9nAkio@iNjRfPPY=Y`YfeAlonSTB;vA!4K zK;L#TeBFhS`jjYh^}_5FOprt4m%S==)3wbCwybTMb^#9F?X8=mJmQRTdt=*hwr0#F ziMQorvyi_}yOrW+n7#=5KbIMc{s~ZrMFw-^L&;3B=5h2VXiF;0JK55-0bfxfdco|g z9-o{-b`O8lR*rXDDy{k6YlayDLyHG7GFUnitD1=yfd|5Q+6hBp4c+$N*S9A{#ym{GiQq{Dn zTzOy?O(|8TvMj}IKC!jVSq|_zhTl&g7FuEXRZ3vu;J4>?!RyB86LI0!+$RRz!B*|T zb5><$vk)LyhQEf1%_>K1BYN~_Q(#72uZb$F8Mwyenn~ErSr^C~}73F$l zn)gOQIVxCknwF~H9@J`jz0sxgB;Iu|Ij3+Z@!8atu!cOC!Rj2fiUknyIpI05pdV0o z!UUS#YK+Y>V#$OPVE|*tN0|I?<09B_;4S-dm2Vy7W7sS0%&wGHYI+gw>{`20!# zO^HiySu*kp;?2c3?AC6FD2=c4^RFK(uc~6{k;J~SGkY~6)cc10zr(+SIpy3NA`lRc zAyCq~5FY&h2HZzhfl|>nBD0EgYUil`m5Jc% z_l4`FJa4WVsUQ~*&!c7g=z#bbHF^8Ma%GVei_>B1>3M$F2t#=s2Neu~G-lZjtb{j40EMapiby2DD7288 zZ(6OA`Om~r2AWypnOjf(S(-B~E>va%s^u`4>ax_yUn!U&uxn2a-V|gQcM!(*b?jGM zRMxrw!hOaAX$^j%miM6*tK$E%J?tLpNEoHUbjt%W5N1rypOerd6WNd~kl zssBa$LiL0-#ay>pz`uuzHvllw<@-sYlO^Q9Bc|001WRrbRS8azI`_UaumEsn$txKV zpMk>Q{mXhvkUxqL$8l6>50n^2mznEyRCY1}_b?TWs|_;JYGJ6ufo 
zz?5qJauqJ7!gweLoBaR7)jLK97If{lv2ELScWm3XZDYr_ZQC|Gww-i3?l|cro#gcU ze&4v`p7XCp{ja^N)|$^;mlm!Oqbo|DrDjPzXBrFK7f|o-y9W!1xwk{WxQIRV-!=s+ z9gCCl3m%0(l6fehynN+}2~T(<8X<^nSb_D9f>=v@Z|~bl-eM$*eSI!Q_`Y9T3#Il( zn}*OamGX)BAN!of=iSfvh75(tx=;@54i6Yq}6U>)?^>pPQQdCkW5 zt9lPmQJaZaRp-+?PPC8Xu)?Ww zAE7?FEt~|sxga0`azw#zEwCeqW~?%H*p;}`9MZ2iPk3Y01Q6!&d8}IN|NeEDMhLi# zE{0esBBtolumUO)gPya25Hne1$mx|P#|=dfmcu#stn=D4Ro9<)8DFVQUNozz)t!dz zg&)_=Hzci}YKy{CeE0E;JMox##Wf4p_sS~u<3_15qy&}R8Hpm0){7snkGdyoK!E@8v84Euk*l5O&>PcRsAMML0w!pzzU=~ietz}I)67<+GQiJ8TnQdtX|lEw9O&8D+(js~#-N7(y6J zy^tLxT#A8xznTmkM6%jto{~mNn14F`rjF*W=(-;mt?C?X{9dFVV13x!xwt^pF_;@d2q{q{;U|LH*m|v4-}aSv2#=aO9;J-GNdMV zfm|tNK_Dj$wXISJ_plL9L0&!04V7|wi6c{!bNH7)?nia06a$L;@2@C(g_AobD&Dx` zC1iVmA3W@~@@fE<*?m}hp~HClXC1hW0++umzr%r_?W<#mzNT*Q$jHH2rK{Tlzm?g* zG?5F|4in)L?5txLe4TsE?t-(a9He6>#l}olEU$56oH1gosedBgdwaNDX4^os*tK3x z<@J|oF_*rx-<_C!KdTmTDdJ={dVPO_V|F=!`19}E@Gc2W26G0lASQjoO`Nfy)pz{$ zm7q&EuJHSq5Oe(GIQerULy~Ev!39wqwqwrodOZKoFSd*;4 zo^Gv_&(AbUy+KkZCf&c-;>Hy6yP1eVu%k7e$e7!Il&nOnflD0WV5m|b;Z=s0p`5Pb z;v>6saGye$s63NyDMVbb8zHRL13{~YC9cx%-4u@CHpQXV6!LZwEqvxu=#+R%rrIzD zq^nWO-t!R!*jz#IQ4l;t+FK=ZZJ!bU zw}!v-zeG^Kp|es;J*3@)$Hn(^G|>0PBFJz=oM8Jw7Km22rbww$2|%T3+5XF4QsoYw zjxPNYVhC*e-$m#m++bFD1Z5&S!>^jvFyES55Y{C*5YG@!%Pm!!;Rl^SU> zCPo)?DRYZo6=!JvJ5$tpUZ zwcqwQrjbbUXg6tfhK@(iBIB14FEIZLiWyKsngVZBsIB}9rk+iR>7*_|5#BmW-}Xeq zI(gSb9qgCiq&h^nOwL@E@I9J&#{B)lOR-I@Cu3G*)~*6KL4HAAf`Y`d=W}WigjraA z=UglP+SR8W0^mX0l>pE5Yw^Ou*rXHV13}wnz*EaOk+!!h*1*4=FU7O+jT^fI`6*nm zS7pb4pP9@kx+P$I86!+xn-uaQ(-(*BWrHtqs4VKJMn~QLLlb*qGgDytPYLM)jsXb{ z>>NpAT5vE@2J%4(NQyHe>@o~|XfzSF)+jquW_N^`G*AZ`rrcagFU}l@gBnE=6u*a} z=rY}wBt71G2s?x69ZW$Yp0}_p%X%GZCY_FwpbY+Jmfltcf?ZDQ1X1QnxNLz>ykVWi zAO1ePpJ8fhA?om@0C09ZQz%p))yUC-zQeCh;gV3IY6GXbnhAE3&V3Wb=ry z-OQb30V3*ZMj;_#2x|#A0{Ip@jntUWxW72^6ZIT^%jyn2_+Ca}bjCs0xD_pc{~7#! 
zb*R!?#II_337&fu#_YSsTa7k|$j~{t*7)u~J9Z$c!=`0X{=;yw*wlss0=t=8~5I%xU%P$@wghGqiHC<>L$0z$6uSIF| zsT4wP_Auw~O@hl@a|8_Rz8ngNQ-4mXOpiH^l`dc`f1WxHykt$|VECLS&1?}9j&IqL zlDPwJSlJMvJREY{Js({OySrf`%O_Y`fB;TNnRU>_4@1mn!_$D z>#pQkYZ(0RlnF4ia{jc4a6Yx4LMl*g~PhJ|7rp)JwP|(X{pr=u=mlNMuWIRxdy10za^) zvsrhX^_y~mt?I9j?pkeI&r+10n+LbtqxCk#oP3?_TinUc=6nr5hpzA(J_Z}w?GkvK zU)Up^-X64~;~80bRe9ucl)ZCI*ID3$$T#Y2iC|eC4Z;aInb2)u19nt*+qZLdL(FvhL6?zZ26E#m z-D(VmRs&Is z9kp>fo)@tPD!Pth*lsfIglee$OJH)>2hCi`>01Z(b|XV}DN(_n)QnndtVyOK$(K|4 zS&BYS&1Av84A@=Eeo%ln=+9nFC=<*6G+&Y7b6{CQpsWb)FZ^nB6_$}XE}gcgEE+6n zu7MegWp5vE|8&Yn2!8AA1>!*TCC69=rNa=9VTBL8qf_}5rFLvYqf0E)8Z=mW)3u)% zQS}AHT)+OcM>QfPm7U!9utCO)(_c+$Cl`A+oACw1T0>7spx5O%rQhjlP7{-Yg=delBPx84jR| zBa_*(wl%r3Yf%Lhsk%CTg8L30mURRw`kpW*g236%!YASXEf%ipY}2j^&H%sjndPV< zaS+Fak1&0|d-(*DK8uLZ$m;QbcANM}VOlT)o8){Pf4q8q-1q)%@9LwP!2D&`+FU}8 z&46bc9DSxezc zZI2w|#DkbSJ<+MLdHpf9Ht-rI+5?7iC_;Xa9cUJ*lTfL`Wz)r!rJY_JwLwZ+g+cO% z$ko6I!ii{#PH3fM{(>QKHFDOH%SxkO)wB7DbDh*w;a_VM9sLf)j{<)=~r+#Fj59IqS0l=R=4uJmO6@YKrA0d}1?@G*oq2rMR3NT->;jq$5J;sFtlL#)mPO&c0-Rl42 z$*sED%9PnFrtea>0lf0s^QION0WP-A3 zk*G>izbbx+++YQlM5Oy$bVw>Vd&JOEvl1TD{^O%5DJ1$2qFoYQ+Bz}%sjgbDpG#UJ z?{HP7Au2KSX(kj^V=3aszpGCJk>#XiHg%3P=Vr0s>2#9x_KT5>nXNlD2?PGUpNC6x z*6XpVhRtF{YH!;V9oXx6$bz4bd!OjFY%bBFdy7e2i7o95X9f_mKP4$3q3bXjaiH@C zlObOkV_A=3{4u)#9e!^@JL zi9JMJ5F@Y7cewa1^Lg%P97W8i;^q!m2~g$HY?&m=bd8g%tJ;0yXPvyD)$zTTwkW-; z4+9Af=HdpX9;D|_I&v^<-%|*a#@rH5n?a>$5U)JH$xt6(Hg(5|O7oX1jk`kFe{su; zZ%rq}^1aU%@;}eMhcxg3zvP0b+0x9ad2U@?W3*}a7iL|J48;1?#roT#{dy0D{Kp_V z++2p5$nbv9Wh4}kB8`EkrPrf+rLEb0$Ta`$!Za15AlLX29OzO>##X}9rj6=B@wKHP zoq=mt=}6B%WLi|Cy7rEt5|386Y#9W-7?I|+B9tNe9)c2;mvMCr1Sc(z^iWw641(29 z9AAdu=b+uR($p)}p%wp4ESCzykY~%+Rf?fJ6EIC;l1NB6d|}GY5y=2k!i>*pTf&Fn z>dr4L`>_yuC-n7)NeDGZnUPnP_6M^OI7_h-2?!ZB+#)2#MhF=9tnweDyFf53M(>lV zw!Kr7uwNT{6_s;fz!m6=7KFCgWh$b69J31td(hXlk>yM+Lb%HmE@Leh1q95~mVVoRxBqMnc!bs(GQ7p_}Q)uaI zXY1>;Y|q?!(ns zK4#*Ef{|DU{F+nB>&)r1=}`0P@ZmB{i1LvzlrNJToNO zS2Of0j7r2y+}Wk;4^V+57Ktk^%kdM}#9zMz?e^+pw_8i=Kv2qld9-!haBz>RCo0mfJ@_kN{8n4IRiE{QGVAqqf_&Kk)KC zsi^#`@Mh!(sqJ>JqfOzfgedHQ?0^}>^C>S~8Ov>hw|>2L#WMf5d;QukQ!vgukv!ch z#xW=;fI6@g9gVu|_GJ^bCQ$%Lf?A2E?Luz$vJY?digID8k?uI`4<#8Y*MYFc? 
z7{E>ZR-f}LO(ye`tCr$|D;lcMXW)&wSm$T}_#e7V`~RcMBuEn1>R+vv?P@4PN74C) z@I{iOcUEjnpeM9-LB9xla;Svjzxi5+F1#hmxF=k@`v30IM7qg_Wz=9n8eu^?J@?~^ zBK6UpoUVCjA41SSr~W~|)Bcb3BZUjLMiHMT(HXBg7bGZlB~KT@$b#|!P(Y8xYy#P^ zA*M-vT2AfWf~15W+AV!m$+5^5StYcy4sFStr1d5t%Y&eT!DRVpA-_p>b4Ydvoz+IX zu@?%?#mtiW?4fh;Ug%G_NO#>){MKEhDs{u7ROX2Z-r!t1lw$Ll_dc#HY9OqOu068P zsf&w2swwP}7sk?qSX3kiHnYZc+9DhYGcuk)uV64CD-+H=A4+5oRxfGA%h z8I}jht$^=i7`I}XGUm#}#6*<{s={T4YKf&3V~))?ZWC%7$z7KR#3^VM`o}Pe=LHIx zL%92kW%2SIoA0EcTZq;?U(Z=B4^)YR7ixs8GCjiA6d?)mfXOrNgrV*oz8J^OJ(B9EE(O6O}0US>WNaFRCnGT^;mFNQgGdwWapvKx+8&1V!GK}X= zg0X6S)wk0Xp_fu0TuS^!O^5{2qAp(+QY33;KS_Bc)jYc{#b2b=g%85KfVhmL)8kP= z7_ZpFP%XkLB2Nv6d1Q<&dko%Rr9rN+4zbkC&cjNUfhxU&2&w*co2fWIKnc=p-!p0} z18aPB)!8ie%DkQOchI%($4}~veh}+PJ(?%19d&lX5?iKN8v2G#NtSMyii>0RLtD?I z=*8yAL{;)!LtU zS)N)QNl$KQw#PvfP6TUa#2WKQMLSK%^g0q7=}ubPFe%4P7d#XPQn~jMys2TfD|N(D zGn>6(#WgcMF@gM2uO5^osK78a)^M`*>wk);b86|Um)t>uLj$PWLKBs9d__|RDwiuWdt1{odNx$n^DYn`4wigd9$Cl@K$sTL_2R7aqkya{~^Ip$GNu$)0kp(MnA1 zZ|+Y*GnK#8&rnv+u-wlUbnerm`y!zCY z08DUqKc?nqRE(kI$0%S#;6wR4jNf2gWj3oUQGFhzmjxD|tYs`z!$iuX=>g(i!ZR(O zJ9DWMWv3=BW7(pW#778TjUMDY0k4???x`3!b>ISP5^4gB0__w%se}s=a<;PwqItOK zCC!Q4Sm3iYMV4|AhO!!+X(N_sF~`&Zg)+#L(|DQEh7cq9Z$!XJyi*0Efvc075AIQj zQw`Rx0PHGX#KB4u`onV6_e!EFQB;b54Egen&T-1`1142JUd6+k*gONd_G+r+%G0mx zo;o|tWRHY!RnN`Ol{Fp$5RC`9J|(5EHiX7l)Ik>wWQ{7fAk)EJS7Bipyycp9$58zf z6`SP(2NH}sD-Fn*xk<{DLbfSY22Bz?qAEjU1=AYBq#`Rmc{qW&Eq!WW8NMs#N0MM18^Un;>D1WD=A zHv(p~7!%KU@mPjUqs~wRloXf_q zsv{+lDoYikvJsNh*vA!7 z5pt6V2)~WbEZ_`R)q5~5XbEwHn}XfTeHXtar#^3#o7#%SNs`7%0{slm3@j2*#AFqv zgLF5UpyUsAm)Mkg%Hzx+sajFSs{#;{mbec}7&IkxMj)v*Y!pw6NsHKrCFO~}iO^qs zgNrI|*7=uM9Q(u&`N_E0|(pGV@!zo-@*Z~~{i-MaQG@voV_zqmSa6Hiz zk;Oep`7k-dv(Y}dML}$O<{XOZot=|KS$+BXU=qA3Dvp2fbB#o#HTZtv_W%7(Ia&JP zF+31j&942qELidDpoZMjd=Wr4KuHYXYppRL+J1h2I3hPMIZsaQuPJZDf;Y@Nv5iGJ zpbcxf>~m_!aw?su&)8F1!~uH5vVSXZj94pi7%bK@wh8p{8eh3kbfsK1;}4xRJvcU- z5&3DpU+~i4VMvRkx0hkdZ}8hL8a@6ZJ2wXzm%OghZ%X2>b7AG0?kjUKMc>YmB2+;2 zZUQj|kMRs&b~^GBp|B{c{4TF-jf)MkW^-wUOjSt#*?)=lTWmk&+8bb%G!2ZTLwUkV zqP>j#i`y5~r@|`{n|BcWxAYv|{A&C#a!(fH*gK&`f1jV36=~&;gca#tp<;6%#&^I@ zBfozuKHvUx!e85)Ysa_!NmP?*bE+Yx3C~BvEkEG#r2LuwSqmRNV6Kb`-BsJpm! z+GnJ?+}c-T%}#f%rUoE63uBzD<((gIblP>Z<7K+pY$x@;oD$6NENvw0X+QTLFdFFZ zt?u2*S%5A=?#UafcR#ODpLYn}ZP-h9S6>sD2E1SS+)DO&$D(~O^(8;+4c3#ogzwA! 
z9-rny3Vhd)`^zL*|3JRu^cG@r(r>l%eY%-xQ55B0P@Xv{+O~Z=j0TcAKD##BF>*8d5a90bjVpTbFZEkvB=<4# z=d(HKpRwA+H4}sXr)+nnwH?3Ag5tm5&=^}ORJ>ijUfP=ObqM;GsC-6;mFUNfJBrr<8qs29Mt4I)vS?`j+$$ zUZH82DyCDWrODXVGpl*3B)Jq}X^y|oETSn>ad47&sii}aO)({NdhnK!Z6D&BFk^(D z8t|WDolG_FCR$Yl{5=OD&hzPs@ekhNOWO`mspf%rZs=Zp8+#V%&S;I7`azm-b@63IimAe3aXGAI_844@>xOb{e)`~%|m~@t4CR3SPwbPnNG7c zQu#nPb6BeWn2t{>sm9jkcq_P{6|*j^(R#vWkt5kF^w(6GE`;YSW#?N&sqA3-3?>^J!W|J92 zzU-oa_L;W@A)7*sSQIchgMDGMJ)GNj)9%U|DuNy)oT)E-U-|~b)qL1F6-}jl_(~9# z>TfG$sW=JtX(Y1`NAc9U4}blj$pT)%V1TK@pE7XhbDVvL{l}jOk*l;_^s`Uz}M?7Z3c86@0rS0P*5!U7WO<2O_;c-*gx1~dyBEQc67q96L&!@_2&|Ndc}JtG>Gqv#yFalu1n%pe02rt zhZ=u#%-s%*6IdQPo)?G^^+(uUQd}JiuBpmaCE}zNhbvh`vyZ`DrQGA`NjoN~gPATR z`(D|%ytloT0?2kW&bDk*1{jNI?xE}oOhAfrmQcobjH1i z->T!Ju;tU83o^5ZW^h;<6stoiaI6n3_utB2GcZ0v+11{y0nD1o9Qh*PT;-)-b@#oa z()X+X_q^Imz=XS_9)lB^ijjCJ^Zz~tr~jP{jNJ#!bX4Xj54@Koz#_A0G#t|eR5p|@ zrE}S8XQP==p8jCJX+)H;QU6MsT!=kphOFzE|CFe}H$@}EDW1wI%ThdE0&+AVE5O;g z9K!lGtS>kjwT$V|1JRi^|DUpsZPucm`unKm+z&jHpWV*jtWHd34lD9Ah%W{SS$|w45U&J)}EqWJ=_~ zl+**4A1%3of4q0NWQ2Q3<;N=3@-gR4ZE)pybAQ< zcbIqJ<-r*_tAUM4(9a2C=xW*Wq#hc)1e_s%iYBPyL;GxU0G0Kfb8yKQ1a0c6l)mM2 zRv^NYEdVpdB3q(vooyt7T-xn{<8|hStiW4KAm*#%q29KJ#4-2KB?= zA|2lBat>bnRr=eZgSq5`IX)+JL1A+N%5flsjoOLNBVQgLyQSy~`G$TozHQ-G_R;E44g08BN>OK1UtJbCEgl&4!r2CBSWd+lX5ROgbqWua4 zo4nu^7~eb-#_Ctp4^Pay8L7U>{O_JhCN_@w7*goCVndE45X$2U_RXi)1-w~6lX>^2Q%g*3ygqCN?DfEhOQ z8O6B$y0sZ{#qKk5zIgsw_p_;$48Y>&f?F784~&>V8gP3LSWmU@?YyeJR$JHn9|k~c zD1L|QU%!YRim6*DNP$d-jJ8~x818XP)lt0B$urL^G3q*&-#V!H%fQE@gMYNy7Krm` zydDc-m5WX&C3;%HIO(_VS!aH)x0zZ$c6IG(c>C-=i@xuvdKthxvAFP8O&uxP;{4{h zs&wMCftjwYK-(*E;@18DSpj$rc4=M1XeKK6-2CGu*cZH~av-6~!J zv8Oofc;-{4oN~as)zf`@U(HhmFL3toh-3J-PvWUaz$54RyW2$3hE-p7fFWX=MgwC% z#2?f3z5e+yMld`7Eub>oPZ`!C=Ca=5?QML=uM%0lZ{Sr+Q$C)ijb(fCY~Mz~Fwm(n zd@lCM)%{2Ef~r)y7?@(YJ5#Cu_UWc%1qlMK*k!Y046~;R35Ewkdb_g15}-A(B}dLY zRiilJ@#nLM>;+g`H1qaRM_xoW|T|;M9d-I6AIH2wb9>g64Zv27p6>M|y>fa%=rb0{DWx z+{WntNDCWtD%LjqfB-kjiZFX@FipbETq(RFNJXA~W1p(6Z zYw&1I-*1B0+d~$Y3fG(epvDw2T;HN_=s1x@#uhUkkgFH$leP3PMTL^_@q{>Mt=%BQ z%?EDKMs1uEm{h={^UkC2mU`vbHEEY5y0?e^Z4oB}{XAwHI^_rkT0~;JD9R6%CO*%~F_KbWo7#4X$`_352d@OJfW)zm zgS2{GpVymbGHU*i5@G!Q_(ui@`9^8Wm{oc}Cr%x+PdCUn_HFVblOg?-Tw5W~-zmGc z-}N@Vg*2uRSPb~79+j_pyTqKN`;3;=szBw5Q4$4);LI=xNv{su%|pr1 zT*ccsd{{!NDIwT!HTn*#p_pf~f^KCvXg8G7G!yBhh{fPjcgl6uw(4APV@S=u#w#Vw?Oc z7N!|*qzo4R7Ro%*!=XDYP7GG(nRnw@xcDBv2N$`pb?Q_ufL|;O@q|!>D!%FM0zTM6RT;HEyZJVNLcs<@(OBRl z5(o#j#hO$@(}t^sW<|LL6Njj57qd5p0m(>5j% zfRUD6${B9XG$UB5;dKxFz~Tv&aQIhSE}`juYh$ON&$_;OyUjHs|2{(%)t}z{Nanye zy6AxqS8GHfu-8oL>kJeC668FmNn4%g+x2PBv|%A$J*>-Hb>?t3v%fmVjh0XWmq1gR zWK)IqOr~zmB7)+DFyeMSD$Z6%@NN>5;*b%r8}D3Q3?1Y8zE8m!-(F>R2bma>MB;+5 zJUq!dvzJOR^YYh(BtnvLCiB6nxJtWU)D=fG>xSV4E&NTvju{PbpQ-pe->X@;6U#=Y z<=-ezNe7=&NsuaNT!?WPS&8wGM`uCch*J)ydZc!E2ugsRY&7Gq;x~R5ZhR5QGD(>( zXS(neDyrNCb0+0_!Iju|`5UN^=&Ls&;y9@^A-ZB*-%p6`lh(I5taCVUB>cMYNe#{_ zwIZClVrpQJd2a_?DP@*gncvCIT{MgI4a-I%p3(XSLf%D}@`3yUiN!fi8h7(uPwzHK zc|-LP-}_0f$-Q=(s^*BHcIIt*H>VD)e*ZXu5x6No;0#4;TA?GY4cA8Bx@Ux?=tlw; z1BtI@&yTbc<(t= zjB`Y*zlQA*8~fLVr5nLFUt$2Y#8nqOx-SmThW z2Q>Fgoz9)!*r2*U@j!diFmejtdZtj#?Mh{jzeFShK-B}}fQ@RN?H(;+lSPAZTQ@#5*2Zt7b z^qVt>>B3Jm{<~%od5k+|2#War`WH$WKL;N+J0Gv&Rqi6zFgY^gu$2E_>FbKN?tkXJ z1pJ%#A}pu3#$>(3MW|Z}8$~_JEApR=I}nq;F_fVX@z*mKqY23`cWYY7TT5g0?(~MB zz*juR+Jo#79g`F5hy#V}N4)c{!BGnN-MS5aA(||owrly8w0b^N4TnMKv^d&Wig=^to7r1FJW>{%NlAAYejwSrw!eq$nB&F|6-(;Gg{97oi^xbE zb7W^rfJeuspt%eI6fIJgsTA6bybml`a6FyFe(G}l*X<4huSUGTR2cU>f0)`ujfm<~ z7MVyoZlPIQRMn^uw|=OjP`A0H(?C74w^{?M2bR=PV+v^h*-y;KoAJLg>z}u|ZAl5U z{a1g7qWiI7=eDb}PyEw=tH-Dpm$u`2gU=hplK6 
zRnEo}(vJ#@@vaib>lA3k;yFRmhhZ-G3#v5Jgm%B}uZo{?dY|bUh4Oa%+3qPO)zA!8 zRe4X2_LHhR@Lq{G4UVWc%iAqDDxX*Bv1K#YZb8GZS>*Z_#IJU*U`O~i=@*;qnzP6)Z?-CsJHYZrO%7XHHh|PG# z9cQMg#|55coy*uD#C|vJKjVJipcgkS6|8Z#d#^5B1)b!*kOk_y8-L!Z3Q zMCFK^L>|j93-^gK&hu^aPj_7%w368RG2E57zq@5=NO*d_FDnuW>?=pAEDvIe4L<5s zf=>CP^oF0!8r;76F3IY60^^k^vvFFHKL@!_F5v)bw1y~1drIsaC>S)2xYpH17?my zH&QX;FGTVsFF*BB*MdGbrk=pFG30gHmbyl)p7f-A4oO23*u@N#g>@9wrbWuS%5SS~ zrN=*p-{wBROvX`gU$)|8g7s8Kg6&m9^!u}Z4vkxLWDbmc&(eBtX7xZ%sNiZn3Xfz$ zOi}cS^i%2ZYyRrE0_tflqjF1c_bj+e?`?=(Quk1j#3h^F5iL@@KFCUktyfULd-A8J zBpN^^8N|!^R5Q*6Z&;AGluh9`wy)6gX8nc9lS5sZOf@d6mK^QlN?o z{#jx?biZh1N!{XQV=iH3?yyX-GKEpn!S=ey2UIWRkCKil03{NX0=T;rkq#zB_is6fZ#LPRC2CSU-aUyjtgM7EzuSW!xPXPZ19fWcKUFKzt7S3-W-i8EkEZ;_D0Cdl zf93)Z!m(=dP$lmfaAlPr2VUr`c@QwqrHy0ln6VggUUE)v8~rw~t1XbidyTSecO z`@WiZsec|v=qm>ZK7Te8>&5nIir$ciV_jA^q~APW8+Yu~GQ*+3L4awb7tshx^z!M( zzIDlDwTNqfyPQ5V|;VdpVa+al>U(X}8<|W|loSpUbQ}Tv4H37?9O^CN67%}TP#T+0kHC=|| zH)G~K^-oRPm#5bgw?{Dv7u5n+Sl31Sgl{HAA2!|gtLt0*d7TYtetovgUq8}xDVl(D zb3py+wHPj!vEF^x?Fmjp^Qph8qvoyq9}A*x)}j!jmeWEodr-IbM1TNAUE*DOwA-$q zKhy^=(u)YyJb$=eLJZOiT{1Ed8!SZd#d+5vh;09LeCo05pA{~9GPc=MWs9c0;f5u7 z|Hog|U6BNqVakzAEyLjHVfK}ID;n?+oH*I!${w~q8Mm!ad&0Ul1JqIn#+^pzo1V8H z7gDIW(Twl}P@?$Iamj>9W`ZKD6T&w_jY!W_2~^85KsT-1&)nEqB9sUjn%_gosMjV-vBq6e% zQ&VkjVhSyNGAhwNd6hUaDWozK**WCT6l<8wP_QtVc08W-^UP&PjA;S#f1S=Qpf zXd=Pyi7YY{M(m3q={^YYc=!%l4%VcwQ zutl@cfQdU6O{dA^JZ_z2lTS1G1X}SlW)L_bvHT<3krxGSla{o{f$%xW^(Lq~xl(HB zRUs4&R_SnlGy=bHV@33_FT$kQUjc?>?8+7RxRU{fw!LYTQgHJCcJ+v12J zSi)Yt$Wer*7@!_j8|)Q8x*CT@sp2&$!67X!rxJaHmSB*maFUYl?Y0gdfF5QsS};|{ zKBq|ok?%d|3h123WmGN^7G=GW<^xAabQKGY0^OKA-5q!yz6M92xO=WBB4MY)bVFnn zO#$4_?9i7%b9NbdvJ@RWPrXZa<$YfM_Vfrm`?oxe0RFyO*K?K+Cmx?!`2udR&F{T- zLz;z>br&52-Qx$$i8Vdv&zyx$WPO_*w%-0$+wa5`#C&))@=41n;`r+>>m9Wh_;aaw zToLK?nV#zUiGS(f-;3TjTAxxN@Gxza=>IfrLOQyX+y4G*&}{a%p8 z#tdlkKuet-$Lw!m@sZ65RHp3T%V?(&r|tTiq@7Y-x%is~#XCDj&&3`I+rEDAAb|ot zVU=>3-fXUepH8#%r`hY49B1INa`HKpVp@Z%dI|0X99z-j(CK1C=|&eJD#J=YjfB2e zVIpNKBH~3Z62xa@kwH%==?}PHvJO3OgMQ;b`nHGE*|gqQ@HxM+`DDZO((z?#B#YEtwC>fGb#x$#d%Lz{Bbi!Y`u3%}@KIqM`6AK5Mm)I(#rG7%&n zR&0V@uATaA0-vrQ;SLIDJj1F}RTMf>v0y`-Ryc>#uk@%mLmtcad-{0n0U?iMkhJor zTkxsFQcGT;X$GTe;;9#D+exMTDX2(UOZj)ZMXWjO=dDbFpU-y}6PopD`m`EBW>pe0 zP&s781_o{WpO3%V-oFL1Ki-RePDTw)>Vk6iVx<%Yk^?xAiS8xAUUA9GC${;7Zou>0g$;i9PK)S?;O|IK;ni}C1K zz2N9pKz|op!Ll*_nkOsLo*V1F*xLBgqku?yhK8urs$CSMZz@^X(?(Fe6DK@8}NgDir1sa6l%6yTa?u5MnIQKNL} z2vxt|vaN4kq?tW;r5OFASrtZy6J2N+*-EJPwG~8604_p~HfYWD{;~&@@k;aije+dK zupfn5{!^KyR>{tn_)%E~BupG=DX?iAV(ai?f51Ll#~>c6MAlPIgT8~JrjP_<1Dvo9 zvBI}#ln}Mws`VaqGDRt*O&6>{U_sV)&dMgAr`qhhI6LIb16KqK;W{yxwP8>o$fVik zz*tabGogAw%e%_a0>uT{nUv_ng)j-iVlYE|u?zXu)|h-(Ih8>Owd_9NVrhk-XcB16 zg1*2S`=B+!JnCA5XX2$t0#7T-t) zb)DBp`kIG24ZK!-+KsDVmD5G;g8%HGIwwm00*$+)8WmW;tV|tiRXG1f2yiME$%;mO z<0i**SKIN^kIU&(sPJ91;gIJ<67LJ}0%yc$3#sf5*de#ttTSK#g|gP|7W`qxny*YX za>thSe5L6L)An4z_B*t2R)99BNT(VjOMe|xfwU)dN+L*vBWSrq=d5A5M&M{ZUgGw_ zCK%gww(;r0f0lKvHDasZ1CZHhBYNFqOgy=%#zk0w$oAH<{kz>=kFce}LWk*5^J~9n z>GKx%j}ezUJ=I>aOiPGjYJ|JQnf{Zf=x;Jdvh%-IT!e__CA1erRRZaq4#v3q(zVw8 zp@1uzW`O2~V|f*_!vhLC{J&!TTMO1!z1Jr+DE(9_tvyzcZ%>8xE}MF@_s(p3SDik>BbzJ5CyW75pouG? 
zl&6NZU{*-T$=1Aj{I1&7XxP?X@Qrwvn1m9 zI=d5S8>cx9SNu=@M(h_oK^cd%O7f_g2#c@J{&7Je-FA?Ve_8c8blE@gjAn*p7`O8w z6RU4<;-&Pmkp^6e`C2EhSp&d_$ddz)lxXsN)dP6tgX(DP>zFIRt#Wh>ydg%`#dG&Kbxa`jg7ZCx4ulfg<|bn0)4wgwUwdx0v~)E_`%9Zo z*Yu*i-D;bIPSJUUX3<@W;3N_IMas?yhzc2!lpmGQ+oajAA15jBl>%9YUsS4!zuTto zA~V`tA>9RT%k5+7_5p*)sosIKliCXM)b&eQQuqG;lqaF`Msj`-5iyI#Vb^xa=(E8h zf~G%!>u453>_UlIR1S^kk{H~*d*&NVDGwD!xpsGgg|$|T8!T3x>;D0$Kv%zN{|(U= zYBNZ+zdN+hiWnlg2qvFsANfR&mN$asML^5zd?-*F14Nb_q#E1uVvJIMjLabgXoorg z=f-e73h5X`Iv$wxSW~1wvr)3A1wX${IOCyC6Hv-p#~*B8eIya zOFeXf$HR-kj)(06oKBs8OS8lD7xcrW_3(c8#v;(AEDB__+-!tK*Q(VhyEzg>cgK0i zD5Tp8*;mb0AZ`rLwjxKOJ2JErS^IB_MIo=G4&M}8!uGMO)*Xpnu87&HXKbx2ell9{ zvZp0#Q42fzytw|m30S7-d_G&wmT@-$&t0JN5Xh?2ZM=b4OaeE5xvrJ^6954J|LmDv zZ__XohVS<)DkCP4XlFbA+}21SaR<9V+#ytj*4v_HNk~$y6Y$@0I)aJ_t)9F$ofVl zO82eniyEbGT=#E(Q95(oU!!zYntiw0nmczqe|En@I;&8+1b%-sJG`7)ZH}i+R&J^r zAm{=p(XPxdtH#;NtBaxZm8;QBj#6!3V*tet7G^Si*5a2+m zh8q0?oz1dilTTF|<5g~QN7ayvs4uq@Og~LTKOjiMSI??l zSMNGBvlNwWk#)v0W#_>oeee6S$C3sJ3$OaStc#ZwkCs+ZwQ(@Gw6$7bt?lS0kk>(0 z_tClA^ccyl$s4B)`=nmmtzTi97;0?_mw98L6r4hTqoWDzyCyW0F#9G(j{4HXkWuq_lD$9f(I)iSP@N>rrfTV zn&LP|>-R2aLvxa7YTb?zrEru*!dk)H(`Q1I(Dsrff-?8Ubfg-B6HFN;A`wbz#v;Zh zTq-3g#fhRqV-{tj_vhJ>5Unr~ic`X(h9-=E3Z;l*S_&?ubgpM{hnJ9k;Y=9Eic)3A zW6W&uPOwpa16Za<`>=y=O;eiU8F;uD@bKRN00960%$eJ6BSjE~?|F)75M1G<&Ij#g z9)TT*bb z%OFVg+}JSw@aLN!ukUvEkMF+UJ#6pBchm31``hhqcR4%^7gyV-?WXncx3^Dk4?XwC zFn)FM{5kaByJ7smbn_9 zX-)ANL12!6>_QgG*ag%tk&)G~=vKW_f>avYu_QE9_YJS$oE5hT&LyD<3&f(uWGLRt zf|8Kl{CuwJ#@U=F-Vs!Mx_-QWyT1JrV4=%kp|664t_=(8D&ePar`rgevoZ^a)$%g0 zAy&)kyv|9q{K;O|*9W1zLVZg?c9iv^7#LD2JEF+ak+4$0o*2s$yi>6wVOhDPt_MO0 zST%`7``swdd1NihF138(zF|zua05hDt_6AH^!79)29S7J!Nmx2{r;SvLbB^WAaj#z?x z4QSM8kzU=8J)JbUXYWIQ05XlD(L};lhdPp?6$_&2=U&R&tG2n^M;0S;g$p`JmUK|^ z2xsZ(p|EUUsGt;oETSWJz@+R`xhEi~b`*_k#Up80G3oOT8A~xBas=1$z&;e%my-Yq zaGi7_!#?%2cd=DArFkl-m#c?Z0bSKMO{XjD0*XOPdmKr_ib>ah*`#Z5iK*!=f>#q= zgTpp88Z=E{$dX&3H|KtvgaAW&U*1Bv~p5`}n4R*D@^xYM;zYpf1pgtqYJ>-aT}_z0>Zm zKc+VX=Xw7@=eP`i{09I4|NqQcTW{Mo6n^ioAT%`ChFKAHSC$AEh5{W>Y!ByFJ2$0k2`jHVct)#+blr{DdZ;3jEH;Ao z?V_%3ydo{~Fes9&ScI8ZzW(R>vU?(S@0G6*JcfSjyz=6IL(k=pw>9Yw?+g%w;;mQS zTx@J*#;nM_@|z(kwwe{umgwFXSFhOvNDdN6bEUPM|l86JWZVrenvBM z2Q#rB#6^m;APIvk597?~*sqbh^L9U7rp?Xfl}kf^h3Nig!m~!hM_`vLUQu{y1w6Oq zAG}8^fayDm=--q{bVpd#P|+dD5hbE|Sag!Nh@ghU3m1o1+Va&Mx_v-`%3iRVZQzP) zh5e{e&6=7~ue`b=ja+{J>0q?tObXs0NyW}bZ{fF-uR5*GEk&Hc-+ByEtMCX5dW~GF zRQ_~-rGGZ0`Ubpk2iNvODAyc}2LEW4Yt~ckC3Oj-dpQbyq{0!h_9cAc8wwwV4{?F0 zsL&iE?Z}q);Pa;5*TO?m5Y{zBQt0!LenfZkHijm|{qO1KW|U9Y5_k^WTAM{of_H$#kme*nbSurcS^`q#|AwKe7>>ryjONk z)_}*qn~mJ@IJpfoIxzK@-waOh4aT;;1G^FfyFRa3s=~LbhO2sH-nOO=F*($ttsVhg zrUt%s-Dyp=8iD2|Q99D44aH@GMwC@HZGWod9>JY9enX4ly#$7hx0t6Mi9?LjI4|N% zooNo5xhq*m?EG$J3q5LHcv#3yOS#%mvtSM_G?HP(vq#DeYm_}wJd^E`GJpN!tbJcM z4XSrLTGde9ng^tp8@i{fUg{+VhWo=xb29ns_JD}gU&?w%Q7zFu>GUX1tE(@b+krMv z9cg1z`~l-%+nbyx!$g;nX)e=nLGqG#4NMFvhh=KE5Kew~(dq(yA{59*YZp-b3T3r&Ah$Gfua zc*$I9)shM#YtCj<4F`sSX`CdVi9g#Oq0D=8xZyUBWwQ)|N~@0m0FxY)%zuFxH3cKQ z#;ShxTQ0cG_!(FA)yGqNe0#Uu_uC)g>5nCmdM0kJ?w78ah=Qi0n2F=3ho*l4L2y-n zIa(LDGpoM+_4Rk2iuSN(%d1^BgvXN`KRd70_e_iT3swDn4gk^`Yde5esm*L~;Ow2c(+RI0XwR-p6^eh~Z8_9*GT!P|Mg(6NA;rE&6fBcHv{mhRh0wo_usk8)O zQUSu1(k@5^i3HGKEVI_YGLFZgFi?gvFu;@+T?+Mir7I9E2 zfm#a?h&WNVL6&@C!)%|-O!%Dd5xM;GvVR+PI^QKr(tqZ+$IvGo9`4V5-M`IU)wv5e z{xPS>81sAtUqn0VBz^Js+#1{i`J>3X=YRr<^!1Qcf3==6`*5@R1*=iFQ9)mpV_XZR zuS(Jlj~LH3G%$-K4JhJ-yQMpt)6tnuao3+_id+|ENj636e@I61VzY6rt9s)8@~>!tq3c~!x<^JU z^06kxhTPVBQut4I{^^XY_g!D_Fh-S^t|fha2#0PQDeJl+*H^Ck6#34v>t2zfKc1t) zxb5*Y=-%5MAI-|^?6y02>j+}nW&OT&zNtXcN-6S=E(U zfA4TKB9|X!bekt>?l!wR-Ww_-f|$ 
zJg#kV@Mk4nDdyt%?xwC*KVgMdAA3wwC~7F|F@>{&Zv07oO)m~Ly*QNl^BDgiN4Q~p zGusF^f)}5;%RROV=4L9~;QesQD5MLae=jf2XXQUhyC^i6@oyKSxEQB+V7W!UzW+Ip zgjE1+v+oC+Vics68BnDO=D8~WA9T)-!P($-jLexRnKQ96XJTZ|JXe|1OUj&mBHl8` z=#YV>3dP3;J?Rg@dWB&q^K%6{($Vmi#=l1bChYZe(zh0hq zO=*jl{+TW7vgWnd^9aQw-5h^N1#o4V}>SkL*ER2p7m{MySnVnWo})*YqR;tCK5^$CJ|QN=DRUR zyDl#-Hf}Y4)Mo5;lNZkeagV0aF7x7tq3hk|)gQ+8KbCe?o0g=2j)q6_;;imp`0uf` zn<_6PO5w#8NvA?mNiyS+STdiIr70PZO2W*%n9t`25o-;^T8|SeDT!%pkCydqdEYIr z%ewCtwR26X?BYZG997qS0`#N>No&wA5tDhq;`0obPOht|~h7XTgBQd3uVRU6k*U<-(%si$Z@VEh^4 zNlY)lyK{E_I#5vn9VIO|2>w^rw4YW9N~O{QA;x_rz6rMiQ`$xMb|_o3T9~$V0sLt^ z+d_AL>(Lgv&)08HxdsnRT^Qf&CI?4ig{XkprDy$O={8MmdK1Xrv`RoMXXU0DP0)nq z9ZAWKP?^RpuLA(*pQ%W@nf&k4gvMyHP4K^(BB5TEmLc@S83N63a!f2hjK|)v@JqkU z)bfEBJTJZ;9b5LT3d|7GJIi(tbOE%Bsq+wjPvzE=f+uqgqrglZC&?4>lN~jEc&|dz zen@0V&I;bE@5}xKy{cW8h6GpO)Eqs0O_4K@1Q6|}sqA{+$w zXMxcqzUt?@`7cn4&~Y!mICSmyn7Q%X0a>qT-~udS4;tF9A;AvZ6vffrOm{sB$wLnQ zE#8`7&Y9+dD~^Rqa=Qilbr!S9i6V#)#9=I;Qi@~PBvdj2;4w#vYUO`Sg)Qdbt-_Rl6JP)lh?*HYN+Fn1L}(_Fpae-llNLe~2G~Ms zAth5H5K(-;M7Q8UiZuPccE85onAV#51XNxer1Ii(R9+leAt_VfY*cc~C{u{N&qSheP5;NZnps6O4X;3wP3SE+8 zf)yc@OMHYDO<^L?6v1Rbq$E6Luhg7s3{h}`CD$mS`g5(Mk`zfvA^-9SX(AK~<15cF zS2UQX_W%F@|Nq=sO=}cE5WV{=!ib)X)@N52n)wNyyotzaq6Fh6W+MvXf46!gdfA>~ z+RhLRA+TY)s%z@it5-GSlNZl_c2}G2_@ewhdVjIm?M{Yo!+3P)|DQh$BRmDn3QUg5 z#BOAVzIrtroo;S6#~-gRzg``mpPmdzYJiZ$j}eCPO~vtpqy5ifn?7T5vB5Nqk3Zf# zuiIQ~k%sZ-;X`9B4HY-LxZ9Vnmp9K(&$hd>w|M(uNh-tV$wYX|gF_sD#z#MvX5ddC z@6I>7`w&Is=;5JL+xK(FUVeZ2Dzb5T-Q@J~#T3+Pk8eASADEUqz!QzrT{>mKiws-5P4!Wk0zbauTxySNWDqHQ#y|3o9YHjShS8p%!O4gziZl=bB@ zy8|xEl`HndU&b`k9+}NiR1+o1W)%i%;WlQ8UkNd4NrXF+ zeX&a_IYHdm9hn z!jx4JTmzs8nmh88kcaUoF;8x4%h{ylXGJtnt)<@9lR{1!gFgsGAzP)%HW4}miV_JC z8#wmNE`(wya$*R72PrDkJ6@YIi%B$9j%sjTz7Z*pe8mLU=D(TY%9)cUOM5#xS0r9j zg&4+kmBn+y&F&x zN<~QY14FbEqr^<%L2@wW3L1ff$Rg|vAyP{`DJeYs?x-~ak;17lO!lw-dD00960y_HRG+#nE!@B1r8 z9CC^`27|Gc4fnQEPq|c855#8ImM|c|X?*B^FS|W|GzWX7MQwb*NNApU-x)r>zU`lt z3DzGTf|EuZ&LebMhVXKhR~9<&y9QbB;M~DPugxW%GYd?^R~rMSZ$^KdzF69 zr_qUTc7{nVAiGY50Mhg@yF^lHx`4Ajj@E#GJwhB5XlosWm{#}jMJWfPfoV)3agY(z zt1=OWgwV<-lvi*F(OYv8#>SNY&m2B~dM-LUxlybB9)g7>X$XUzbq_BJI_0BG;AoRD zGfSAx>KCegjYv zO9KQH000080L^WxSO~qz(}t6Pr9gjV*-2}=wt%dUtw4(oSi7MZhJco6iwi}nBo*81 z`rmg+$&T$vYNWJ7^2Le8dw6(xALlBQ2QS_|fB*5kO-t zmlf;%B!IimBnc;xh{*;##g`-p3wsa;X^FXFSc5aT0DdaNe6y5CPWXS8u7ERqx&H{3 zdR5OCp8=@835M9Pr1;~9xn8=De8G=Ua$C~3*Rqw$M53Hzq;Jo+$aR+M=D>fm?iQDoxJlop0ghvce%A)Ry9s<(3m0%HH;+tWKyq#%Nu;UZ zu1Pp}T7(BK^ZU3hS|YB9bWI^~%G{@$N0Qp_-PA!?QtT!)jtPfSBXeT%pB90M?Z@t+ zO)YZ5bIatg25NyeOl-od(lv0&N#A67_-+nW0cn~Nh1McROALSZsgMNCDJO7uU`=9M z8>L{#@JBiUJOEFy*(%EbAkO7=QCX2FH%{}=D zCgv*)`!ZtAA=l`v*F#jB`V4651OXZo3#3F!%6Kp)d(lEUco7^nRyu|yWegx-suO_a zF%t6Szq%9KNyr)GyM2ndlYlnH7eD7R-hfQZ%+BOdw)))52;)*1uk&3M3k zZRh}kA|?#{50-4_2FFpPw$UL82zlHmrw^CfpCc*{$^Gr~ z!3i{a=#NWDZ2?A^#3215QYr+@lqfoM!6nL-7U_SX20*_Na;*;y7IgcIV(o=pd$Ch{wwNlEN z6RT`YR7xYaa_}wH#vMjq6LU3Artp}#{=8aa{N2ZX-0tXbynoHu-NwNk-33#hs%*B6 zN9xzOE2U02?1FZG9V7|MTc2of>$gPvM)%hZ-Qwi5Ax=)apxs{wb&8X-E^%^ppWV?{hj++bJzS1S7&8F%lor0@Q7^f^QUzU)Qk@)!7zM($}xWk zzY8LzsKIafhT6U&0PP-1nP1N*V3)^;f4(tVU=!;>&LBKf2$V)%}l@jOZkc^ zF*pl?(b;KWmDq93RW(~R-s9J{e8_48r(xz!iIy~~d~@0Mwi*e7`1+ypze&r@KMk5> zF%wGF%i?-M;k0c!H6EhwKVpqWr=5SI0jyq?qhOaE>w+PjL#GS8U@#a4=B-9e!SL(4 zg2KVm4_|h_P(aq}ul12R*cF1!R74tf^`aNo3~%-{a$Z?{j~6#V*>K zSEx|DZ7;FL?BwFaU3q$aB{3tu8#P8HH+*!{=yg4k0zcU~PY6Zy;tr)>4%-rQ3Ek_fFa`gQhVCMdA|7euwGRc{myv zF3w`#owmhn!K+e&}RYA58~H-$Q(28PDg{t1f-cR3@n;1IHsE96fB!wdHFwNvsizKVp(fhnO53+SrFKZkOJVd$_n}YvDtQA)UevSh(4mOs0FdnLAYJZ)t07ukGKDnuF6sc7 zTf>_{KxZJ(SxzuI0CY86lUS*J{FGduyC)947ib@ 
z1A8|L^KNt>??!idxAe7>>7kRMQ1eDY?hV|@kiJ`)N6jCqv{MQxb}p({;DfLinxU3N^imC3KB5q#BABO6f*yZTby{o9UA=p;+C zJg3y{$a`PVG8SKCPp4mrBe@ubG_{*L{9+WWW#Ifm>w1B8&uNi`hNSBm z1^$zeuRF`Hy`2x9>mqn1-Hm=s3aBm#QfEfrUwV%jmq1vM(k?a-BJ0}YsQ2)|;z zQYNt+4ilFEK~t+i`~?F<_rIl~Mpy?CbUh$p}^BBqsEmH zAM1t&tk{1j?lwI#RA>JH0Fw^0aetb^UCCI_n7jVve-(8vPVm`xZ`<{!U%uWxy!mjw z+CF`o_C#}AAzlB+KB>*eLt=mad;V^{+1}na9eR3Gwc^ov``x$c`cEp;57e&zYoF9V z*Il2Q)crmE;m38R$|K%({n_3}|J3z#&H?#kp_Y{jepFENY6@bWRMRt4HGl31N+G62 zToU1`-V}k+sJ7v}lC2}wA105_>-)|6<~h)Zwb6(D5`Eax^pQvCqXB5S0<>|dGB1af zBXF2k9`UEn7Ohb;{4w#97bT&bJSI@4(G5 z1gB&MWn_v4Xv0K+1Xp9vM5zMq0|8nP4l)i0{4bQj3m`(bS;%1*_J5LCrGRpxmYxB7}yE!O*Cp zWdQ;nhgiU8G)lA7PDPqlz-Xq5Xwavh8%%D(Y4ZzS)3~Cekaj;-bypY(q1SRB= z%RDvWfLoJb&H*K`$uRXC|3uEpOpf9{7@HTUDQLcu0{-aD!Mc;jwI&2T5nu_E@wKLZ zU2oeq6n)>XAT%skf!X1wNT!K^4d~mjr@a(yVQ3QL2$4lql+plQ|NE{oCv{L#2{oYz zFz3N>V&B7a?meW)OV{R2cJAO66?N|1Me-@hen9)5^HcZD=WT+@yw6wL&JOLWDlZbo zwNeZV!d6Tq**jmkjha-02^R!juoRps%B7e!f>+O1-tn0H z3Cwszl4K{_{&g5M8j?aGzl89j(Z~VVX<p<-~o-J#lUx8t9ms{RUs41zIMZC+gG zTT`O*ao~HpRZ~}uNwQyWZL|K}+I9`Wr+m-uF^MxE3sgtGhY-8`+6*l+b#+^H3F^DN zadqCCi==@i<56;&B-=q`9g|Rh!1#NjsNRw`#et##=-L~YWWXOX(mk`bm}8}>PSsf3 zpM62jCE3}|zR0eCQ46Ak45dMbz|*<}-> z3Pf#O5AKc$8lRl8>4Vaqd>m~5+O)Q~LX|`I#_vi8)Azks!~;gmz)$6W%QOMM#e4T| zbqRo-MNfpgL~RN?E{MHZjAUtUs={0DSu9;&R(6iLU-X3S2Co4pHSxfNyz_W)iSq6WX3c`pw{8i8?W!aT?Cs)dRlVu*zOv0?)Ck6NO!8p-$2+FLHpC8n zJM`1rkWt>7G3!ey3+Re}Px^0pzZ@&E7_Hj6WnA#@ftXTEIf+@%n*B71TF=14Ue*%< zp=WG}*Wym{X65LlfRiZ0T48>4lBZ^YKGV@jUMCSLG$-DN=l|YG?C7KzCvhQ#I6mEc zHS@02M<<1xM7dP-#XD)SZTYtc0X;UZnZT(WTUP#aTDYEM%ks<9u+9eDMUJ@d-#U_2hW zP=V#*nRi={(H6NpIe?tLa>PXO6wc3rrd*ShFR~Y$y8nYAF-cRw36WeY!BfTb5`*qf zi~y-Mk+4ZqS}?jyNRwlKgdjvoB9+uwq?+Lcp|Mf~ibXL`F%)hY*3(rW1WTgEQ;`HM zG;|L;OHE1@P8k=1QjS^Z9(!3esiK@|p}|mNM`+G8@yxt`lU%wtf4~aS)m>e$UcJ{` z41W#dtF!!i|8{r3*^ck0?`M~{o89hWxF620HjkV0PY-ur?$2+oE`}nCzzXK)JdCeD zJ-$1>Zv5%k@8|Z~Ww<}T+&#YgLr{cT+8>7z-av$aloA+B3c@FL+0g?XLW_`c2txkEFti?_?}qWbzNLY& zOTi*2O3)>l`5jTh(Y%mYrfD6ok0jIuFvNoktM&_xR$Pp{f8^2gw*2LreAK2c}q=~a%R>ks%13{=En{PCY= zVetFSX7?>*K~1_~TGlQMrq!z|1LT>xEKT8hw=+uRi1VbEbcd*Fmh%Xb%exG~Lue*O z2o1JK_S3S$e{2OU97g0hHwvIamw~jfXHnD~gH}wLs{?(ZbbYCK-yq+$9YJm59eoz> z=-RwvUH!lm?sQLrbH1p7{A$MNIpkOKP0zC>HM8?1r-J5re}-!U11)&eL_q#2xEZI_!WGcK ztmJ!7Oc;F((vw{QGz*kMoT!jWk2i&e31l7sdHS^Vr$4Ju%?iCt)p54!`_6Y{VGt1O zX|013Ou~#5tX6uyd3e$XvjL%`AMH}zg;q~Ttw}L?r&5HYqRJNPuwvDzO-lkF7>DsBp{mf$}9Ib^EOifE{rzlAJC z9Gp@h;M8g>JYc2qQPD(7sst~x!U&Qa5;=S{GEoYsRI&w?=@}F1$*=0@RXws$wG}ba z=DtP5V7fs7%PF$PWANdnn+p*nB}X6n|34eUe^Nx!5F4U;7yLcFGfy5n)wT&lY4O-p z^Vlx~2W^8xNfilM7oWsb|mE;v82DRufD=&S4;AM}< zy!7C_Ju!pYLlQ6j)gP4}+Rf8Ia+jTKPjXQ;V&>m`F<=-nP0t4S*|YN_FMajHR)aet z-wNLB0}Cjmmwi!HfA(0|r`7Z`s3Fy;Ne{}QS~D$Q<)w4#+fSHy>6gB!W#Oe{6x^Wu zMwo?SQ;Ksh#nX`aIABT0ND@TYx6fJQyWKI?DjI5W*x`>@<3gxPlzU(fN=BJOhmALf zj^3#`s6SwiU=&<$p$$h-rw-L{&K(*UQoP3;gUSYdA21wKf9(5&L?H>HARIM;L62dN zj-wMA=twV}m}$llrG6aaAPJKwjz+Kycgivv#gfEH;KLry!iW%(M5E^2A{q zAq>X)K?uBp5qrgBZR|*!x;^ODJF?r>ST~Qot+B?lZ?qqP9c3~X@KGyxuhS1wA@N;Q zXepII^P&;}f5G7+N13czB}*i~3x%#0Fh&n-(SlcbDO3Zal{iG#vO^gp!t)&dQnv7) zDO$s>__8ixL@{3vl2v()t~emH5@idKUDUFGZ>5wqYFge3J%PGLJ`%Oe7R^0&*9gRK z(T^c_$Xutdsod2ZzYtxGL|Lo~<)K#bswn{zjy*8lf8Xce5+X~R^#zQdw!H&%0qNV7 ztfn7es!^6Le3?E;ZAkF6wSD3ugK?<*_K+Uzm)RxH*(u z6(n<&RslZcZ6PaXsS%9F6v8JbK&v&O+7iR-8RV{e=sSV@L?|(Slo!^Y_r}vW&*N=n#xEjVh_K&4#zZ@I%*D!u_ z^7vXE(pwAsIE?g`ER31lG$b-C%JB)tfA1U1I=Y~z*z74@5Z+x(s`Q9_JB+6@yOF43 zT20i6t+IFAO@vXZhF7J?j^{)*icxi~o}}ho4(fT)WJFo3)mq#lIxd<@8Y4Ofl8g^s zV$Y|Sxul4esVP_oZ$3S|TeOtai1gzu54B_K>V8@bqmUZI>p>MDp^RrjE#EXtY5|iK~30-2{cP)N0L-Yh6)NzWvqG+ 
zB2#k6A`Ql#TxdPYOs(?@J>HibOPd^@Vy=!T!}#xduAODt3y{?1ulxm2E%Pvb_s?d@ z@Y~h-;Y-+sY*GxTK4DNg0Ge$gr0UcYe^0d#N#Ue0 zz-mX@2AvfyB`S#2!P{L-*aB~JQ8(C)O#G?v$(s_<7g!(5{j0Ve?AuVpUxXsQH;R<5 zx_=6{-h1$z|JeX#Ed%rtC~Ntnm)Vb&sd?`F!1pvF3n@jZJIH4RYC^2zmpIb*5bV`i z(-}U2E71;sYN7(!%L1n$f69$LE~Kqd<@3n5sYqnrqlUts8vLn-xycT9oG1(lhE1uK z0<+b{V=WYkF&CI5xYi0UzC&3WTzN98BgoB6*Tays4ASdV3C~uEMNomggpLTIKDKz_ z04Fyq@R^2p$=XkYdARFzXCi?zY+2V$x^cP)uWn4@TwuvJrD4aUe@i3j3#plUC<>H! zOyaxW`Av^;(5jS@(+_u)^ab27-BwU#WAC!&g{!MccTZ{`X2Lnvv&58~9ZR8U2cI$9 z@l`|blVV8E=P>3layTlh3SQ`QEw(^Fq@I8?z_SM>cYG2iB1k&>(1GN_Nuw&Ne z9d>~pu<3~&I$cydE@Cia1t=uQCwAzMo|mDLLBJ6=KsyMl3%oDXlcg4jC~PV*=X%>C zEr0#ur2hw#am7V{g9s|Z!@=g5W%-w@i>@t9eihH4Y>KYC&TN*?`|jT_vm7oEBphQZ z5yo6$1ZCgSHp|cZ?r*XjjHeWk3dJRZTp|Lgnu75W8DdEVV;my{86}v#d@x8-PC1ex zcUUL|ML#8|3SZ1`-9X{Kj&yVe_Q!ZOp428>FixSOFn}O`B?qFMGO-kM$)&^`F(?&9 zC!l0hQWk>b5(>!?nXWOJ;tXM`I2DM(fwnwSwcZ~2(+i_9kvxJq9x-(C*{#1E1|1KH zvi#Th;pltP1?Rr6t-q|a>GU`J_-9Nl1LyUD_!5RuWck~lV{7mn$W2{zuOW)iNN@VA zOnJ=g)5GF_bEt-ONu+1Zps}0lmf>u8G#Uz2~9kt zvR)=ikrX8(K^Q}Z5EYaMv6{qt!YJjz)+8c)IfaM>!W>GH5X~_an>eJ?zUFrO@wCb~ zYR6t>9P!5M?7zS+N?Yl0XEbGRHl?(83|8#!?)TV6Km432{Smg>79k`0LPtMk+sg2I^cY)Y#_b#0e ztg)ul&r6~g{mY7GQ*68a!Ug#7kE@_?I{a$CoulVCIc zp!0>-J5Hzl+8I!UoY+PeW)TvY&t|*Q-q>m!8;i2?_f0xo%3B2<7t4-6nM^usy~oj; zrYUC2xkH$PcK@9r^JgHYBrmQGaA=$_3{tP#9d&pvf1MBnW(mP@Xe4JDQ5Z!D7LusB zQ>cYIc2K8!si;axcquStlr0fvF`{fi1Yr?D8IK8a(Tg=_=%(upJ%83I1@@2`&dA(Z z5=I18TsSD=Oc4f0U94a8NDBtO=ieTgQg)5hCI=}+)~XXp=^#PUxg*skB{CsIZIw<0 zSM zrts`cTdA`()9i}J8x*Bjf|n`v=B1ZFbD3e3ScFqAoF+pR2e!XM}we^|rf;Vanc9b0EkNSRbPO^?0J9Ta1Q z-KF3~?rME{gVA8r?~jKA*LT8i8VmKD@YiEPt1=Fo!UazF*D)dRb2!Bih{qAba|?%~ z3;^rt$AYWpPnYWX9Cf{c*DV*?@cqZ8ej8i<*2Zhkwn7~pXYepFXA3-DsJu}1foIIk zf3J_?COwk~WKE(%LHxTq;!_Ff;_kA*tT{V05-G}=tFsLR%E-V~azAMRl<*rOTwOq% zD(C6;m7r$b%^ZXU#co35m@p_Yq98i&X&H{#JoZ)9HOL7swj@wV0uuU@oUUvEUQK#B zt;3&rMxaV>h>$43Hw49WVFZ*efs)3$e;|U|(iDg*0(&SIXq6C#M8QnyhzqehQ#+(A zD4?V)WVO0u;h{GmPS!ZfQ-WqB3uD1^@Mc9~)`2BZo`49HsF=ivvvh+Lhr>}J;dgc1 z?jQ`@(1ME$@kMof2w41 zO-K&#m8z~LB^XXiBrpR7F=$I{N#q@3rt@5ZYJz02B+-JdZ}}fO#w#0?Eb^QSRhrF> zNv=%*zNo?rcI;emLoreb(BIDv${>CqRHmh~O~fOf8_67Y!0}*7sH?z>raF$#!OvMv zQ={{_@$&6q&>IsTOAo!=pqMOhf03$lBVEOp0l6Dx9{Ruw6hU}OLVzTdx3sS|2*%54tjvI zU`?B1O&5X1D)o+eX6w91TQ5X_$-M`POUrblBlCr2PN|*QAY~fcAg~1IZotu^O(vT| zY^!~bs}Ab@ttYCciQK|mP}!v={AyI{Tg;`isow_e@ft|dWyWdTSgApqayNL)mZ+Ly zW2J_++=ArN4fY|5U$$_Wamw?S*x9ct0WmVRyuH&wqc9^|tW z&|vV2X=Dei=V<#9eXx+@cRI*@nG`>7C8H|Q*3Sc*o;~~6?N^F zvybIXBek9aOlDsnO_zJcEYs;d`ZjiKZIZP!d(UomdmObW5OqGhe}~`0-xA$DvfJ~= z{$w(o_+&#E{?Y(ddz=2I$GeZq0-LBss2NQqP3x+|^2HxTUYmT{~!=0{41)vBuBwu90dKo-`6A`=d5?g@jpK6?I}~!I%{UV<~LS*m-wIc(PnO}c9*Jc z)cyZXqI-JU?xwy}onvqy!NRWNWMkX5lZ|#`+qP|IGqG)FW82!;ww;Z+v2V`#zFT!~ z)l^N@{G6)pnSQ(9=eZ_sAN~!z;CoTK?qPeC+WcJr{tKU*K+{~e+vog`ih{WD*c5cV zjxpstF?EUvP*zOAwvSIGFXlF%nuzO9m-M2dn@~_QqDkEvlHT6fJWjr|$@a9QkdR}$ zuQjs8YfY6~TcShytFe1iu;a>6{5&V!x5@`!FcLe&#`L~ai~!e}@}-a}wIwJegTppe zt)e&ByxSrOvSZCbdxF%HxSBxfa`4mKr&3Z6TZTmkJSDzOn1o}1UhJg7;r3g?OC_F= z629uaG%tpn2P`11kIDy0GQQw=GQPQ%ZE5qpK&|?;!v^-YgE74~$y&&EEquFZ=@l}B zhTMj%N)Fg`&UT0o)&V~P;c5Ws%foN+GKADid+N2}iG5p7a^o)72tD>rX^3;q^c*nD8v`!^%R;_ ze*aedMp;aa$s9HQA^IhlL^yFW!c6UsQA0^#0M4=NPr{liPt=`t_;*nFj1hvuGQ* zh^v$kQ|Ij}S_HH%F|$1#P2@sJk&E`@<0KpxJpA9pZy?^mP;rn`f&}hhNRh{6BnQ;5 zCn*tmSC;4lidhhm%(E@K^478fwoOsO!Lz#fx_eKL$hJFv ze~I1FQ+-$d399006kcilTuJNEn@tBAoDtlmSN$|=?Lv(RVw-F@Qvqdi!12#Qt zU$IW8+(I7W+!h!6jn+m%2uEtPYHldXmb_g@S9%4zl=NNt_Bb9ykM2HV|4`?MHF&Tf zB?ugiJfImcwcSl_g@!a!b?3F^CmPOlJM*UEuC!&txx3pjU2V!Wx^l?LS^NOJThK_f zz1e@e^NOF9?!7MEQ28~Roq-&KSf1It5V2*;=w4^xvO%AYCYMp^6YDTt?&WL_4L_`} 
zTgW2{cqH!{#EuwS1iQ{MjTr*$%CS7e` zF3%5gt%-Mv*}2r5gqWj3HxIcVKA+`R;UQ--NOClNSGXD>^T9;WlKA=s*lcRUk)o(_ z{U_H4$LnT!OcFbX3M;YA+H<-qnYyWkUua1@yF~LjF5o0O)v@?@MY0)0Ae-vG)AU2D zd!e{5#Yx^Hesy(EIJvl4C?O2ktM09(YHH~KFDZF~tJ<;PE>-0Y($Jt+fG;DAFrMJ# zF>OWB-r4?B=|N}0jp-X3r{vFF$N1 z^yDvDef4f+|4#1%KS>LB;)&qv=HjZ)&rk@Q3DRHc+PnwGreOIGM{lUju23^}JZX>1 zGqgRky4?8M(yA+l3AXelx!EI-7xz38bHf}ZKLDy!DzVieozx(`8Y-E+Uo+t?+Ca56{aI>B$HUg;Kq`(B=__f7}t3iHuJmXzIQa*rWR7p(_hT3A{Zp% zFTiR@if+V4W?tgf;x^KJAL#0;kOHy)X(6nWl4?sxkO1bff;GSbk1H`lj>|xD$%|~G zh`DLSE1@X6kxAViUXjU1$H{E4GMvj&@(jV4`x|>H)F@T->(H%HqVEDGu;ubwDUVh% z(`EaoRefMELJ==^iNCT|(tRFY6cuNQAQHVJ6ZE>rgjH?LP(ZYtRqefE5*w^D;xe*O zv(PvEn=6s|zeA7tpSOIigYQM%ju~JlC2tDy6~agIq{`b?rXA898$?%SINS+e+m%w4 zGi*?cAbi}fTOZk3Mj9VRz#{1}3DqU!?{32iclX;z-+yWdha-^$t+*aqA>lXS+pvVc zGKeq1*^d=1hzg=jdV+xsjt?0Ganu{sFYC#Ixj8Ebdd6m-1?s^W2Kk@mQz*BWL49!Rb~2nQF%*riBPzB&=>WZkxNUuIaYd!n$3zbE5wL zyTEHGTgn3cHfdX~5sf8P|C%Vb!VUKKG%TGC!r`Bx$9%)@h`^AH!H2N5vqCp&3v@jj_h@diRkqYgMaLqzEuou5{US!4- z2wWpQ>3}UqnUQ0$m_$4s+(+*mv=iV%W(!tmH5bOtBq*`<5@8d*U|^nE<< z&n21yJn%GD+l(+I)hzN&8tI*G4m%&u0D1mTY#$*3s46SP)&}ER%gJ6Gop2AS{3zOu zOihrV``Bm!8kX|BL4%R0Pfvs@i|uM_yW5ut=t zq^BQ?X~$B%nCVtgP3&(qB>wY$5y)qFYG&|&<3P5gY_<17WUNvOn9|y~m1Qs7!!5Lb zDfw>gY4ah)`noawLx`flcWe)v%S}Twz|y-?O27(831nkff!m&_M3Y*`Zsvr;`_6ok zF@%}poUh#K`ioY|Go88Yu83w)nhrxHkzRx>BT5Q0kmZlO14~_=kc~}UColqn!VY!c z$SOfe6RGsCOhocqdlLRKZe-NH8svd)09o+>j+$j=7;EG==%A1piPRPFl0p@Q=qeG3 zd?^+6q9KiZAE zy!aPsa04Vt%jV+L4@*hAcK`vEpnuq@IxsjzuV!5XT-Rmk2`D>_sNZk|2)9Ia5wxDz zi64(xL=nsmNqxhI(xQ>1=dwtRpek^piq#|%!o&NLHCPK4Bo16NY!QeJo*C9m`dr!z zeYZuh{FTme?Ut1jmub_$Q__BU`pX%l^vZ%JfDYj%vhlu3kPN~oP5^?q0?1JTQk7wf zcC{K22Qgj=4T=%-x=9;_a+K_&E7;63<&sj7U=EYykTIF|tg8bGwO}LT$?fL&XAshR z`tz4&O`YN*igl@eJ8tT}muSWM)Egyy!xw`(ihfd2#T7Clt)D|)+e2>?k2k=w1I0k= z=b&&dS?fd@{;*MB7x1U7w`SmRQ`dq_&2KQMuviknh-Cgw1Rdg&b;D{u^3zGnW~nmv z%cGarKfe{9=^3^BZC69W>co$Yn5nWzsrnwfa2vbvkn@jy{ZDhf6-&}<HbVP?{;TW~-JK8X%R;b8d6!`|?nGy*ro9 zshW4@rf2zc$XnpD+g=kWft}Bs7KRh^+I63d@07zoOQD)LyghPN2}g;Go{Esr#aaU} z=VmPb^e*?KHd85-m+*cJ?&R!6*!1Cx9>9QVQD3 z03?Q!(qd*g1Auqn2z*TeD(i8{ik?|5hR-k1H1Tj^+N1CanRdJoep`tvjEp=E3(o$k zVKYKm>(a8P5`OY|7M|lfWQMa4-TlkErzp}N$5W<;>*%wFRhosQUh82N?~7z`rGZIS zTqKKuv@vR|4|u=53$Nuc3T!KMh@V-hhl?CFK6u>6%?)?McG}lD zuUuyB=!+Au`Q-3|=ra8kdL^;$NOv|Tcs4+8Qr7+0~-?E%J>De;tu)#a@jQe~ae z#_O=D&PBb0FXEVpEnM8=D{y4%PZb%}`)qnjUtH`K?On?7!%{8BVWk`E*a?nj|9W~X z)(2hYR(t$g=ssuH!aFP;{S%xqs67Tr#rl#YL*K&1s^Nm9JHt2E8B>mbHOaIlzYTaK z`tNVqxrVI$Iu~-+k$NpHr-lCtrD#0*nMhB5FQNCTDGIv3)qJ&rq>5;n;};11N}{=C zG$fy4@moYNq9WzoL?+VDskpYRbkeDU@xyRTOPh&g_ajmB)Rf2PzQ~V_V;`^VG}7qq zRhLFA*{Wte^vrcv9IRZMTsR~KV4pUpQd$=)(Zus_a9RkqG3ir~*K7sigbb6a6?7D% z%@xV_1%K5t`1Qz*WtW`JI{D zN72$^r(>XAm!y+|NA#aptgvgMb@>LQ+lC#pl47RJD4ox(V10@5R4fUTo}OQ zEaAn(M!leic=$=% zEa_?FGsa0J$|`x4((Zrl4?>9bsqyp(UZPC}%Y*Zg=-qV%E=%U9 zv{bx_bU?6bP)6DbxJU87cm#s!+(+0g=XyO9xeaMXiWdSZ*Hnr zWq#p2+p&G|L497-%taH*>#2CvU9oasDO_s@ti8ht81^PI8si`Uib*b91jSW5-a>AE$>k4%s9^U={7^8LsoVxS}61@O6gY zjf(s%0=bwpXHS_u6U8bBH&3kee{Y{ONw0B#`C1Toh7q6TC-X9U%~cO+-w`gMd}cQN zKF2ow_-{7_xGtfBko+5*CM?l&9(+-AK72`eN#DM4gCrRoRSi>t76m}0y@)9E4bXUn zKIDf31tky@=0dELLZe*6y(eoJ!Aa={35FA3ef>uxA>+kfQhkZ3Z|e#{ z;llk;bE^GBSOr^q45@`NA?|PzK>sO{e4`LjqLE98oZsjF?|`1roV3FehyU|P6UC6B zTy+g_Np!}}e5vLXOY6|EU2TS)X23+ZyMSC=iY|}F88f~xL@;{o1Ba*gxU*P1*Ek)?q`*PvFxn!%|o)A#yoiQ#8#wcE)|) zXG?B0Pp6ztT=mNw7pD}#mAt3OP<4H4UdUdf^!2`fMBH1a$D!brDV5oXdWXaq0FyGD z4dpT~Z&*e7P&Q;0X~8bN+j(iHt;DX4n~LhwJtrnMu*myQJRm&*SgKATP}l`Oz;O!- zhY-snq{BSB;Z$x!)_;uDhdh4k`VEpi3M)yYQo6(=0JeZ0rnV!3(7qX8T&R=016>Rw z;}D$FHM*A~O3fkUO_&)O5Zz~h1eB7IEkM%ZfHly_5SE$?h#{*&Ua19GQweH=;8JkA 
zgpr_R&COdIunPz=5Ck?0&53e&(F3oeE0h$j@1fBCem#p5d=BJTOMXmc$b zoXnV~t`A>4cZ7DtJ{~QnVk!Hd?$ksdXIW+dKW+rJvkCM{%=axnOr!6>fu20_S3!_3 zwu^14(N8ki0&2?xZX;@w!hjA<1=dCvf#ic2(nvvq2tyFU--t%U>48J>;S){L>Cjo{ z;vO5M-bRIwKmWaOyYQ%lf;AHe{_5PDaewl&=tV~GOgYEJO@lRucI`te#huye%kr+0 zmciQ+f5q}hw0QYLfh{!m3kd3GDdco4E1p2tC>er}(vW0E1odr-y)RLR7MN?B6>RYq#a^i5$ZP?=inCQO}`TD5wj&*T3*S5qw zS<-7`)Yb^REjL3wr4-&!V|}!GwRW{XW3C{dtsv_UhG0*II0Yfl?Q^{STWP6hqPoT5 z(!>*gE00#USRA4Q@-I*0JGUp8hiatw?CMgNj6O{>t2w~OJ50)Ez1r*J3=FuPIpfOS5>BZIkksJ77RSoJe74wF8y5c+x(q;Gg&%;-$2P?_7IqYnT;clSS zoPET025=?Bnp&}3tW@9p7YEv@fFhpsQm;v&s?P-a0|E`eblfI2$KbqNC@S$8gtIR`Q>Kcv_vAwssxTiixRTI|F7V^w*@ae?E0Y03BO+dwt>Q-T?K> z6#=c}vLyw@USO1f*m*h|AO6&J)b|bYlbTeb^|j;^Q;&p72V3vtbNhrOJAX?le?{;O z4Vu+7z%)B9`)uQ|x$MO&@v0iNiX%A*yeQM}bG=plrGMqQ2ZtnVR4mcNof_Tq?D&~WO}2~Q7g@c3@%AeWopLE z^c)mGLukh!3No~E} zXsT-Tbk_cs!67Ilf-HBKs8pa)hL)?Fm+ zwX>ri`13+GT3&o(tDF2KcS7J41{QkHe@RP4N$a!;?+4O(eMUTUl^9A+Q*wI|{v){T zFXeEIrfPZaO^qeYY<|%6O*HRUS~FxYM04}pZ$NB8O?7w|;L&f!80upWBG*xWJa}qM z{DtQ>w|~oOa?0K9sd-Q&KS$#7nlv~ZEjVq~*Lu^zcjntolZ($t2p+PK3w+g&)wh=i zkUS;9X6qSrj%pH6;ob(h>Gbns+=4FF5vlbCp3HAvV4?3`SxWxl?TTjGHSUbjOV$vj zRO0j>sUBg@@1nVwWh?Z2a;!<+l4rLrrQRS3oJSjPkGn>@iFFT%vUI3BaBE=yPJW}89jzLFV)kMww2wi_|$@O`< z=~A`~HHht}`XnnyXMJBvrwe4M^KeQ=J^9cj;n$)HHQ>K<=rSo&Q~4=_+r?ShNE=`0 z4;<|YD6?6g_mzmvK8eWoiK^~|N4&)Uz4L}r_Chxl4u|OzzQhZWS&8LWL zzCmC{n&)o*63G4ac)Iy_)4oNW{JUv9g!xmb%(&3;E%Wzco(mVsL`lBY*dkoFb}A~W zY?`TWj&qiRgV%f}q~y2qpmqF912h$tPn zNrl|18R@sRoPoz&Wm+Na3x~HTHi>uRjrmHV$1#Po6UO$g&C#y(BW7omuzl)Fi zv+w-FpO7w7Ja$T`5HKnwf~S>C4LFi;LW1QCcy$+^nt0NRW)QiMd{nuZH8@R6u@!DF=*292>X zPqgmb5+flB7Z)M9vH~Wy(c7uM`>VMtmv5J5!=TOXIH0w>p_lgN-PDEve;GPjVjAfO zHA(}ylBzA^rsh$%0A73lKCG2N)J>E@G(Bqf#=7MSp851ToWW=dHPLI_Q)?;sIqk;{ zJgN0`zUfDuNRp%!FJl!r*Rt?1bz|^^#^x2sb`MMQm`n#u?G7E|`_j3@5DJ?Q1p8nb z$Z3&a3%D3|=-enP#4qu-tN`m>f@l&0wWDk|T_ zrtaJA{o-aDv?CgwS1|&ew@j4$sQyoGKdh7=J4Yj8VDF9QrMRpO3F?^=H5X=z0Q4Z~ zTU{T^wslZRY93TM>_tz2B0nApl_pXX`qf&&eyN(=LLcBU80yAgtLTTnU*>_f5A z^j;V}ap*n|f_9Vf|!iQy`Uk1O8KF`jq5I>qrAFM?zBuM&IUWnW| z2gC@3IwB_tg{(6pHVw8a*koZNkpD~1fo+|n-H3+h8a?rBEaE)P$H0X2jFb$LXtk1! 
zxDPxdx{P^dKMddRFIb?I1&y_#L*eWlHwSr%vDi!0H&(@s9tx0WI@zhDuIfbth%7YC z>z*e60Ny!~^7xoh4mN&^j_-D%6XNquLX-}?wI7SPMBMDGoMyq58kjwLfXc=;!T^vA z!puP5)qQK4HY~xNuZ-A&*@|u%%1n`*plR#5_nVGbaJ<}rVxqteyAJWU6b59!P;e8} z&u#wmu7*kO5OoyZS>4HYpj01X4Ggcoo<8KGF&$de90VXjr8aZ3NiO8AI51ANIsgEH`Hh0IZalH zX5~iNrJ;NwB9HhjaIp@?F=~#oT>Tp)apyWtbJnZr#F8_ohW^)x`G^ACgU{!o)MtyD zeio#J*Up3Z!ruR$LOGX_bQ>w7Q9S7om-viRuu{%7 zmK`neeGK?}0C!kZA3ZSWKd3;ph=GM7b}z(l>`GnFtFG65Fw)V>WaF9#i#hhJ z2QKo54fuLRfq}fp^>ZP;S9hOk`a*d5gSBWRDBmDX3u7E;O!Q+aUbSp(0eTQOxUBYt z(cq7Jz2-JH^?xP_&S64iul}dK5!#b)xCJI!FDxUTfR1G(E(Y6Cdyqjd#kYv%w0~gV zEen;=nn8({A|QM4R;2O15zMdrGXm`s(7x%1YGj-!1AlTl--n_+*T^v)^QPEqD> zV;aZsNp8fZ!d9slG6JJt^DEl)6OM;e1R>V^;Ot%3Ok%O6z#tcoXTjL%uz+Q+oz|WZ z6=rF(2G)rkU?ljyt1dy+m~l;d8Q~n@fnkmngMtbhJJD36p9$w)>?9vJZDXRsW%X=< zXe(6iGB+`%U2qAIsNAlN$vHW`E=f%Yt^Jd%+|dOwzSW&)p(M6zJBF=#yGsY1!>nGt z9=QiAGb2RU=vOB4R)}7K#tl~&65(#&oksk?0obhD4c`Xua7+!s1{_QDk2qldd;-v| zTceQ}H7)8@_&-{03GoRp2rpdq3D0+J*|c9?tnL{!XPBQmctIBKCiVU{`t~#0Yk)@N z=LIqJa&G-88fN~x+pa3de|WF{?-94OOnp-wFW?2=xf~p`%R{ib7@ejX{8PV5l+Rcu z9U!D!QxlclVw-CYB4qUqWfyK%1xalQY(myV3^U);!p9F9-Iy)uTCq^HwIBRU5=rCZ zx?T2gT&Z6NX$hZR*JoIMAewH%=)8vo{p@Owp;GXeS{AYXBafs!0|}0zgVz+u6mz#f zq#k_0g1oNcp@yF)xBc5@I`Ii@Y4duj{%C%Lu z5L_&5>U*GAGM`OIW;A@EvaHh1N72A?r!bK^kBtK}znqgRN+Xbq$Tg;GcQ@C{sjGfO zRN0-8(>8 zGuz^^SkjwSHnbwku!?N7TwuMmRx$U=)I2pok+-IZ;oUQD@3|^uSKP$ixFF(Gu6T-) z%NaVf>6ESGtkREikJw&+EhjwvG0aJ6;>!!UM@8wc^(V9zca{272ZF#>XOJQknW)8n z&^F)X9|6;3Pt1(Q+x`ceV#BE8$#-mG%8cfwDK8H`P)@^q3k(`Zg~R|jM*FI zZK?qM`fw#IaiXqpvpdqMvOf}wLSEw7eN=m|C|tKB9sJ7yfTdVvu}+F}XVDNMH5Jpg zKA^xes^_1{oSW%g9;5*GCQo}t$tG_1(H0g18Ecm}UCA(^h&<6c*)uHiS8%FSM(}ks zy>3U3P{xpS%os)=4>P1!q*O94kO*`DF7|mLv~L8h+ox8haJneglHhtzORn>nPBzGr zACbAQN<3WwI4iL_6rR#}`D?uC(%umUF2i^&hB65YHQ=J6hhi{{0zQm#?g;N7vJC`~ z*Q!uTQb>I&R6e5SwZj3uJC?_}}P$->P=NBK~ zY6vFCIHo0bwoww>p6F=Mi<|W35esr>9>M35oy`4|mbO-FONT$7#3)#3?Wj;mZFAzl zJ9{h_IJ^s(gzJOo-dpQ8B8&c=(pwF!WPrD+kYKk08{F~QQG3z@`8`Xkm)T#8Dn@?$ z9e4N_VDqPZvoGRUh9S)0A=p2F$q=nRu11O^#07M~l>hx}rN-yIlG68QQCPCgMM;N; z%emvwV?c6q=?BrB14dc|v@DTItT1wF>Y>;xN&ka*PtCOfA0nP+$Fsk3EO@-^l-&9+ zI}MC&>hbu!o5ISePqbf=B1D%LU4%sEG2+nM0OIV`tB(AgDRlo|^o3>_Y#4@;CLeTr z;!9~rGtP5sPR`rE?b+^hSPC<3x;7_$O6|Xv!1H{vr28uN6?j^Q-4&b+#K{dM<98W% zxwE+$(gk7qv!zgZGm`;*F7ih5xz%M z3(su)&{=TZ?|j4M<2-2EwzxJ8l?-o}HEfnom!UcI%zjthHkC8fFdx*+ag}g6 zdotd>KoUzlY2D}PeN;^iLeqIqXM4`DO^p&K5DGgtA@+u(%*WdImJt--a_h)#Q1xsM ze;_Jnw9VQ{?tQOOudot5MJx{z>wRX+t9;edw);jK9Nnax1AJ2oKAAj%SCb_tG&nse zRRSVl#slQxi_#M4mwSUu;7w;DtQ$caNhAZD6n%ZsV}dAy0?;-_p9xch=%q%ojQ@0a zx*>nA=fIezvwEmrV?N=}QstkiegE>O4F+XmjCIG?>CriQdD zoF%akuaDmt0~c!CdUQ5B5Zky8Ro$x4FEr6Z8jCIPynwW#H~WHw#J%&TvN6 zpcKFqaHm9SIJefv(a>V$K^8pzhRL!Drb&R8{2kyp3yAWALI*w1A9@~V2ah1c-`sw6 zi;;9Yg(H-aZ9@zo6R$%qEqHL+g*Uf-zhfAyWH%wn_T}SY2ks@sWcO(g1PN37JA?6M zH5Z>TM!LJ;l_OirL(aw`?7A#uM6^`sQhqDmt>Hp?7qs?T0wFbHhuw7ao+@>WxFCk5 zN?cOu1a?JGQ9|8W_F0F5LaUFOiZ5DpVBWdx6Yy0En~Ke+nt9aa$j6_f&RUJ$3~9S+ zliO8Nvd<*nJ0+Uvi^L3R{t02sgB04W+gs*nHJ_uKAMJAE-&1|`Jcb_nznTEG7fok8 zbm%ntO45C<%+voEcsS5HykbwW$_5=~9DbL{0tkuE>ft`NUCvzoOEgze;ECU=izDM7 zi5P@)6yke@w0KSP%e<&1uJIG#WFaRFi9SMW`Q0HM4s));-O0c|N*KF*B&KpRay$<4 zcAmP;vW@4h1niYg{KaI~k}UUL6lImAg&Y&kV@ha+zA%zq)YHLwiOZJ+9SOlM>&}ln zK>KkbDGD6M5o&%{sS~R5e5e^V{z7H*;+bsrzGBmLCiVzzig)^Qh%g@N=#zt3Y}n!v zCQGr(a*300IhY)lfD_Vab>z=O{3>iYWE8JbbX2A#NR+jHhpNxKFxH$skwo&U65^jY zbdU8xScn*8W$h(vm<|xyRl?6D3JbYPz(|{tkl<p46S+VR%1@1 z)Y01?-utc-1A{{646Ysa3iel=zts|0{nF z3@df9d&_V_WTh@ANuEp?S7958#~V$I#dB}$M)6cwkJ(E@jh@XwmaN+A{VDazZ|LM5?UkT214KX^gwI5g0zIBF} z1`*G1f!PZ~0ciAK0qJ2(!-xfmvAT|n?kj~qG?C<)&5MgL^x7!h&hJyZd1cqzr%AA= 
zPUZInsb1vyAgF+V*YtpI^2_?ivfXZ)cPF%Xv{COl5tK z3!f7*GA6|^XqIM9QFwjuh8Z$xzq6Ra4Zk6ijV7wFkwW$+qrf2~3T-For_F^pF!NiY zf--^%Ny76%ilX9};&1(?Pcj+QoW%=9;|Zjv=vfV*iNzKTK3wPl3aAPnH<~pMp2NDn zwnsijZzWfMp?H1R^V;gBOzAx}@Y}er75J8HTe>O`Y5Kbfj;ZG5d^(d0V-YkS>A$B- z-YWQrzfH5OP>*m&5(~M1A__^19t2rX7URQ|81D>nZ6ZTbDhr{Zf#ZOO*f0w7OmV47 zCn%r|nUG=_fe*$4Qa=rU9!y^fgpCev=_t3Pm6FBB#*rYw>zttyh8C{0%jYi6A)=_Nl{%@_;}oA1yB0JmZWMwPr1h7pf!i zA0C!mx`|T87~`u7pbv+o5gN6z2M_TZ!HXi>SmWgjU8NJv=|{$+E(gnUR|cwbx=Y4j zx{kLqPPYEQ)Jd2rmpiS0B*H@tjZfRnjQ4yjUpb zv4)8DU7Z)WS`MG8QDO)<7YybjGZOU9hr{D^jBD+3vCt6 znGAe(fjtm=L){Pt$tp}E67`_h5k z2B}J`C>x?b*f<>V5OhkGN5k4FMNVlzwk%)rST>vY58_0iSd^`#j5wP(z`sI1pb) zDV7=})b#l0WJT)(%jb!JAnd{cet|J}i%cSkMRp63H&!xnN=hjV4xIqh;o$89K6)cC zq)GPq9TxFYEKsoK*EWA}fJ^7V0cuo)SegXd#&eJg#4*Vh`euuel9`j>iYI!C{$f}o zi!UVF04tgz8<-#IGhP6u@>r(7nOns&e51^e%PR6+ei^&s1Tz6NPs-Rp5(gPYlxCLV zn>7SM@%Vy)B@Ai?2N}qEma7tVKTavo^RxeV`rAgB&}Bx5&`(Hdf8Bw+^cQB;mAiod zY28e!kK3&?BX_>iX%GfL)h4b~6W~Qd-X%B#zme7Uw2Wlq=_Lmhi9WZIqd{0`pW{;m zn9v2n8#dBci!AU$acA{^RmeU)Ih-*ZALJ2eR`b1Pr>`X;5sD{FwNn(-IJFwAHGZ@GDJ?g|9ipDbdRCpK%A8Buwbgf;OWy=O0;hbIDtVIryMf(!hF3}St+iKbdVp9 zRn$=HWCu{2l0o^G$i(aA3YJ=rIT1gi0 zK6;3lGr8t5tH$L?xz9T>gv+g7q!p~n$ytAzIQ?!Q(7DCD%Q=-Gj5hw_*AWCOqBpnn z#s#CUPMJm7;oLbZ<{31S2Zh)wmXVB~_LvxGU(00HzQW!L(CDn1Fs~3?rK8AJsN&Ej z!+&wJ|37SYQCAUU7X%21b!r(DC=ZZ8HyYm1?HfzJUWb;J#@;C~^_TatKsZ3ZnyBV* zyIW4NoQ;$F+XRz}SyV+`s#UiB(}z_PZuL}%5aZkI^K+X%F3vQJF7pJIajXLp2arwt zYD~f?q1Wm1)$^soJefZ^x%xAb{2eryn1U-uW0IH*w}5(33FXHFY&QJIbsV72!m$`7 zbNCdDa+mg-u9st1D-X1t9#VXKe%HQVafi1KlW-UZ#~ZFMd_YSANmS1HTz1?V1!u5f z)}fJQfEp0>$_`ASEP5a-;v@ft=Pgx(qew&2j>wTrj^oXuy7v7B5vG75IZ%GSW~*1u;;;Ir0dP>WZXgMe$6e4HIBCu6a?J#mx$Gsd3lel|lJMbm2` z_-cB z(HWTSw&R*E!BP0-2QMpDgMt}Bomit{`R??=<;EaB*X%MRkCgz2w0yZ6p(dyboR0y| zCRZRvkx;yK#w=0C{ArM{s2D5-WULJS_3B-pSXu01a$z%T)4iSQhocU$QleG5#b+IRY5JAtHl2I+?170YUEz? zN}E&uOD`G|b436T=>G9>F$7mlsU+d?!Zk}rq91#H&!!4Z$+z$Dogel@fb=w zIS7Ze&VUiRq>FD8d-Ut3TSVnrb6>7`i=)9qmZw?>1&yh| zfW)+3vH@J)H`95Kmo>`^oV~9ao{P4XP=t3&p{{l<^`6u}kp6&?<9P|GRktp#qm=9= zytvq&z505rtDM|FyX{|U;qA&~$d}p-*Is5b`)IV``T=Jd|6tuW%NEIeR$P<`z!3u(eI_vm<5YB8Le@$0o6^kg8WtcZ4FHjJjiW|7rL7T&IW- zL4Bn{dlnccH}kO)5i&7e*Q3W=%C|0h-f-iaZb)^CB7u%Jo;XwvWCpk0l8bwx0Wgdc zFj{!MEYc>EC|l-n)+~n*h#)9Ydh=??slK6LL!wM^qbL_3vpJg(cbuZD3=9Cu0|6OH zrojT7QVZ%@Fwg0QB;nkwv*V^?Ge~R6Q!%mk}F*F^*M#=t3#L|Y)Ko%Y~)y=|=5e6C|3h7d{ICr{ibexl+^rtN9JjJB! 
zrBMcVb4C}V)TGMCYAnS%CkgcU&=y*iiJ8?U=PZThY`ReDDDLVSWhv&C)vyU?k9vqv z+Afp|QcjRL_7(`e=8%k@yDAzgFzHB*)5QK^wOg<5p8>UN6}9VAs9k5Kb{nB~tU){3 zVxvL1sk-F$>f(ie2mA-J0u*6Xvy>>qfkp(GE?%D)T5=?Qaj+- z=cHL&fGkw6uo@Gm;2Rv*wmPR}oh#d|DdY3`eA)aDt`47nfokG}IsJdwI;1M`AHwR@ ze~6w{{*gz-w?GsVaIKFsmfYvWl#k)Sl)_y&hb{>5 z0pG(ovJf;>fiS^N7)rq4Dv2t#cgnigHT-w&AyDdgM4rSWGB=NC(;}#h!AIMs5~i~2DYiO5i&OMoa5V0 zA^MPU_y#tIIZ`U*1WGB~bxvCHh7%h!V>@6KYY3u$#i5ELY|${%@cw{+9TTU-fv;^w zp9ylHaNEi`7zwimAAb9k2_c8kXSC={kX+FvIBSc>xamb)1jwIPs?0EwJjYBbOL*a#mQhL@{-Lt$I$0 zrY=veocD~mNXBcg$v9x+I5ofZcs}1Wim7QdAE%R>BRKLT9~|isnUEU4P7|dc z6(t3S5J-b(En*{B`m;tEvWFEBx7q2nadN?Mi5q#3{wb=r?D=dr$uCe#0|XQR000O8&26e!C1_8Szn2-F z0x^Hr0~?2m;Qku1O4!J%7ht) zO*nvGFRnyn1W!7C=i?@f@)^?045Tz!=pa(cLz1uyPw6EiDj`I#%}gYjTzD~6Ixv5> zwg{sODqG#_H$T6*`RdBIvTI-!Nt$V-WV9`EdwFy6>YH!B_CJ6##(`TD9vvCK1Z&5P z??k*r5yJ}NvNE7WIoJdabm&P3(RlL`9`CrxtqGD2#5j{A97iH1YrquGNS-05yoa>J zTrsS{0bGDiD#Luelt*6pJzapF@Nj?k6)g3Oo^vmxTxrZ$(c8>JG1j;>l_Jkt#)59Q zbJFSdyJy2z#|xj=7V3K8FGWIg$F;g}Ctmnxkq|^S9AThdF=BXJqC=qufc5P6T zQ+ucrC~}?Uy81j?cZ*peZqjubK@zMfpSB{K5LzxFcpC%~X)1Wjq#9V|*nn4=?dcxj zMBEeEG8xlbB{{l&C8>ShNO+@Qry!R9^Wkii=c^w8)h!#%ttreJOKck z@OVUEPlFP{jyXIYlQeu_4?KaanE?yJTVV}3Uf*mObO0|f(DMTbp&fsYP!8S&N5*8r zS7Apq1?yAD{@6rRp*htFQW=RTX3j^H1EZ9Wt|4qow+}pX@19d0vD|RLn)SeWFOID~ zn=Qb83vZTg=wfhuTOT~NQUN5RLapt>)shWqafl{4w`}y$E#SUGC&A{Ks9VTR!QQ)P}a)1gEY}X@A)54SS{k=-3RvjcA?8_bzsM*<4mS{V#o6V^FYVf7ze_HhPBBtk9v4 zDguA1%Grvk28ab^w?!jYYNa>ICJF~r_wmz)-Iz50zmQLN9Q6BxUT<(V=$ljUv?S8i zQe`~dw_8~`S6;xILYA(^w1q6F)n!-e2mb%U6x06z00960>{(H7;y4g~@2{}3x@uQi zAR&Q3TOzG)A5VSi%c)u+Z{ied6FZkU2WWrQ|9;~Hxa5dkOmiF$_tFO98GkdL`99lZ zm<10-h8YSIToendu!7q^N-pjrEO7nwFp>k>!`4=Sy1vh>U~&6mX2ZA@Fq(9`LlVc} zCFf-}pwYsb`TorFX49EJ7+b-sVxAk+Ia2}A@ls@^C=}rDOn@J96_$nI>C)EbOM`!j z@rd+-D5X{~R?TmmU5{2N6vLYj)K$t~{Y)DuNBH!gG z1(O8f0x_PaIDud?u$2y^vjc4WgOnK}3Ej}#LL$ekNU$JvZM2KU3Is_r`PL*c;AUY| zlc5znZWNw|ofdjv$BCZ${?s4Oo$-HMCi`fOTEvvXi|cBSd;vM=sp zR}qOmnV(ErKrZ!{4QIozw1DN?8ZJ66mM=NZR>nA8#iFx{qxwkUdrH!6WU<}&N6ky1 zvuT8CVzq)Q8|dll{f+xP`ddrT$2-bkz$f%`{29$&>o=os_cz{in?+${{n!j13L{-K z7)VA!-u*F`z@`E)4vLVngfar^LcSi{52rA|BV9L&*TU~k8DtZ8Vl`7gqcPSN} zJ~#U5BmtfNQ$XJ#OUGI8CLUzLow}YZrp`N*iqxoB#+^!Y#yUX+5e?N2^%zyxod4yy zf9ZQklgK!j@fl5_cV~9j11o`ZoOWKe_Lfj>@u*U&jZ?!VdIos!2B3k#vw~kbo8>Jl z#dV-Q+*8S^CW0L|84`hGyUxs+_ij1dyeZ*l$%*? 
zM;H^`Ik)DN3&mDL6OM{Uv_|d~-I%vwfAw}_F1A^JJIj-A-`fk*$;fXN=Oy}m?$3Q^ zJa;D5j2{#gthajH44()q_=(5Kw()IZ`s*O{$H}YEm`in7(^R_No8qLVF>*B!PVAAk zw@wHqhlqwdPXSJMF48)jsG^1QkKKL#F(wyqm!YWw z6n`a3q>@-bv0+a`--mTDXp4@8$f71uYmBY`eTR0EI*Uk~8Iu}_9a}u#x!)*;VHRyx zQAuJ>cvetO(v;>rPx$r4(22IZHt%TJ2y?QF%J);he~wBt^}9jFal(^?VvGTqrA5xO z5YQ?r&uuJ8oRKugv?zGSd6p-y&#)kjQ-9#baGH}ejP+a`ah!o_DK1z}!CN>FnOZ8QJ2fVA zaGz`5aMS_k4Mw8!>_%Tb&|NH5CE=s%;k_$ADqSxn(rcmNtsc+@9yh2`azL%pXf6Jd zNVHp|dr`My(MZ&ZUTmb6{eTv7DSyU6!pT-5wE_f*8nHExc7#(zG5Sr7XbIBSt?pGd zE~n3-#TLE|L*3rM$!^pGTJ}iUsR9eo!E4Ze2=#Gs)AMP)qmD}?UCy1 z8_>2v%W=?Zvp1L#LW^5tYb2_w*0nN6CJb{4z5~x0_Z`Z$0phVkvZ?RtJ~D*u(15jO zQ45veNx0lrQCZ0q$TT+6CtKQORQ|LR`q!P4i4XDZ^B|>+Wtb#6H9=Y_zYMxJ{dgu?>9xBg^rg1UM3=cx46PTl>#A;RQ&=Qt1sLeg(2UjX~L?@fyDu;8>`YH2*j zh`xk0nx%UmbG&tinC$rwV#+$Be#^l3Sji6brj zi6D1+PHSYAb?faGRJU-x*W!}`m|;+L^A?(c1@6)p?$Yl{;A#9l=HDyM$c$u^GfYxu zGX9+VG)-soFb=Y}*O#@TBD=PqBrj=Q-vrwr z`=;w$1ev0mRy86-y3S>g-MCy2M!4W+xe|!BQP;WZi*p=gZxB<4RjQ~| z!z9RFe}DFodh`kRpWAww%Ywm|((u`S*n=->D!O5fBC;{6F#$1}irQuaQUi%TVKJV@ z<4GFN(m0H;;A|z4&Scd=c5&ZG^L@2jmUPGT@@oIvE#ukE#ht56PatuBASXhhrCQVf zK@?B`-*(n}enRTQ!{|3AYN>rpUX*AEZR)y{2Y)@d8prN(P@iGfd9@0%XcVVc(JYPT z$#^Sf42VxP<70RGXCI{DIU{SiB|F(@Qo+9^5%AMOE~h2` zlhtI)&6-e)23*kFl93klnPHk7p$8Ww7p$}RTeD;(_1;e2)J9gM?eh_NPxJMGPR;Hc zu7ANUC5_i&l-u4VQv!*BlfTR!Iep)Wob0eEqH_ z^_nV1RvJv(zgmrO%mtv`{?8hQiOQ~Dg&`^VCw434X7M?=)r@YI71f(UZbh&#x?ztb z3~dU3UfGZc3=KiR2hGB!whkVKXibocihms`WlpWH^*h21nAR+pRmDWX3UG;moY47t#ChJGZ_jXSD+Zm+-9?5pA4e0(~*lsIz;j&$TXP;m=LcHvib7E6` z`@q6|{^oIDe~Oe)4ZL+>=ckV703@T)x$laPD+`blLr-TfpkM9l17|<*w>VA@{{IF50RR8pR!vXi zFc7`xS6E(wKq545BI*!oX|){pxVH*b-o`2s`S8Y$mWt)S?>HeqQ5@CY?=+VPq4v_etrqYS=?M7}v+0vQDaE@h`PpRS29pH{gAA;};Rz^|lU=*&AluDrc zha_aQ7}?@a;(zd~qdZ8aBG2x#dR(S`+d99_l!T`2*uJ=0Z)+}Z5347Lqj0eZ7Gz<) z-RwUPfd82jlQGkxkf!b_*&W&uMP2YL?OP&i(Lleh6E9rNUc`$|F56~uiXP@?h(2~J zvzx@Dt8U!iiR%Dt`;UEdu#VqalQC-&toAekw(Y1hBsQHCw5Uq76Y#HJ#6GQAI} z=g*(F=eWkhMaP25>BwW!BiDaDA?}}3am=!Oa!U`7ve;?rbvlE#%8VC)z%XGn3Y_FH zWtNgKq1rGz@^Txujy%#{zO>v57Jr-wY7CDTTuw=>X&&W*NlUDz($LUyB@Jnjmvbe_ zRmVT{yTf)NxVD3&DI0lHCYh#IX^(_TTF^4`&gQ2oM8SU7@7`=JxV$8Gk%5$DbK4G$ z@rY((t2tG$A2I8o7pgb2sTTRPC<5_|$!3++jl@aXy_Jl+7j zLb9E7fe)Ex!7x-2TWyolOO|J(aLFs=1hoK*RA8yes9Y_Tp%?tb=U_cNy!{H6`eo0h z7jS7T6{75I=8=e6+L~&WXMZj9>#%ds-R~R@I{jAH3w9a{^}OJ>GND<)%cko3Uhrp` z5X3t;ra;JYjlo`p4W$-6q3^hfK1>y>^9o({NIpiu4(bBN=7RngA*;9dF@In=tcN8NN;CaH zDc;=@n#0F|W>5#sC70?!U>R)9m_Y?B=>9fUveD#j}?Cp5fd zGHPt=^9nFm&ik*nSAX1$P?z8o6`Yn5qBce~M_m(?>a}A``*Hy9eVEKbfwD?;hR#C1Y3<3 z4jkTcM&PSgn!D$%)FV`L+Ddp7F$p6HG!r-{@Z#{M?ql6EoPSxo;Pu*eOHY6qNe>(S z;3DCMSe`O6ga2Fz!sayPk`bFQa-vzpEjgh&BIh_a5R3~9X+Am4VTr;CDu(_@S`MVL zi#;c%G@o)wg~JsTYeoRFOskL?!y+Mua+YZ z*>Pi%*(Ed#RSw3X_Kx2j^u8r3CNQhqnx1@eb_FKo@EkB$K^bZWLlU8dP9Afn$t>Yv zLK13-RF)_Src7}eZj@^pF32Repdz8pS_W!K3V81I%` z4B8OUiD0RL+|RBP!sXI3*2w+Ma-vG=;nD(`Om228$qb~DHZY}ImSHS_?nq+<&n`Jt zkWN!BaeuVN0I(}6atDu3J_rV^Baxa(p=fk1<(Mdo4Ey+*IJkjfTBSFL;QfhNUZ7mk z=~OTunBPvtw3UlVJ9L>DLSj0idO@@G2n*S0*SoH?w=!){YK6#HMh#=ec zNS51Wb_qxpio7U1;1XX=m{7BV2VZr20DTJpG=Iho#!hpSa~`yfn%0Fk(5CvYt9GjC z43I^em6QEJuXofr7#vpLS6nx_1uGHW9iYZ;lScRUHoK)oD+}JK*&Y$heut4?<0Qlp z_zn7Rg^o)|F?Lh@eY7tw>ax>&e6Jz@-v%3@+OT&x==3^AN4;*hGT57u{4UiE=Lpy{ zet&3qYn!$jbn)aHhvw>OV;p)LKzv_!K@naAS9FlgMxI)i`W3g6wL+DbZL4$T0$RNL zy&5}?p{6xwZuMnWJtmvZ+PAn-bqJnA)sAxwXP;hc0d5*M$)MSRwhgpGwM&?QrU(S& zs@eN_xz6p2M`()GkU~3!H|wq`fW{bFH-FBcf_{Bjyo}sDU#M@Im9B}8u!nc2cN6i; zf8pETA9N1;z21QTRnk9nCq=ujfE{RKR$dr(+y6A-x)1L_B6yl7C~Z zW*goZtUKd!RZ)o*h?7PeQts_!# zmfT}NHc70100030|J+$iQ`;~QzVjdj z@Tw({YWxS}1RD%gG_-xnr+KIQ#!+!lzG{ 
z@YCl&`00>@Uo#d!_;l!TV={y14PRpj894%bu2XTyvBNdWN1Hp!Wp1P$Erpf}Z+XDc zsRwSrd1D?pZ**(!6SDdKZFkk3vJX*!g%t_l=>mvWuM|3^Lml$ko_}+YzNl$HnS#b- zUjuRay=97bd)}jAczz}Ep!t>9apXO|%k^S@2r>+4RXt&QG%CKi#!f|s7HhmGpi$d3 zVS3G>=5~y+$1R`&LJJMhp?z(BkG_W5Il9Isq^fDfzp>CiooS8CXLz+A|>GJ(^4d zTU<>$*&T4W*gnOxJb#3|KiO@qe3wuXTN zI;w{GHJAgP&9i#%fdiTv{v_oNQu$C`5Ynw#H~6J@;#>dw_y3+0hA4Z8JbJmTa35&_P8L8l8v-ak%BDZ7wx~(-vXMM ztx$67o|CuN_iZ6)tO3pnXVI^Wp=-8j2&?!tSCNAfx}t`)-0LCSVlj);X?irDIH!5v z6Xb)vYjGBVXn%eUp=0}CE4FhRWnP|BRgT?Z4?9wBUkbGDy88=}TPA{g+`(RL+DR)O zFG3Ups}UM3g{XF)BX+Lj)g5PpTdIJ2#6H&hFshn!HGIckL zgfL9nUTWovHfNJ$JRWrV7yaR=+a4t8n}&=QjGeMYOv<;xLH#8CvI#n$D+c&gX#6bF zYq+y5vVQ_%prD#sAPb<@X)t7=RZaw*fiQ$hM&?T1g0^I)Y(}gi7Nk+yoRME~7SsSy zi+_VLBV~+Rt|S%Wo=By0D)?t0Kr~+EeF?st4G1~C0ap+Z{Eq8{Sbt5W5m*xlV2@9b zwCF;6d)lc)@YsHOn6tK@5R6yB$@+VE!`#h%BP zfr{%0q4Ph1yxTb-RiYh2CzR;MB0KMHgueLN= ze(jcG(cvcPPwPP|lj#Rt*M{Y1J^U=vPfk~N$UuF z%RqdvMSg>ie-J<1EiH#!I9ZO3x0D3Jv?`RdReY=T=yW$_mC-+0Wh8G5mvbl~D72ah z$gBCzxTN}iTe6aG5dtC93Yhohlz$k$%oS&Kb9uZKM|STn$a5(M?Cl@#lniX>Qp_CN zaKrKNqC2?kcuQW}$kNx$Hdnw)*klN385Cg15oq(Qjg ziuwWiW;f_`VTfZ&lbera%04Uq)gg*vN{AWW4LV&I*5_PsQBAhjiyQw}cYl4t^eUvJ ze37J`vwqCnP8jg0)@-t|FsU+@tk8;)u?*_xjv%VG`oS%T&mVy|73;V?U!*9z)22lTP@;q zo~^bD0uxX;oLx~~UK%CMcFCsB>`Fa%tEwc1^P{1?Sw;qu*Yd;mFm${elPbn!pJj^K z^LKp#|L=LA`=2W*V5XJ`jm?U4V&yG?4&zS*F#c=}C$G|lgLx3Pj-}SPxe^%FFiZq)-vmj~EsK_%<^7tM03m(9 zG*VX^-pS!yq5KAsNXa{2q)6@~T_j4-#A^NV!C?ILVV$5lG zq)$J~_(ecVpqih1+(Kio^>d5x!#YKi@F7X0k3hvCTjcOSOEd?TefKTfIvNJhBny5*TdXGg+~+q z>=8!8I1$f#!@*gE>L6-TJiIzoOfQ)M?RdIaEiE9ivyDkdL1iU9nuzCfqCm`Dg*N`1 zYstzKtEczfCMjF+SAB(ADz@>~Ee8%yWJ5E=6brA#)kmhe1Dj33mHL!!OJl5k<7ovO z4g$aJpr^q3)>GPQVteIMBQdU9#N^zfwrUdS+Ot0UdTd#2&CY1Bvu1prcw80kh=OcQ0#F50s`q8V(6EI)?G07gw5Vw{Wd?Zh+qFHu=ilKm|CSfTCs z-_`u4U6pVo^u_%>%BRZXLyd(u=wE{ce=G;aWAbGMzwuAzSGw$dq&qlaFu#h8EoDVi8ew}O4Z zk-jqJFmnTZ4f_mi#v0!;V!B5y;kOrHX;|^Y#YA28HP*RRdPFL^HbGm;!5N@oqCXxo zV4#xx#6->&o}mPZB~eZ^#=_O)Ap1&>Y%H-h)$N`4E*Y%xJir&|{p%l7v9sJNi&6m@ zlQf(a{y=LxpPFbqFS;1vL;T4^{IgEbzf-ILV=dXAF&Cm`Zq65EA$C|@bf4Rxv5#sw zK{!i78EjFc+D&cTc3*|f;09oxP88ZO)kl&%I{B47a;X7jSL}VeR`YwSWspBqZLyE~LD)lbg zjw?q2qww}&Q{cCCxkGH+d($EF;Ig4^BMjpq?HXv}YQ`eJ?f05w{RjXnS9GK9)MuKr ze&&AxU7ReXyh3ge4UqC|xiMU99^<#!0@BslHP_TKr zd6CghS!_SZUh&j(Do?c}RBh{&gAQ&CvVa|*jhWWtKZ%>KHYD%wE|Q8lJc_=ZObqE? zP7>;UB%_)bDcuEI^0j+24Ad*zVCf2^g`P=esH67$QYtb4R1I+%;a0MEli&%a66JBO zSYCe9(3=fhqf&vqN)%$o23Q>}(#+nr(IwatD1hCAYG_<{l1O&d-9`@sFTjJFIIY2M zH<7BKZJqX|;c0J1{s!GMRM0a>&^4%sMhg3y-w!Y=;J?E;+)exCW9$3%NUZA|$S?O9 zFdGC47kdl1&iFX5#>XRaat0ztlOBl0b<>+^Jhqu>PPH3#q42e5nEHLWWT0+g%lMwK z+nZ!;GOlgjl7)eMn61NL3iTcmR2*4a&7bjsoMpQX8)Pb56wbt%Gul-15!V`Le03RC zkL$^#kF5yyECh73H1zeBeK-`L&lEusgvft##jpz)2B?GzvNuaLPQ+xaLPSPq?-Had zw3Uy^9XEM;5ng$hdOmINV#bckmY{2Oi4m|jJ6@3X(UsT1Hh_ zKM~KQ7RfD-hgmSq&Ng<=OeUZSN3?0k! 
zRiq8@kXCKRAbA=bk3J&?-~ZK8*N!?7A79+|#u6P!M9{9XD6jFwEnXH0N~Hnur6anj zXS>}s>kPufY>=f3n(YR(m@l{J$h8y$z=+?Cx(ghL1ulK8V9_#KH&aRQiC#gRF6^wk~k7V!u{FU@L= z{hNwlcZsqm7dmya5{DMfjrmayZzIXDUA)MP1uJV5RHt<=G5@k5GD64MFj40`AQnjv zE#3O8#t{t$yN-;e(N1k}4Pz=%+=q}d`cOZK4!Qg3gZZRFHdK%@O#k^*MJd$NQAO@q zg(mK#@t&lgU2Dqvx^14jU(;Zu%7F+~32DNx*Dd-PEa>K-E#AX^=oU}xq1H_olOe&u zFUmoI1~fe(95Iel)3hJgkZSt?;Mw`z(Dho@%!$ptodBgF{M|OM>Lo0#W1+TO0Rn3W zg;Jw_43KC0=BnLLR&_!?7hJ4{3|intl9e?|2RDUuF+`R>Gv-(lFPeg=74m)1@Q(#} zZC+2i%;H8*>-qf{KtDS(l*fAUWkNcTLTkE0%&zTAWOOyTUO@iqOh^5>bfAEy>?^e3 z4PTJfz<_|F2~stuew<&Uck#CH8_{lY974_Xf5!waqI;!CGk=Ly@Y$92bfcM%wI5;_ z-;vGuWjT)SF|%q1@D0W{fetg1)B5t{tYRSLLZsPgJnau?9uI%D640X_^yI;7!N|Xi zZ^OjX>3R0-#5w;7H67=;vSVrUutr%YrTtC>`=;;f14arGg|h>gQ^p8{FlKFll9QY5 z_33&4)wnZ<0|A%Hi*Z4I0kgqz|f8h}}ll-=$4F@8X?4d0qewe3Tq$Os0bnI~p?R0%?Ga=vsE-QEkQS|yc9Opz@p;B1gc zbLumDBRk^T+%Tbkl5eELOa* z3C-kFXqK8%aJX6ctC#t!)O(;YWb&oSQtLV?tGykNx!!dBPuh3UN4UfRL}s)Pwb6w= z#k|A|u~DjHv6MO`&4?jth2qngqFZ;Is|5WS#E>aJ`Nq>|@*)c9wi;)PXqx|Qz9)o} z7pxj!K=ryYST+>2F^}$QOSdj{UHQD3jNp z1p{~Ik0~WYRVe5Vt!i9T0rz%;eJ{4egnm~zNpxP#c%z_O?$DgDN%Zbt{D=0JoJ+;S zWk+1W83b)nczO`0)lNzJyn9CFFH>y`w~|r-Od%51(S(9cqIxvE3!uO`nDnK8m73m; zYWu_(XNf;hOn|$8-k}zuND`jkD+b^0{_jO}hls3F(S;JwK+1Z79P# z@&=gI62gV@R(1EKGmHY#-}~d%>-qe+4ubte<+N_B)or4Bfn1R(NF_owU6TMW)4&># zyI`+b;=gRMQ4hw&!Khnbi&EBgz6#}IE!H5o8d2kZ|CT3u*u3adLsAw!!1?7P6Jv-N!!_-)+2wAHyoLFlTv!yEX^?f+(u9c|o8{UGbi8jlUT; zYO^Y!iMwRk)8|vS8s~uFY1z7=9`y*ADrnH*@m11UY*Z&C|G1ppvrRz^Ink*Xj9mHk zN>q`efcWoz(Gy8)S_880d0@qeo|q37=9umrJK`aUDm#p)gfnDShDYV1tk~DjDD~=A zaN>Nf=l{EnV-4j{HH!=cRPYmNit}?3DvFCJsWO^6+B?~qw*clr8i1jDcyA=O0(w?j zT6h;h(t!c&cn|HD!nK#!GpX!XR!bCt4Yu#UpntY-Aq%vcd#r2_X?J>X{CY&7fsx6q zvn*zm`a^XBMtBFa>TF61mTK+*b5CY%4W(9}SHwqh;_u6^!2kZR|1R}CuJhD9^_A~8 zmY?s~$=>1xihK3+UfyG}!}H(yBe$Qw;HJh2vR4Y&Z2wMwd5XH69zB1K0CmiZJA@3^ zxmaOAKU1ybkBRl2?Cp4v%Qfw0L$lZ{V)FeA*}od`!SQT?9_=g>w1;C*l9agilKzVu z62F#?s>e3pW#*MlyPBry`*Io(fyMUT)mnJ#eB-V5T!^qub8toR$Ov;s7F1CL=C#7^ z)SCt9j|{%fvt2C}217HqWL1T5`%^`kxk-VS7a~?D8GmX z#E}C}dmtpy@V7u9hm$c``6Dal1o(3K;O;%Z*2ob$TXGkVNnRXPOCBZIq?1IXAZ`24 zyEaUjIbO(j#uZ3vXNlZS)Ogl4&j})kp(fACQejkS3PTxGeZQ)Iw9KE92JI><9DtaE za40y~+8anRL>RNd0bQJ3qU_MISfW~~QSl@3jl3rm2t8%KU%gc=wikgTy}?>r+MycY z1wR@4_o-k8pQ?7GU2BsC2SBQE)jlY3dv1d|9`rg&ri9oVxgRN*xyLzEtGzesTiQ>ISmiHOx~7ZeIX(nYF2d zlHeu=?#1f;BrKw=@aK*Q7Loda#0nrtq}*Ar+`&yURaX_>s?Tu>WVHFwFHi!otNzFr zx@gD$TzL#^R^W3bno@bZz3lpYu#de@jW>YqbiT3J?s~&h=zQYyuF$Vy_D&btu&+Au zd(Xn%^~&$7ERx68nD(E(9MKNh-4E8P%Q*`2*3^*aE>7D-qNw4v`)AOqsYQ9AKH zm=j^xEw>eM^C|Nk2dri1+=CbVayxH>IaPV>=Yc{Js`135V|-onBnp=Q;c2iz_EY0s ztUVlrBHE65XnLx5`CKQH5 z&F4LpMIS>((}kF0(kJnXLUA8)Ic;;KrYG3klKS6ITlvwIQ;#S+&AfiwfF#0Bz{+J%l3u=hq~<@1_arINHqZkA3(<{{nb;V&%$GBB9E$08_$P_YUP!YAVd`L zulfR_=b@JpmnCEG7Wy>>yqRN#;~qyKc)!OL>eB6;gsT>Io$@5$vQ`6qcKY_kXamT( z#RS2a+rd74dxYq*Q!Dc9Inm|E5PrhFVC&aZq0=NwbtU%8r%wXVhvQ}rKE{$^AAmFI zjQOZ!EVkIY>aHK)MEjwYo1l4QfQ$d@S;Ax*O(a1t{I2cn;{{FaEcMW*2Ro^>w5Dq8 zzH6Xt?z*Alrca-!LNm9_C$8|5=k-qv-(F};(OtODsl?x3uHb*o}RHq^0XbP1k3NEiv z@NzP8SZ2%oJOMUPM~~taZp)MtT@Sr1AQ|CKncpcEgfK28eF=vwSi66!ssSw0HAs-s zqd}?;TE0XOOvF!lWbjp&RKM^Guqo($Ci;T}K;J;~_e>0$=oar=cwye>u?(rUTpHHB zBOVuR#T>Z`F6{%^rqw66A$vJ0na8E%)f_)$sEEWc?6TneYUQC`^O+BXDYVHb6*9`D zO>?LSwUp^-y&-_Da_}#=sR4dIEM_`XHG1p6(PD`I3`s&!q*+iZT+bFtG0O(;(2y+5 zx=4$uIb~`I?OKR~jFRw?xIPRD?*<>$sz{AVxjO5I`dL_lyTBZUXFg#MqZ~c5Zfbon z>kgju!$eLCNFF6XZlH@|t-9Dj@tam5T?F;)QWr(lV(%Ex;BJrh3<4Cj6Z@fUQLrB{ zc!u(|P^~0ccQCT{WQ862!Gf#`4#=I{2*xd#tbRp}P6^Zx)+kv4RRf#1?XiULk7f2> z{slIwYLLZiHYt+}Re#WUYOtsb>)XJos{SrrYp_hfl2^((y^O7k{Baz_;qP zc33{MVi2z2Kiz4^lAa|Y0 zL2<(QmjbtHr9%v0C+O^Saw-iyjXWqTzDiIAbPemy+0E#Upe4gi 
zSV%ZBnlQTG&>K6a^mNO^_c7c@x5gD~FYfFz0jrPm9SQ29>8v6$?;^F1A8AY7a=d0Y z#cfz>5W6c?-FYq?jB_VJ-(;BZ{L=MCD6ZxJyngb{A%0}~tT8f3TuZ7_U1**(O88W| z$z;7D{M$2Q4rh_qUZAmUj~6C|7Qb_ySc>KFf13f{_(|U8L;5)W%Y%5K{KwRgfPglq zwyGFFU9lya zFaxJ+8=BOoP?6M{;_4XoHS(5ik=l?KKs{zZTYGM~tEKSHos!c2l;}VkDDy&EEvkz+ z80k_-ks-Xbb?W}-@7~srmU0vMkuoiQAWguohO~Sx+h`e3jy3t($ciXqBer(2^4G@2aJ;w%n(qZvJiL&0F>!z#dFW z3ruj&H_THT#hxEeCZ+U2gwIssP#5m|WJ~k1Hf1jSA=6NRsF;L5&@m zBU9qy!pC_lwp@usYqmVMB&$X*$rpViVP!V-VdW66i;0N_)4;A8(AtdO{CHde_^Fv) zb9o?~2l%Mx-a9Po`eFd!b6#LL002E{xSKF^&JM>aAA+ORwD4Kuti37DEM-_Z1ERrU zC>P#^zvbG!pC3l4=_Ua7zR!nKUxi zvk;2+;mnj)4R!ol8XtZSdj%riz@%o5o9|Sc?hiiE-X=P|lvJieDMj%H%3Vq^sQb%ra z;oi0ue57T3(1O1aZD@W05TtO!?4MwvW0i{(v0%`!yL0oG`>=p&xL zlm1GHp%J;7nHmg%`upp_kD9Rx7^(-I|%k=%;*bDzN>q z{rP&W;T1`|%XXPgBYZ90$qU%%6lAevQ9CC02xaK0ds0w@m42Ogezz|qX8voPYM|q& zSq{%f-r=)fuPWgOoPU%vRck$N{waxR+P#;5PXTpeXH|-8{!=A^VXUVil&0kr{hQ=e zvna8dd>;RYu59#~)tr;E8)FEWb3(|vuy zO1n6SnD{U>l%jFcadclz;+uq}|NfZ2T1g|uHPkIE+5=jiL6Wf_O9w&z$o9oX~O8);b{}PBJqspmH9ERed%Oq&LwpzV+1Xv;^P1KwuvYCrI_LIp_ zU&c#GPmtL;so+&YfqVYIC0t1R4!P?Z;EL#I?U1ndk*5Y{Sv@Gyq_G-YHBqOAMH!EV zAx<2l0ni3cTW2|)@0VMZ$0OZ|*|Z$-N9p7elA7Yv2A%8PpLeB%pNc@EL{ykzy1J3_ zOpumLDz`CW!8nofheFY^H_$?KO_cck`$Ljm{_qtp`hVPrmeVcvh*X#}Y2{D3Cs1~C|{V>Jl|G*`#D?>}nr>iR+Mqs~n$ie2|5vhebbWB_IO$TIPg;w?j zUCnd(b5<=y=bzpKI{fN>b$wS^&r|$;$S^Vv%R_`q+5YUEcMJxXQA(=fGFB$DNCKwf+{c z zYd1=;@mR8O2qEdLW_5<4U})Ev!D@p8s^}AwHOP+UQV&sMoM2z$HI;D$zKhy?cVzf~ zd>hrFBg~1tc|_q-wqD*$WxjguaMUT7-2+`?*1|cOzNifNmgRWiv*h|k^u$$TjqQPK ztb+>Z(%486WAZkmTGk@92|4NG?D2FfYQ3Nu=dP|&M_0%+rbHpu!@|8U2i%|l@EExt z#F33O3Ol(5G6w;v9j4r*lmk{Bv~0^*nysmE{%7AGDOM<){J|SG)R-b9+`TGicKYHs zyy`KRZ0Z5HtdJ@JYzML4&e_Yl=^zmNfkzpP`OZQdhlSC5qI5?OtEXok$gZvsg%;I` zI}CML@s;v_6YaS*$2_gioGD{~CYognr#@gxPuc!ft!>Ey2!tGgA?i>v01t^muPjbWOn zGKgx6avrLl#tlyN7`zaiW&yqc-OTRDxmrTGwW~@1( zO}Pz!uQ|yUbU$oWMd5v&F_9IAj7a~^FKMmRx0e0uT=UyxBL>1nr#^P-`R>rZo!`tl zWsNepcV^%}6t4i@t2AYKRf`}#0e70vs9n90UNTEE3)2kPj@T@Wm3Cg4Y*~>ory|xC zw6<2_-yWouokjn=tZfhgAw7ZR8jNXDT<5(o5y;q4I!=f(%&mCM}=s z*;jeTfIY0SqvNwlix}LP(t&1SImai4=b?YHc!;w{#5Y29)mWJT8noQddrNS4g&&B_ zBJ%z&S*(k(%xx$%H(BV|9>$@E4Rt2Csu5{oha}<914K$pOj4Gb>>QhnYH99Jpp!mg z=F3y0GCUh|7(T*nd8AmH7^J8~G%r%3oy^UW%he5h?kuWP&Lu9;)x)L)&c}IU4RY4H z({iZ2+C_i!ae*?6NZ5fAB8d5Vv69t2RC@MkLoHK zi5^9!*!++XpvHX!ORvHwf+rvWG1g^f4n@$YHTEWUwv#fNBy8Ia#4l@L-kLtw&MLNI8m4G_#)(la=-w&)w;q3Y$OVhg$&in6rqNWaq!3qi^7{HD zerHrJBaQ5S7*Y!xX;b`9Uj65lu=$EObQ(g*`+Fx+ofU^`(S?&|pC(hP^#c@egw3Oh zmEio)&nd=l%#$+-l}703-r-&VJA5hpC_ReDYf;VQ2Gt6V&$izW1BChgjoVju@Z4gP zS8BOL`X!U%+@85IGr}>uxNAL#iBDZs6aBaoWg}v|!Xk3)sNG`8K5!<95hFHq=$-`d zt`o2n_CM&vrk1VaJ_piAKu@sJYzIbU_KUqCD)4DOy$)JmzSJxRKpc4%#r~E=nMQg@ zf$+Oabll@0x=ieMP!Ti|?%&b9tBVVlChh2m(rqs;yeFURTeJByudf|SNt(ibLFQbt z$a0m^oEtxedcDeui1cGZjnD2IKb1${-B-FCWlV&i75LnKQ}flyVrY$}nyre0xP&_nFfcp~hkpvxO9rpq37e;i zSfiyRoj7i5?3lDXJ|b>YolDB;{N8b7LyzVAg_%5dH(PTgEwD~p)&$**dWgz-=m34G zgnRZ+({Vezs1Rr@gz@ig?keK_K#9|G$)F*@9PbRornVP`-SoM^n=`p?G0)qehxuWHzqm|Q?n0d0yRxgzFpNPh>A-kI1 z{jSYoR-h)|tvN3)f|$1f3KI_B_pLc2mJ7MxyqD8f^Mu?nK7r1jhKDAb_m=#Utjew! 
z>)kuJ%@mJT0JiPLO6L3@$3QW(!;#J28^S(JD?Sh;fWeVCTcM*(3qoN$yMAZtW-kYx z@QPnaflKp|ntaXU^=SsJP2RNtgN<~dAe369^c0@A5fOv$_gI#7fxYrj1D66Q+QXls z;7~sf(*$V!^~y*ynz$yL#2q-7q$6jkl2+m}vj#qjwtprHD0F+;lUE)t!fu0+1<<7F za)Nb209>{Tc3Bqtr;{Wyzcyy`@rPh$=ZHVE8%ui-a_JM62y_%v=V<7_2ouLqMF~vQ zK~7dTGpS*T37_e%V^u3$k@O8|ka{C6WKu)$iErE9Wijb{H<&e~2}LrAY2)09gUzWz zjC{pFQY-}fLHt%U)!*eGlWY{LF}4$+@S-)`0H|={@WiEGLqW=^SkhWIDO%0TCly2D z(FGk(9E<^ZNL1C4b4by!UCJoL2&LP7q(CGmLd_-&H7jF-D{*8kJuN74N@Lcv|BOcm z%S+9te?nkyMK%`$k{`>~RD8dRiy<}vT7(WC3|+WF6Uvym>HfwQXDuQP?^%kSLlUaxaJci?bz+&wV^Q9G zkwFv$BC$4;gHZCXQa$T|{G&fkeKhQ=l#*Un7&U1WYN1G2ur5<}e#ajW?G-NhOb{1y zxG;^>9Q%{*gX;&K^Y#=MGB{vxl~wTJfLcUC#<=d0@1YS{mjUXre|`cjw45kbiLCbW z^#Xx<_(5-B2Nb}Fz#vERJ)&wk=H;P!vUgu{Z4#5ZfLi2GzZWfS?+Pdb{!F$Z5eXIX9CJ9{B1t|Tx zjs;Uf2>94Dxl}rZcIz*eb}HFP=W&hYNhS@#B*HcnFTh==IyD)pg=Z2; z3Z7GE=cfy`QcvG2qwnSfVOs%D0_3Ep@n?U&Psa>fdByo|2_F|v2e;7s!8hzCNWQ0o zJA!tXXgiHHSZ08y(s^k&?Ai|ya8s-w-Iq7)qO3<}gF9}#2YQg< z5w10LJ9;`lJx-U)1_&%Av0{m3K2ip3! z8$T8BbuAP@3_J$st!MM{Y@9h-JIk^K+Ax8dq_i0NgqBO@(~6=(v#_v>PfUx$Rx~KW zv%x7rw(E5=t3uJHr$e@zrK2VWnd0-ACF4e?AqypbcnJW8(%-h>M)J?lq;%IPJju>O zt%3H;MKvdR)WdOcS(HE~;EMOwiR;d9CVKyCR=Ou6#)Z=SRj%>&8D2ZS0ruzGLsDp*l>X60A;vw1EZ+yldDnkrkK%bSF)`u<68E$~gT^vP8b^ zBA9wI_rx)v)NE^EzPT`)vev-ab7|tb)#ugfaX!)@ryx*9XZtT)zgwHrd>>8Dt8l{0 zwpHqd6eu3j?|d~55o|4+NuA7euogD-pTi8w0C-4SUSu<|UuZg}s4yNWz~P9rnb}65 z-STAIskPFe)(R>?Lth=n7sH@0-(!Kf<#257Ui9d`xExGP+((TWT|WcGk4BkLw10a2 zhf<;aXiN~QRXlIZfMw$>149nO1)EEe-5BS7h%!;2;v^C$C0v7Ld4@1vS>pFk;yVOw zhznmk=YwraLf07FmyG}}9l-yUtixPi7`y)Q!E6QwzP14aM00@DghFvaR zV@Y_I96I$=8WaolpHJNi_Lu~IqTOP!875n50iJ6rl?!hs5}eqoRO1}ZkJs*yLuBoo3IoQG1k!Kak?2W z;Z#;`x*RyHl_UOWBW-;+RpuQl(YztEdp|7d$g$RBuCQ>^<>0s}>N`Mk@#kdFaw#eA zahiq5R8&JB00CKZJtUnJ>THA;V5e4L56Xy8I432j$M_(+vs&-KU+MZ##o+@-H)6bB z6DInTCXyYP|2`TW74bzEu?b_)5hjVx$QN8*2Tyf*#rxl$^3M_Yzr;5pEjq2B^*Xze z=5;NhW_O{e<`dDNYDH{PRc6bm#TR2R<2g28Akygy2Idh!C04`%zs^tWp3YcttpYv_Eg$= zsioIS)aT*OQ8ctN14Xlu4!F}Y7BA#Ftih8B1^dfuQ^Imi6jTh`cJDp2G7sNoM{fW& zvZYwTr3H9;?V<54YDz~%cfu}Rme|d>rN(+Z`xG@I^Dv`-Ala(Q47LRtIPtLa5|PVd z%+~BYl!sAKVF5?8Ty*q#8@G#Q;`Sj_mbxWOEKyKXzw%w_^c1cR%KEH?;ZF6btTPF8 zCvS~7CWiOva^Zck8NQy#HFkrcEv5l9d+&{XL#jKjniODOu22|Nq4?pZA4sklvb}sX z?I@U(x~K4yuVX%fxaqsddUc_s1?&tCR$w>JbQ}OlCFtiq z@ONsp=bOM%I({%(=dBT^EkYrSecjWpjvAn-)Md8iL6r|DmaKz9;haNye*p*;?B=&e zAKq=mefpgZpRvXhz9Jg zhcnl6w^x0J8X3o$r^585hWwGIxRNvX8{lnmQ#7GNb53lDPbQ>LwM=EJeK=rRiO`Z} zJVW(x5>8<5eFC(#qnnTz>&e+PS6~1$pR2iL2W|kZJpZeVe3xRR#~I+_v}b#*K_Q#} ze12^qDVradw}W>M^p8jg$e6=`&2@cr^5!MQ48-Zb3TyhML+$aK-}B-v#6Kk1ie4h^ z<1Uwr1+-Ie57-yZg7s7mX$H%h)0FK32s(i-x+wC2?3z;|SQGh9Ff69OI8v6q;e+Mg z5dre8?&4u5koA=yse}M4|6b6Bj%ER-gw+QQHFcGxc#VvS*za*vyQ7nKy`FQaj2H)Z zZ$^8&i#aRXw%_>ATES3;o55CGPGj)ml%|lJdmrSTS0j)b6JdwS+ztajd0;`UqCDp4dL}0t%x@DlNPtm#Y0ux`3)!nw>-G_Bk z?>);qg#TMW(2{pt5JKwuarCDA5h9^bm=DieE~u_hNe211-y2)qprEOLt#j_4a98^R z`@zz=2ZX{4(A{3%6GGr##ktN*=XA87RF*bquJ|ibIbbP2r514V1sP^0@0xT%rszMj zBHhi2^|Dkmh>k~5p_Rsg%SWlNXppr=7!)HNU^OV`$q1H#gQiuCSsiL}VZ3S+9IMSfP@CNB*IsW62FnMxn&Gr6 zr61}6Ac5^Ojv#>!S{z$G5FU2%SD@^8kjiUD*!(iG%OLx9TCooHL^-p024A-@gk4ez zBCpHseh)3^R&oe1Rl|%43dpBR{VSjdrR-_wM|jU(yB^_`YqMYdcUO1;=ps&&Id{Ox zlZKPD!sNu);1^jgevAZ~MWhgN)<*M2YNWV2fZ~@PinCC>n}5*20D#4euWTPPf%w znADC!fi;MAg|_?@tTK!Xvy5j^$%NaW&zj9d z=ib$I#wsT~rJ7=B^yL=;j0fTh%3irSpaT&ZgH2l1CA|r@mIFn<_u(AX-_$6;qRsT{ zMOn$#PrB6=q%@za6$B+O`Zb!o;=;kTAlcl*3Qm`7cXHri7o zZY$;Ke2PER~%?i^LNkFOh%I$#fjriHi#aK&wV zwUsuQC9d+<(Yt%4)Zb1600iFPrU1{U+!nRFTh#H-r*;Q1&nRVj_FRDtoOcN8|GEJ` zGW(xd%TBFOipFJh5{i_uEosw2PRW(~y6Q2wC_Nr;1Abn{(~f0o1>?PYTe`5mHoHv}6$WHBR8T6iz)CB#x3un*?^=Sl0Em+Z>pMgs*s>oL4 
zhuDSE+shFQLV;w*&=w{~AdnAv#!Y>H;TEM+`vaG)QVe!%Rk%h!SRlehRDDHbQ!2VJ z&3VpKXxPJYau0!vMYfqzx^)ZO5$RQ^CxN2VFMN41sM9&sJ>UUU=fj5A-O^jrHEn7c zfd6a*qkw&fEfd9fMPCj*in>B-EiBlv2yw0-E*K$>mYT4QNAaa=j=dCOf%aVq5j_fL zFiY@=3yf2o7T`JPL+-*KbsPQ@fpTLiw`Q+QZ$zSpkWVejqP5c}q;Q=cwrA2p^$lra z0;;`AXx*2YN_z*`G(1A1u( zS2J{!puc2nw8<36%}(sg!}@)nXrl9=DD?_M^N)#I+>n#(aKiBBz2DI-^I&d9V-0cR z=0WelqO*R(Hw~?Q>fysGs6l&pb4*_m%oN8|g$^XO%{LBEI}j~Q9cN(zMz&ll!45A= zf!XVMl3i^*Aa;+{xk9}#cmQ`gi*R`IF!2MHxDAS$>+tW2h@#8!5XYdVb%E0`^ECO4 zWpuKlsN?IL6EBJXi*(>%09(=P(AwQ4Hj@3YipUsZQ<16#an4e(#Y&K3r-bva@BxAxyQ+kC`3_W7 zW~FaH3=w>BR9!`fNJt=(h4>t!S)cob49(FAzu*v6Ue{0PAMCCp zTB1Ye(b&?YbLndlJtL#+7}{&8XN=2f{2fH&cu5YJ$q8DrS%%rPo>;%fXr6D=D<`+X zFJY(d5gl?a4u&ETeehc$=E;?V96E7hnd~EeV=m0w4#kfep3;&~?jC;K6VWOmZ}J-w zyYVXxVe0WG$Xd`(b!zj3_$3mZ#J^$Wxy~&JMhs-BQwOuVx^MY`b6N=ZUHrZi<@C`b zIHQjn{QZC0HRXWHjMV*1U6A~vU9t)CnP90G>Y#XJ$S9C;g)>`DL=kw)I5V8AJ0D() zd4H;`6%ynV5Puu8v8%q;rP68qv>%uMBTD4N?Xc3I<4c=K>uPywq>tgaN~7B>wS^!g zVKp|y@9q+MdfjIjLkPOMr|c2_QPOTHSQI_ID_v`ax02A%eB-!O2x3Jv^?t#|4E%e+~0?plXeqvf;S^SWgl2~*-&{Y?t2;4bI+8fp;)7|kIN*pYiuv%TahUcThp)D5b#M8nwdUQrVup^1b z?#3Vx;2TDXye~ZfGl%Joen?nDU=iK%c*SDIXoFf(1;ZLOBpr^#Em>-6l@zNS(hM&OC9e(B{ul^elf=%xX4; zWMH8aV(7~;pyQc|ZLIa_q}ZB8Xp$@C8>K~#)mFDV6np#t?QdW6%=NNH*iaeNWa57r zTzs9EK411Co~IpmL*jMm?sVxd5j;=h=0YQ2(a5w!xMV=>#(ODlbb|;yk>E8Gnd~-I zCfl4#h3FoLf(n>QQ%m_KQBm%CbqWR?%fZZ51F_7%bkP>9?I;g|$Ra_SBb|hl2Ek## z`d~0fBwT_3s4r#Mn9op5(j?jOnXfoPmBQp12W-kvYK6NK;K9N}tb?R({E~Evzw&^M zLdgnZY#tA!WnI8FqJ|dWK_+KI^9XAlkjrkLjxbCzEYL25+Q7ne(&*XjxM>s~ScHP< z!7WhS6pg8|23BHeIjre9=PLguP~|^^ZJ|)KdF9>!u-~N6h10^Skr@VRt?7eSR4e}Z z{p+o97z5?-WKIK&U}x%nO+7x=l48Eg&U8XnGkM7(%?=?*mzWIpZ9~dftTV5>QucO6 zLfxRNFbh}mmvY1Za+8C2Bb*eBo2_fX(A@|fUT55DJ z&?0XEoVv&;u{xAu^aq>pmKaN66(x-RI^tx153G12WsNkWE_0o_1I*(VM7xMeL23c> zxtZ1)nI@o}8aTm8hlPo-I`M^qb*3G{(9HIS@-0Ad0oRPhfU)UVSy)jfaDUVW0<#Qb z&*dx^hB-x|6JXpT4-$g^ol@yT+ezdT6}PGe1j|5cC{bWRPLL@y9I9BnLLfXu%uThW z9~Rp02QoX<=YXe-Rm0F(R?5X1q?^aIlm;KCSHCXcKg@J)TRYA+^QL@HaS&5!*8*nR z-llso1Y1JhDdl5A>Q6II)B@o%1DAA;w7+t7^}z+q*tkQPshd6wzTF6yzP2e<98*L9 zH6@IGaLKIS8MH9PzJ0E$!V^_O38i3RY+)#+6Ab+*_BS~!QdTNH14KDO^`S~N>ha22 z>0mm-MAF|xf0F)GDixEyWY|O-WlQUlxp0B2{A(0Qtq9~|UCNkXK{k(H#Iq2KpSvTn zwnpPp(#a56rW>m$Hp=ZWP;9ZB36cc@EShS^YbpM6U%z@<$G`5}sigzX*ut*`lvRy5 z(eTqne`A(*OW=w0#9y0UWfsQnf>jcAo6hF5HBVGR?YoMMoX05L#9aH7RAVny^DG3i zI;nGw&Lv8u?0C>56dJUUdb}7s?l+bMWLVt`Vr1nTsf~*Dwp7n6dcm7w~cO^reD#U=agvCA|1#~?QH=hIa8-0FIUu>7Q?EL)om4_uZyax1~{5|SrIT*7U_)(aZ zP^pKPHTt-RIK^0;h!%!52?)J;Hs4(?j4%3g!k2nM<@Q5`@ap zOk&=yK@4ZQFJxnAlEk&N;X0-k<%et9n=O?)|Q{o~7O0geAeAb+aM}#E0bj zP5-i^@_OqVkMG~A9gLbC&=#-LfNVZ%Ua5mP=u~gjEH~3s-Zll!L77lWn?A8l%OmMH zM>(TD3sBZ4s)445U6)66{9}?XPcwEkF`#AD^oMnz!?u)mkAFfr0ygU!*|lB%JJZ~y zM1I(up^iJAFK833PwR=GurER>;9`}Jwj5uQ_+|#5^M)g*u|*SzKpuWi%YkjK-;}n9 z-9MHlGl%CM3-1lw->^&G8ZK}08A8##U^V8$2v7`iQ5sEvKg9$=geottUXZ$B^#snA z2*VQIt_Wd@eA$XbFMF5gh{bQ{^yYOLBBfbsF>MtsHchfz<-XgU7YzLH?~{(M89n#* zE?#+{Mq z(Sh`buRgF*j3a~8r4%)qwBgkBSCN63>_56lyKG)(HsQ8wZ(;&tPNyyb62t~=>!>lN zhiDLif3tV!Oni7a;7J4iFRE(Sa;+Pk`0rxdu=8JMYYE?UB?n0dh<23>N@akVC|)UF z%|7?hGl&hIIRxq^(#Vw3`Q_#_v zk^b8y3)-ZK3$N9Shu4Hvc6k6E%5C_j$;$tCTOGFrR-GA5)g1J4alvT*--4f@V&5KK znUy;im~is$_&Yh}HSNIikNfg?OZ=L%ceu8nzNnPZ`6=J=S#EP$JfM>bI&W$kvE-BD zS!Q@+>Xpek3>A8gddFZ)PP%oeZ1UysbD^IAzPhdNT2KeT*|+XS(?_bKVw~GC?F6@M z33KdPCHl4~BdjfBeJ!Fv6<_&YerS6Ao;rSmVBw46|5h`7S8|9K7K0#4qVi$1 zMT7GT*%YURSs$-BMT2nZTR)Rr0=~{^xSIHqH0ki_g~Io0k33y^VshR^wm7TsCvGp; zyY9{lcz-7h80)n8_!-F!S2$B6?*}#(IJDXHh#ifL5EzmM)`}#4&um#VIqkayjPb-Q zrAW}~Ebir_$Ya}Sxc&$ta)opQheR^S?W!e4K~h-7L_yC!2sQQ5vDJg7Np4RG*K=qZ67obVt{8gXd#AMy_mIczK=L?Mm?5+Xdoi^IoR**Lys?G3# 
z7Xi5!a4A2r{a;vv?N9t647b_n34b>n>iL19fNyr*jCvQ%2s;3&6iN=cTz{YH12*xW zB&HSQgkN%=QaDwSdK603|W$tP8 zAwkOmhL)=l0k-(Od?M9=+JNn_^_ z9gneCjcUT=4*s-}KZ{x;Wg@q7zEb1RoPtQ!wB`J~wRoqe%VO*{@9pV2v4)02WSJQ0 zXJ)_kyd7MPI`%?X7>KW}l@nT7d4|*j#4WM|5Z!t9I%JQFS&3~F8Yu&>1?1sB5~fU7 zJXV^(DB}fdCrJwbqhyXl4M92*Oaf{@)@CLeovkeCq?yP_Zglcex&suVr>IVC#>z@6 z4+j)M*_l)#$7$v)7l5GLZO6N&=lMt$ic!~jk|WJ+Topo@s_x^FI{7WCVPrnqdLqIY z)NL<(=6&r3W~MWSWA=Q863lHjk|NKRc=u;LdK$$AWtEq9)qFWY0nntnb4&JhBy9y; znO-4KLC{L1B+#Uq1}B@)UWf@NgA|~G+2coxDo&t200UlC!rj#rdO{jzp zZBOg+Z4ko=baaY?%gw!j|GjEy|JAWVuODdF~Z@Dmad!X#wjvYJK=8+&=%c`1*SUcr3J ziB7%~X6_gQ8o8{_zAa?|R^E)-c1v3rzRnyMEH!3vE|Y_>)fFsgXHnvZ`={y;JU9vi z<4MD_@8zcgtFjp1D{K>WFJY51>#fu>L6*v&bCX}!h&bt(_A4LP#0ed`1C=TMmjsU) z!vMBu8n8&o>EnY!hJx<=5u7gdHN)<}xr}8=sR1`YTj{>r0M6igr_lFTXKx6B#)ih^ z$ns;o#z$!=pm?82W}m5x%ZW4Rt|OM1PeCohjh0=%k-1I2PRTmiR7= zR6`RmCRK2Qith?wB~LvMz`#ryCe&aZVDyu4iR);aX24x+jsmQV(Vu}M6d}8|VQ>UB zE0nnhGWEd|amsos#+S_b#ld-Hz?^W+p3k_26j!iy=~6*sW~8nI?Iik4s{72OmIaX5 zDs}7xOd}l+V83t#n;mgyky>?C1&^Rrxx@m<+a(am3OGjS=WhBUK!m3?8TLRDC#-Rl zAP!Y?#Zu>RANF(WX@Fhpkun07^)j+_i(Dbl`=22ZGs=+LHu@surg*aLQ80K%b?xAF zW<+^S`O-m?oH<7iTEh3kKz) z*|SxCns;w#Hnn-udG{V&AA-o^nw}^r=tTI9ax677c~qgcN}nhGjb(Yp1-M<~T^}y2 zP>$N~3V2c+JG*qf-a5kuOupb|Aj1G`Kn4rS=mckPp=wy;kyuES(a0*OlFR3rLIP!# zHQFJk{Kwpau2Dv_rCP|4AW-&|)1|r$Ko2`{E2dH<*;ybGFtrqO?imB$4D-a~HB`a^ zcJm3j$ng3o!9Qfe+2Vqasdbh@;s&(htAJF_(C{a47}pe?)z0RGKSWp0A2TPk+3N zb~W1!;Z%e=k8s1vmXk;{X3M}M7;t!LF@)-*zlbra%P#*W{J91=LH(?VAMr&?-Bt1i zwlN|IA16qi<6b*`wXTDQ4j{8-xmuO-V!exbW4gx={6G1H3oZHBpO%8|>slf#HYxwH zmXFJ7`MlHeu%eS35S;=|F{%Xga(!IkFCWRf8dCBC4YjQ5#&Pj4q_BI7; z?creAE1EXi+Uj@){q`V`o=u;W*#{IWKi)+Wv+oE(?suG?|XNr~7! z8#@NW=Su?-$%hdmfJ-;8=1Zb8qgl+$CiUE!%ZdC3R^l#B<5O8WKx{IFc;Nv6_RA;W zD}kpKVm>NA>QZaOY{;aEpGbOfWW>e(`4o8G)*u-5xz0Y_|D^h0NnJeeYZBmU4mc6V z*o}=5{J=|`p2QWzh3a-zd4u=_f4dom`M`%hE9cVD+!V5eljNU^~mSU`O2< zwBn8jT;L7W93mb1O5y6qT*!lh=hG%}jRZt&BT!D0;QemX+zTAEF`yU(lR}Lq2p!D3 zJ_Mzh-3UsB2M*K5;ajw4nEa25ONr?Pqe|}j=Nh_)k1o4qAD-vQhoEdDdgLgamYtGF znu8oTs8lxrTWhmrsJ6}R1PG`tvFc_JJopozH`{(Ur3oo6;|;K#+;dQ81NC`CSly(UpmpvV|d&F*&bTDv*a_Iz@T%~)yV z^HKc0nF4z-xZd7v%KRpp?8Y5}a~U-5odmE$?up#sCa+v4d_wzHDDpS*`^O$z<@aUm zOvApYB52yZOg_~iSDe|FY5%%w5mL`n#rR2;bi_PfSuXr#arZ8lyY&0BLi=}e>5lpZ zq+Ev^u)vqdbpFw&#hk%wPO9Wwy2Slw%_Z>BI*#7`s06VhCf4OK>J`=P80A-nnwvNn zj?JWyfgLwhU{p|D+>{o^w+gbk2xa15H+ER!>RCl`HIq&{bY;ku__s%yPhXk>lw6+H%(;p!uBA@YKcfnKpa6ePzu9Sj$&=on_)Bg-nSCByWJOj z(}ii%sp*n_@N)Sjit%?@mzv>4%J_jfLWGN{y7#xhXx_V~OgRRf`2tC1-E3`m+)R>s zJW8*bPWqiwmsT{zz6+nXA4@gLruldiPhHHka_tf#$>JC2{}6fhKMq>BuN*^RFTh$i zko0|yZRbj#aT_AIM;P#?09R_L_%$7;hsstRqcv(InsmG` z!Mt3#t)lnW`m$0z*<F5Aoku#;>X3ubGN&d`!cis^s_2j$J}NLL2r(8}$00{gGjl%J#wj_!Y1U5m`Xx zo3ATV%j1ch0Rs5>LF+apn>R;<|0o4aN6;S%`+9U!w+JG#3Rsr2}bv zCABJ%A94aXuj|qN_xVPd{a5(CLWhfG_UKDyLeY2s_OQ~itm#uKt3dVxE7(j4HG?tt)PGn&_mDK8v13F!dB{n9)a8>pPs0K?6yQZew z5l5W#6}nM)YPdU@gxf%1qV4l{oL3*q3 z>w7<G=fIFGcXKWRr6d@^g)yAK@AV_^nLugtgk09Z2L zn4Fa~JwOOfz1Ok{%;Fj@edHkC7b%W50<2tu3Xc(OV3W{sX3rikNbh%#xntvB+7hon zbrVuj#aK=fmEbf=zmR}coy=!S-BMs-P#lOS3G{pYpqpWe)7Pp^jsRV@=Id$~3#;A1 z+oZ3-BM$h^ETQC9-T8dC)OjQ6pLxn$2VU@ixcj&fx<09q{9jfeFZ;+K>2dv2N6~?h zHKcRc_TwqcPbw)t_#^fB@N%^XMrmyA+FLlk6eN;tJdjGTR>#<-D=P%ZrVIPkDiOSS1&fgDE!N;jj=;&9^A8ELVlRsJYKx!!S1ar%N68#eji?V`RlN&L-&Al zk_VsEAm{jKoyP0TBxF};TjuNh(+&ia+k6gi94l~aB81?apIn!PAtz)ez3uQz4i6Zb zF}+^N7i9S_|CW1M<4g?2`!x^rydLZRkpeMr%!*4$6z>&NVgGUn9YWH=5)O#XBYQK=|X0c+_ouivy#g_h^MF!y(%VAV-ew&BqnE5J|&6%ROYn{e=rCL zYWTb@bGm4lR&*q%CF3FNkB)w$L!D?mRZxf4s(}z-6c8vFp#EhI<_olE`n}6PQNLM= z25Y0U)}QSf%`#D^1PP>Z62EA71*&?(&=1Xc45oIXC;*NW3S}DxNw5G#Pelg$Zu>Sm 
z&P`p#7}eaivC!A$pU20U6g3PGUjS&Fi`TEo{SD-T-AoegC)#dr$%kGc&;?~I?;Eap zN8%0&woE6(d7L?3mxc5+v&O0A%upEd-9RCXqvN#VowFJC-Yx$RFR=T(3x~ zRLI$=20Y=sCn;U3Uzw@z+AOKVzr<%*C~9LW+<%{j`v^NJmwu}$R&AVl#M{v$*z<3UPf%4jKEUjgMC1Sl;+m9t&`%gpo&Fau)FkhZ z9oCE?3UEhKnzo&AwofPzs^es`fZh}B%-~iIv3O3`ZB~+A!M3Efbr}!6qLhOhnLk^ zku1y6El1_lPN&9G62o$Hregz@+w!ZeVB@sx+}@(pIYFZ1Ua7mOWk5U==pe_sWChNt3GfaW=FD< z+)2-VPPfe32E&|p^XBgD!ch!sB>K9m9hRDx8G^l+SxBI9NB!e(rnb1BeFTWcYPjHD z7TghAB>L)zJ|+GGX+L{Tgn`16@{g=`6XAvkp}RN@@>a8Ev#coD<^(I3TZ|IHq6#D0 zgXtW@(XmK*g0+vcNu&$uwzi8HT_4q`14tUpDv(eLg4TD6VOO=XSomX`@p|X&1&2O5 zRH{topLzrPu(O1uYi!}VU8n>huSr27)bzbBZoG=HZ?*WMB%Zc`bs}JXCrvs>qnnww z?T(XUcz)dm1=bdJ;viq^EOl0Ql&QtbPmVFZb76K)ED-c4S{bY8bs;@s!r z4>y+xBgvUg#qHe;`|ga3e=o0})XBX9nBTh}eq7LEwNCy3id@@3NBmMmd$ZWYzutmN zlO-C#%P*-x!93t58)MkNsI+vDfLTr&RLH=~>xtpo=Bi8pD$@Y21?8+SawBS9BEnKMMS-wN_c=|9*nnt0KVuxG;3J1h2wo~ zHOwe_h%{SHuAxG0K|89EiJ{NEjoIT9eu}PpP&DL0ESI@9Pdj5f z?5aPnLH3m}kb?{7HhSrEP2o5)u~K&d1g9?=>Dp})j#7;O9zt-EjR|%k2s+@I1-q*s ziOrz@-Wk)D)(%0}DI|Hg<2q45hz(ilxYi=@O9ap7`qR;x-8T0Qu9M@PTi6O-c( zRdhph9io46pc(%5{&(%=(LMcHC;$G&!Ux#RpL^KqkPd=q7%g)?_53mklf=tHD!j0H zt5m?dAZ!<$n$~HB+XZV5KL1t(mQi=FD`EV8MbM`3mcZS3I~f{sFWA68V~Bx0-=J+; zx?jO3K@s1u|M`<1(*AJ*aQtVR6sFSJ1dhks{8dYzcL{MZSjRY72RvxRgo2J7FO|Yr zzis^eTXJ0{QM^cm-JoX(N_v~W%je2V5^!83qGzSE$f%_{gJi*7@e5&4$7F)4Q~KC6 zerA%o;FTnhS~Y%+D(#P^S}L>dXv7%ri=c>hNV1Mtgqenh9!l2cty}OMUZF~Y2{JyP ztl0SVpD5Z1hs@f2pA7}+ji^!g;JP9%2J0NumOo8wWHKkF8)dkM$CA)f8;I=M=K!kD z>3`xH*P3$0DGm@E6?lp>!y=6$8IWut>0sjahv*{a@Jhh3Isp<#UWFa5+chuVqXPX1>6)u>ruX0MK(6rZUDyE7FMq|pO5 zPw52pSBTJjz^Zwv2&09{x4F^J1+9439R(TToii7H^%`71(dHK*jMhrLh17UsG2|re z25%)G=_dUrk8YfNvxMB&jc#EDwkhMnb6G`hsJ&ztx6E@dUFNMDX@JpqM&#%@V&|CF zc9$nOr>nzI-}S3I!SA5rEGz`)nSwzOMbr4ATGm^fM-RtWLx7a=8E0cP^QAIQar5e5 zulv|Ny8vZLmvD$1K}a4O$bKKy%t+cOTq%q7mOQ&^VQ%Zerz z_+lyN2ISa9C+g|;*G3>QT>M*{HRl{VC+HH3URJ_tbqF9|18I|jHyh4pcnv!jjBr& zU~T3m!j|7!9B87tSW#{*ZF6LjDt`TS-#w(};|esT?l!mv{%a_Td<3~ZUe9AG@^0S7 zXQ(XwRozh*$}pJWpLSatRLORM_8j=dwVNZHuL;^oUkz;wJUtSue_guztsQYo1^RH0 z8%$1Lz|_w?kk5i8&-RXrw$TAcQ}thKWh&3V{IIigbGp6#o=lj$<^`h{0Gw5e>l^Dt zGJ?yA337j>3^(RV6@C&K{Dcr3TpdW9LZxZbL{0ugTq#q?1nG9lKaGw5 z<(O?X+y7YJ`}jM4b|#w}a{XmZZjdDK*{OB~IekJEs!Ac#v^?e!S(>Uix;ag(cEIH~ zrO{$is+Uvs+vM7Ip!Oc{GAe%_*Siv}VmG8-Kri#Qivz<`wp~q`Eq)dTUvEcomdfYn zEq5{7B3CG^8?iFccE~57v_?2)W`un#syb0U@6|(`=wr=+nEi)QeEsOcfc|?hMJ)j{ z--*Q-eJagu4{ce|uobns>fE-FrDcHgMv;nVT*cTj)htp=HgoURs5#qYhqmTe{(mr8VMfualPI>!$E8Q^R zpw|}e)sWH@O!cdDz?Hgf$X#N{lfaS5$huaw3|oOZq9XttX#he7Cb#9i?1rUZxg&q~ zchL6g(7J#ly^#@s<=R(ODrs(X*v;|4TWJWh$ffKoOO?N>DL~0INBIO|qL-A2H-}SJ z8|i&zE8s?QXov`5L7|jDC&vGzo}Lob0z(=mRP3W(=HZFd zMP03m+LobeUICQ$in+wxEy&{6Tx@P0e=s+xNdd>pV2Kn{1`SiV`boht|1+_F0 zCDoUweK7|#rHe&$vK<#qaM2{F|7wUd2Qi_^{jKxs$b4SnwUNQm>54b|vD)!h%YXy| z91So)0-0%Eq+(+!`x;@@L+3TXn>~)r%Lpk@#EMuG%Wy?Q1;#s!{K-=^o7^hhU#w91 z16Je;Iv)F>g0u3UQZWJ|94vIeg-05-vO9J%{X7(Wcjb*!TzMWX_7&Jm;Or@r>{Se0tw-BaU`lZA7h&9#J1&`Px1&@HY^XCY z`^Y=Dw4=84K^Ty*X%e@I2inw;sJB^x*AB>R$1g;tZqnyEH$YZjQ$k7oC2j)to%23G ziUprL055TvBCE^ZkP96)`d9)w2bi9Qi<{}KA% zsjF^4RAj{?pZqtZP@!&_$c6~8BWmh5wOs8cJvNdR6)(I#S+m@u+S?fIuNXX^7*jhd zAwiD>S>;yC%<>-j7UlJWHevt|DX`_`Gk57zjLa)gbLat^^eRPcZqm>aQ7#}b>{&Qq z)lTrCFfmwm;UW@5s>nm(>rn3R9vmsJc3Th=kPKJz*FwJJ9NGe2X20Er``IEcs&sgT zl1V0iJyqzgdnFJT`rAT;PO&mHbg6dr;F1tWOK@_Yj?Qv6GQT9_)t&=t*WBn(28lGp zZIxjoMCqP3u!M$ej=D>w#OdVWJ%6k5*wfKuKckhiv9~AwR?1?*Kt17OCSzY#2MR@( z?7L}p&%^=}*K$QB+B-r<@Daa6lWn%S%$7yQrjP*eg1{BgOqPDXJj)h8m?mD8o+0w+O@^wW^%fr31I z&38^QY@bB@axC3!jJCdxd;r5h6S*!%m-GTzv4{E9n)gMu|Jw*~^2HS3vYzHCdelQH zx@zJEMk1h$k{1ZQYR2~o8lJdm_a~^YUD5cv$N(vd+7Nufq_h!lr65>5|BR7Ej5nm$ 
zPn*m4`nwobjrv8WZe+QqRJ}Zi_o?{bp@CP7xA`iPlrn8hYJON5%9v_r)&OuE$;$u% z?$BWaY=Ek0a5x)aIn+kwfo!TiKwM3ukK0&$n@NU7F17*v)9V%hRttfd%o3`(NomOIh)!e9!N? zmdl(`oTd_Nb5f0jnJr(sj(9C+1g*VZn(ilXlbv$4I;*hA2F@vtlxo!_bT#97GP}FI zZpA~249Fl?W&pz_1kWQsh#;0%0AUD3GEO=#nZ7XwBUy!UaNJ+L4jRV_2?96aYU^Euk}UPrutIE$d(;l+1@!o! z!ZEGMElCpNOX*CNb$tdIaMbqi@0_)9sHBWu&mY)jN6ol}Se_#hi$C+dQmXy=Dk<+d zMF7dkOHQdhE!1Gl;s;b;+lWd2O|dFWMM`FI9tG0O=tH^{8N|}tb)GQ!3vZ!mu?P}F z5SNc8`(+teD8`1eq_UZQ>g^{)yoqE9K8j7K#{6ez<~JSiQ$jX#m`x{bT7EQ@bzTF| zu`2{hX+0_FHa`o`%FEU4OK z{o!4o^g(~tzoi#uYSVaN_A})}s$wiuR-fke;Y{JzeEbRX80B%1Iv zMCw^seh?D1qGA61ZDKD9mctA!GywXmD$@H>12OP$eOwxj5$#cm(C=42P;;1CFTEj> zOqHPExPR%hks}e0poez!TcfC;#ur=!q|J1OZfj^!lLB3i)1NFHn`{_EpB7ryojZU* zcSWbZ!&nBO8zxDQ(^$I+t`1i7a&zGVu>wB)eteUPuhX`}b$>VaM@Ht1QUIGmrb`RZ zozWF}NgL#hcIQlUMk^5+7d*b%Z@Biy+l>LuV-*c{s#1P>C5L*^dn$DK5g72 z=e4X<>4TqBY1?XSDfF;Z9Cu)>W)p%B$UnohBC_+;ORuJ>c zhe>w)QKi5y472cg%NMdT9pGa?!eT;9o$+s9Wv~0ZB}12{HPab15(A=^7rvh>n_KIf zhP=`MXHU)}0D@z-eHjwKouUoaf0c#V$H(6zj%ZC3nPU&%e*GD;WKfk2 zx2tTglKNMk`SjcuXYDQ=0>r5|w2;*?7S?SCk+2ZhCf(Nb9u*ilBN+78EHaUhKi5m; zy)bZQFBnDFP9Jie!}?qv2=D2_%%`UEjO6OGSpf@HtJVc*SU1q!b609PSR*=)qTmv8c zTy8ePVwgNDR|Sv~EPW+`_&!iN?7B+g6PoF&Va``Q&XjD8$IkguD{WAi5Lrfz_-?>1 zKM0F*g}MOt%airsB~Q;(Zku)(PQhfBaH{hsVCI&zTjTr*&HwaT|9zdn z9kz%<&&jl1i`*f*zbwPmHMT26w?&(kpTp7Cd1(@N{@3S6Y~EkEoG-O$H+|)Pb^SN* zYi{tR8AcIycTs@{f*57p>NI^WN7hD1#2p<Rgd`ISlBxG+9Bc8P(GCFS~PQvNs-KngimvSSh+k zEUN+2^Gx2LqZoUTso_NgoP5vDYT0f|a^PAKauqaA)=_kRDVS>dAb}eLz!;+x(C@M$ zHn%xUdNybA3@;g)G9Q18898_^r57pn065Gs(O^q?Jt)<~!A+R0VS z;K&6~IbC6=K;n|OW#+)d6+rAJ^NPJgo@1iC(&{4}&+4OZYLMak+6?y>(yfRu0K8CU z%0@rH&vhlBWo+1}$mmxdXCnORhVOJvyOa69lcKqtKCcUiIf~z%Co$_*d$1iD=_Z1V zdK9S{+f7K4R*dvsalV-z1Zm#U5`^VP*uM_PQ%Ec)y_-eu1S@l`Qd_5o-jBzzvcZ}$ zJ{;>)?gUVTyjAdGf({Y5;5~gh0VUYayTw(w+Fbr4--SRV#|akCxE1&7i1h$iZ`h2P zHtLUzbrI@me{&<=B=^B=rKbyctAO3<{GR{2+p?27w(QCoz|`gBp`Ihm%b5DNS}LowV@Ywkmp4dJ7jzWvq3cDTMheW|>CK1bN7z z<+s5i_h^Yykq-Dd(*#=jHkhQue(=^zu#Qo(#34{W&?+3R3#t?HQ1t7eUQuv1(S*72 z19XDcJ^;@zgbnBSYoOof6B_7jF7AT;kfoU zLD`*^66GW!i9ejgV`Q8RH+y!}#(08)sEhT9tO$Mb?&s|)=gG1O4_Nv6E{B!{Iq=Ba zv@_>;XwCr6Zx3Z8K#QQmu12%|lipPa$NX7IE+B?i`Wq6L-g>Kj-NqQ{TTQ?(QeM$o z2Hp9I$rgHX!m?L(E=g{qe`gvIRliz>u;>c5BgHcDwLccPGl?7 zT~v2sy*`y(h0)tHF$v;=^BvjBl`5%Oj}+qqOBasVtb zoe@!^UV^@yhmv-q8s!}8DM*n_Tyz?0ILCxI>rs3Zz#vuk!zq}aC1XuvZbfq-I(spA z8i+q^Z+~dzv~JF80?NhUG#vZIpX!1Z<}XpdbUsrh3wF2VR4d|;ki{B1$9y)IlaM!0 z5I~hJi4 zxDA^47aF03n1gafG`X|gRT++lGCupy``CI&Z@be!I3aqru>n6e_Th{GaS0G=LpL07$^b zi16~X!1*tfq3cuz=S{aqUGtUo4Gze&M3bPwc9amf%K%Sz_BO1jS;@ggB!%un1X0Jm z$8BI>50qB)y*f~$QrqAlkL25Nfvl4}dp2K~wpPXbE|fcA#CdB`)e%BF1)Pk;CpK$| zJk;iNk=O1vQtGXVDIQqgZvYs@zgMR?GN|r&Xwnsjq4RK+l$= zh!6B`3kZLjqo-y&1E%9&o2AQ!>v)~njes08> zo^g76%j6>o4*4910<>Frt=oo%>a~tmq{JHbe!K)Ms=m7SvJ( z7e*3_T-{FKrES^03B(+6O|-TpORmoe*fo1Tovsn3O-(tQvC4u1w5%&sEa1C*ARTv6BNmL4XR3XRRxFNPn9@+O?yLnhB7tFg4tSC(fl0OC z%IM`FCl9}V0l1KSb$Tui|BatFF5XgvWO*Q@E23DT9bt*Lj34LX)$V&HpJQ=yZ^;(~ zd02_?L`v&R+_v~~kh$5GUb4OMy*bM(-KMRFH^LA(TewSlcV%y0ilr!}2xcJ&^>qInt=%$Kalp;L4yKx5um1OArA? 
zT0(LyZku|I6pC(xhe_ky4b=Lb-h;A)Ip>%R{kSAUHB7U8m~Rk&o_O(7*Eyw_sVllX z=C+2lXV*n3LQ3u=e?QndX;j zCe1|t(Lw&}y@S~|fl8TzZxm8Jl|a??Kh8g^Eps}o&I?5mR56^*Rb`heRFgcCb44k- z(K9$Vr$THW5mtP>VA1jeA1wp!p-k*&8+THTR>8R?Lo=DwtF1Q1KUuyc z=tm;FZ`7YEx@KafuA?H}Gwip_8In`;Nu-u%r14Vk@x{WMC_vSx$f{~M&sHInp9_{vHs73pGr#tO>?PB_(Ou}f(8VS`TOp*Js6RCLBxFfQDO{6RtyBPnXkSaY& z!9k%$aS_&^yWV>Uh78>t;;ax?s9r!l>qSfb8Xs=(*e8qHNEB_S-u%ZgG?8;wgq+IH z(EeBMCe~b(?wdDg65B>a7Yacar)*;xTbiXmi&5F(Z;^_U97Y|PVEAq(*oMV8z|U6C zu>Y#Ne*UhWeYC@FFJh#mPjta~@L}@r;i^q#cd%~^L5^jdY*1i~Y)59`UYj=$r5UGc zwoc2t*V6s%pS6YIaVNH(Lhmo9537g!m30CsK~AaMr@kvHEf3mdX&*eGbDgtCD&y^< zx@RMP->5o)-&k6^%3;?K9-La=0LcGYg#Smcd#)u?tD5xJia27EUC?X zg{d7Uuqm_O55wQZLJemQqPBa_Ld4DP)}hisQ%aW$=@Y>Zy^z>1j4AxKDEM1h2a@_S zOtu#C%wLyyVhz-t2W-Mc2QOBYMWPxodNH}`de@&0_{7LOAAEJ-J@4nPQoU~RJ5f=h z8Pm3aU_4n+g#sX`cjx1J#Puc5dLoMdN;vy7)xQKLG884W?@glE!;-mRlcStphp7O)0@U&SXmq7Hs_V~B#K z`hI^oV|IcP5I=6-Zu;)V5>*j{yVpW%C619K1dn&{avxSP^qyTfKJxGl-xB^9qeKVd z?(bC|CbZqf-BfxaF>>F}Ro;#ejYwWqVBD<9J{?56Huc)#lb`t*N=VepCn!rh4vtWE zM}JvJ=XwDU-3#yNvE7SgXAtAC>PzlPE&Z?MSOm_#M^BQ6JPlq!9%D2G^;|BNW2pvP zZ9jspFX4HsP~>1Bw}0FBG_xKVQ5^!;uU&oM6aOpPkGuW_j>i2#yu+xZkylNCE|*$|RqX?1YC)He_O7Wy0XlfdHi* zr2w09LUbIUjIQNf1?zmY$qC1v9C#hrPiEQC`Cg;(kF5fV^bK!mtLI`tlPfP(it9UaMYkQ1;BfY9>ee|hDgjN4v zk`8_VEaL#zf!)4CXx;CJj}MZoCDU{?Rh@HDG$$|!0}qj}aU)TK2^sUbaZf|R^Mvx8 z0~R;q;oz{M2dj3@#h$?C;2C0Moua3S(UQb;j)?H&<_-CDfk{{Ss@1BT;6?m0?^J&% zCfxF!Ls)E{E;&FiZhklevFRj!{?KR5Lt_m9*iGsRz7U2Y$?;TySB(4zy}^(vO(Xs~ z>iS7e+fdVVc3dyIF5uFApBZQlzRol=4kA@doJ;g7Q-_FtgHM43WjdJNZv~m44DWH} zJ`7LG-PE-`B;Jr4rTTzQQ`-6ISmZXYLA#b{n=6KXhoP4365ShCtd@xRoMmKW8~6Z- z^)6WfIsF6^q|MoJvQY1;_dB1W&DvkE)t^Jjh!8JPKwB%Ag1D0~;XGKHkoG19JGB2$ zp-E_DCt#*agocro?irjm#-6`$EOU>h%N5PCnWpM398URk)wRx0F7>I=TFiF$*JCwZ zt)~_u0&Eq9;~(=rg5&tI6G)=lmg1EL1Y-$ryp3JUYy6 z&)pfvJAw5r^O`km%l((swXUsE_e-Mx;l>!Ty*D{7AtuECE)4m+eGAoH52sVi9Qy9q zHs#=b&+i;{)GGX&vG(o6)@>R@5|7>q?p)izaVhE<9uZ#hbWle~)u}GG=1B`sQ3-k6 zUVBdm+3NB=@b_t)E(m5Q7;lN=Ykc&Bh7p6HsBA_cIco&0||tcn<^M1lkvShP37k?z~@YeAXZer{t9#nw%4$dbD3H$h4MW zwbD1C{uTW1G~`J8fAimWO(d8MNn33L$AoRNP6!aROY${Q-KMY-C@X8T`uv}8=8DG; z3Z1ox&IeUC9PcE7V8vGUsEd1dYOXFpGqJKL!;oBMs`eDIw-mqwnj+(H=}wk}22=2Q zk3wib{$Mj>Lg8`@W0c_7$E16zV==W9VNK5Y(vPr+L<0#*V-Nj*csj@SOrmvL$2L3a z*tTukwr##0+qP|+9oy{Kwso@h+Gl^MA5d4#Sr5jzhvh>3JQkfwqt!r9Xx7TS=8??D!Shz6{KH+O(!d{{T#h2lQy~4g!A=d`7H|uIf4mw;}&k zZA+C9z%I_ylam_mU}dgbl-49=vfwWYtGzbqOICGIk_|A;irJ9t(dmz4D6noT7O~OjjtZfb<;Dpp6$)V8Nc?BTFt){I7_7W}NVTs1y?O-igizEVGfPCr=!UK9>vH%5 z>LV)9ZY=n6fHhlyikV)mcPFpyD& z*|DZk!GDT+_{E`Hs&hU(amz4+4IJq8!8=cAMe`PNo@WHrziS7dQ?Eova*%Osrhdz9 z=S+_C%Jgu@KT5tE@1dx)Y%cG?{cbVHy93d8q$rd)Bt!wh&}c%Ysnl_Qw2z`Jw!LhPRI*La^siH@d}Md>yd}eV~40 z5={u`icc@5IoF`f*$LKEGc6AWJilLoVtLewX^b zgM+^Pl4Ot&YU;w*;%biROAUYq?C!Jwr4E7`g0jN`&Jp=1Nbk6e-6ndlS@?$Z`XRaM2k9J zrw*$Fv3hp`hUF(~yCBL5@PXHv;qv~0o&QFl%-7@N#MqrRZN@yF1aLPssq&WttA?lo zc^=+q0yTa&{dks6;i&!-NGip1PV~hcI&q zZv-FgE9MlO6b?V#I>K2L@0jRZsy%P)Ij9yZKYTTv8D^?HzW(I&r@;v->~gnk<5djp z%A3gMdH~{z*^^@5*Z(MaE;O`%hE8aHKRpdv(+QCDk5Mga5u=U|?w5iKB2G(a3&pR< zm@t4sk(5$T-Z@~K&~#1ir?)<`8LGn+kN2?K=Hxjh7S#-5$p%;dTRC?4V#R?S5=iiB zh)tb^Ikxyw!1-5dDnK>vA!F;_Gw!1eqw6#iDxpk~OyZ^ER2VrA%^G)0^0wN%kiheB zHYGBt&Ma7R8yVio;{)Z(_p(m-*I4)G?@qu+iMDZ|vPVu}%ROBWtJ%8-k(jI8N3ZdIgtD$(4)P5P#jsynNa2&*6b{6wfpx6y(F=AdoBL1YY8fcel&~}!sd1ePqyBc?+YLHz(>wpGam?ig1}Xb);ebvWyrw7+82f!@tm#2#uV ze<2^fh*iwkR*5`;L`z)r?e<<`&Z@eikBDNS3$3YyDR|*Nse4TK*z|J5Q|Ew~ysY)b zOnjf7jM~Iwr}KjxgAo&g{EY@8@Zx%v8-#izl%~fJdO}~)L10v&49jSmid_LNB;&VVHS(UZDmn`lf6MPhp2c~jvgmC=-T2g{Y@ayV-mwmDnfpfln_~- zRrqyE{6|Bv?5IMnil05%cEK2cz-}IMEvcDD6~DbWvVYV)SVc1Kj6LbI?4A{j(NyZX 
zd*6TP;K7@t6E&4*hf57pGJdRdvZC6xp!WQYo?X_^bx_a_Ho!Hgpao zpKlL6%UXM1K^`|WnGLY*Ds)1zaG&8UxqcTd*T&OrT2jQc-KG~1fN~a8zRsh=L&8I~ z;yXhdn-M6hBkEIvVluVRK*YpGSEBLnDe`?UCK3tZ&qoKRA_+&afmSM5@tF27L<--6 zmh17Uj>SnUl?a#GnxzC3w(cq3gt5-n2lSy3*sF-2OqHl(pDp>oS@3OMU|tjq8n}B; zj@=G3bRkI)*11a_1qp%4yYioXlFbIxAjd87j%oXsQD|DVkw^qodzLqiwvzQD(B-M8 zX?SDk3VT!cK)+DuB>*x}wo1)fX}hG{kbju7@~lR=Ia~g8BZYvm$E6_;76+$RHA0Ck z*bzRfR43a8SI~GmO+a++r zE>t|0My>Hi-ZAcVHR+udwT3>RA%=|>E}!kK!qy$1DuN&~_7A*fp|l72J>!-^%#6%0 zBK2++^Oa9juJ3>?E$u`(UJE7F-t1monK$-=Z~(EDbhCOgK4{xUa!{|g!re7C9?zQQ z3DAx8v*yXGwqU0|t$2vsC6XJ)?7FhMM(8+3R#wTn_^xMM*AI@>HPiL7RmK*$UBcSD zVaEto-0|~d1SUq-u{LtrIH|bU1m|%IIG7ntH2U@Jdl_))(n=c2deE`?*zV?Cbe-GA zz&W?;3^;wk{OX3>$A2Db#S`P+Q~n=RvU?A(A5*Ye&EQhMk|> zt(pctG2rfn=OLEM0^M5TQXmaW2k07%xDPiXP;G$BH5R=_skfV>x#D@b|lfNOf@wsmie)qJekwN`fq z>?)8-A4W@HpA#Co)LLOM#?YrqCTf6>va40i@_Uu1S&Eq<>9!A8T}d?Zm;C-<=SdXc zl6<kpTPcC&URUfLkT-(9k{znDfOjGPN5deG*^g{KB}-uoMM zl?pID2nlnxTBTWOv8a3p3vwB>V64d?O=^nyoU9i0eW1HMKh|S@Qm#_cFT*{HyC>S$ zPsGVNpLGGwKg8@Mi}d>*{W5w}1u7T-Mh7)!^a!pJkC#2H)W(-D8ZT^F#vEq$UxL0) z?w(Iwpz+0N!;61K&>U3Yx-M=$lKUgN;*+KFg&k4FGWqfm=5Vq@>FR$3g$NRm-0=Pn z5*;GpjLJUaHpe+K&5B>Ray1hNn4;-<%m{-CNRB4~{RcHHCTuq4AcHwY$!raPoA6r3 z(|E}7gJZX5G5$X%C`xSR6eD2c3*Z(+$0iQ~Ynw!+)x4@ICMH81xs!ipVb}{pij(eH zt&Bqyr?(2tI>*yPv`?%`c4uIM2%Bg)YurY>0=&7*VHPL2jRA@1lqVhaPSLMWpzgX< z$Q95AF+1F(>l&aL(B))JGkWKMWOVa_DuzcLCs55gZ=~P(VIK_S*8d#p)=W&8*UNcG zCpn(Ss+NM7rXNC#GKqZ&dan}Rsz#d;>&`e^LaiC_;?)^S(Vxp=t+gG>i#*k#*-|Sk z(EO<6joFGO+{+JC37d)!ZF{zK7<>ur?d_d?KDu{z6W<)y-i^GzF3L^;?`#uI#}zJ; z*b13fMTVrnW~56Eru`dp&A-;*Y$kt75OB^IUBW&5VZzY$4CG|dC}57r98vSzSXUSE z?b_aCkb9LK&MDdcz}x+v?iOo`Iq!{tg5U}%+q*nZ8|VX9-Mx4*X8qj@_Y`y&ksfzk zOT=7K!-jFq3~m0CB5hYd$FV_lW=pe`LHe;-Y<6(-5aI-q!avP1bRu@k3evYfi~|K; zvAc;@onXzf^w*Vh2u3d%YL%VM;% zVbyHB%C>daZX(xN4XFn$3rFQGiFjybh-`N3>*~MXmyF%h`!9s)S#WT0^;Iu68%gT{ z-v916x_unKz>~xofm{HN|Fn*3S^M()xW(FxusalpDXS)q{EPsEnX~P-YSmhrSlWp< zDFVSRE6@PyUk!HvhJ3`LgYJS{RpW0BeOaZQB@NW8Vf=M(cB$A{ohswlO-uROl9A8+ zqSGeZm2nYL>ZujO;Ui~-QTRz;LH~}Vfo?~m=#q#X(!~$uCr{AX&;eik(^m=c=F2kq z4zTT4SyPIAb;M4MF66`kHYQordO^uAAY2JC?w}qh?Dc+tP_B^B$hhFQ@@((QRF|Jm z4riDdf^m9i++^w`qbuh)Bgcn9j3ZTSoblP+3+Ivje{bcqng3w$N3K7qX`UM-VX-7K zOQ=N74A&8o-O8FK*i^|p4JwCYY(fVB{2S1(g88x*UcelANbkJ8&yOqknkFK7L8#TG z8kGV7n4bPydkFaxy(21ribX8sTXd&f3X@Y;epFVPHp17y3L;3V47qz7&1i)i zR0^@ni!`sSRgDf&`xNn%Hqr(G%)lIe7<=RZ@%&7jPPJJbs-`3u1(* z%?n8aX8cI%=mQ0u^b{UNCh*$5%7`fbiw;_0VIC*n~QDR>2iv+J$3EnOLlz|RDL zX(65EXItl*9^1vC7SPOJ=%DcwbPGJVhh%GzTr~x2LP&|!bxTx4Eaby#G$b*)#`w#* zam^88fbRnSZ@%>@oYXT`lDuJ)H!?T?=H#poo74dl*z~dtxDg)8`HjQyA&rjjZ(qFV zON8kUY3s-H&fC=6Rw=F&jfDgu+@e1llYNa+L&M>m)NvFkFel+8GsZ`<6_h8hCNl<(c@3QjPG~HI!=>;qZndNUDOZJZ z)*l*g7c@>@XjW_<`Ev{`ZEgD>&j<6={l|l)pXvo(H_;|s za}$b7o57OTeMRI3uvP@VwUJ62o4yoF=i`eUAS1V@ll1Z-P+mxKemAlXR13KjSvu~R z9Pm)&9b*DhB<9c!>vRHeu|?y6h0S=Cpa)?k!w=)16y7_UxIup&yWflc-*MQ5BSPiP zeW!}$cGm&LhIKB-XwAH9?aohrt;KlO?ncVYCCphFka^_UdT_jmJg-yRktIe47Bse~ zxdaJ5)|j~PL4_dv!U?5rcI;y^^P@x3-`4D*v1W1r>u^-CAD??L zU+w(yf2XV$>H7AW3}{2!@02vpril=GMAaEo0gKOaiE5Vg=PaBKs8!fFYh=aSe#0No zV?vk|zLIwdNR#b7&o2khAR-}>{zP)kdOI3o>l9?m99H%wz-KBqt>6O8mQ!E^q zU0@bE@UbmZ`n@%>2M|f-nZ8a;)Fx92Mpr$lMk45idINjE0D~Yn$s1!J1OND1p6{g1 zn|L8WgZp=%txEqSN$;u(B(#4{dj4{7TdmR-R4(Hc)br&#uQ#-k&%LBqHo-VaHYMbPleQU*RexyoU#)%XDS=6(oNlwA6@ zPv$C_qr$8?2Xt&1ytf9jXe#8OeSn8TwpxlgH=G?@h%#9(+0d4ap|9GZqh}}^b7P={ zX20^G<;F$Fi^ayCtX#)F8JIU0;|Ze}GxC4xujl|APzl#UQAqApA$sd)L9-PEA`o$?r2&c0puZOP6Usr0+t7K5hqwY*Bc8@ z2Aw(IW3zev+}zGOG}cZUglse_7B9qa(P>muW$*LFnG4~9DlEQLro#mUL%*~Ugd%a`Hy6|EW0`FA;_)S3Zx+KU;lCwPf6r+;44JX^a)PB;8PaHGG?a#7 
zu&)KvF4P^0zb-}yMvb?W3AGzQY|4aZ+Q!80$$q}f^_2(m#c}vnGz!Y$J8pqxB_2+J zvtNg2h8r?Rv4ScxN3a~jU~htAe`3s@D1%4$0REUU&GLvOX^#uUO9BrtjBiJBcp*7H zFzlT$4xS=sLpo-#dqUfZj3|lTii5`awCtO2LS>4c-hBA3g6g{D2Bv)Er1t*nH z%ycYc%?;|zj)d9ggf>CZNkC#+q)^uuNf+uG>;r=CjHcf;7*Ut4bTYEfL?lBP2|MYG zJj1qIDXe>6O=t~B zp7jtk8d!s+*c+~bloPIAwO?a7kEqdSt6eYX?g2qv!Fk~dUCI!Q_?)i)E(rL-oAD08{LxgeGdTe;VljmADD34In_kN zuwN5ZH@x$#1jUay+>SEuG%&=Wx^tCC(9R;Q+@ap93N3#!gAw)dNI#PP`=Ig$)A07Y z#_hTe@@k-x8Kdtcx$aQ^UI?gWdOK#%#%aq%x>U# z#|-8e94j&|NMHLES%Sa+ErEa0*n&HohTuf6eMEBp_!lcsO6ixe=s4AFcgM{(kl16s3#ZJHQCd zTWB27VCK&a&8l%4g&V+eBo==>tK{VLc9`{PIAg!=XeJeAQN}3?)%qM|Q1Lr|;n1Ca zFKEeof$YGW#1cY;TsWPL-xg&ZzXj9f)}Md>pe0_cz7Eeke!-rI*(qm*`I$0Y8bF+8F?F-1LMxr^mK_L4DN= ztf8PaJ}eB|K2nNCcVu;$RU>di+~0Tt9HdXGIMsbkGtwOM+A-bK?|gy4sC{*XbER&< zZ)>B{tubxMmm4AZQT#dsR`$06zSzyq^^At4M+C2%jZQ3x`MGU3x4=N?-GFeS0VYQGxYQA(Gd^OiciKf_Q4mGfpJZ-y^h)Twot~{;VTHK>Z zA8g)wbY1rpDit?M335%C0+Sr8oNri2Ugy_90UVzCR79fwv&G+GLt&CK&!DR$pej&V zE~%KJssEdqcuqkoyl-*v^vhEx=qRMR%uopqS_PT1EdN$$tRq6x5hq(;M?W`Lz9co# z1ErYWU4!-@Z`*rHmcP0^a`PyyuT3t5w!~Z<;^ub}kY9xNwhs~FKev+7LGRf>-m%9^ zKzxpA)S2>p#Upk7Nk4vQ5tVLg-X9x*2EQ^+A7AZ$XH>;-X!qJjvz{y+2khRs?G3umV=PHg6RmvmwUdS(_v z)8X`pgJUs_B5VTooaErz0kdu3vP@a60B!N};R-l9jML8kXqX}$Uy7I9J##gM( z@eAU)3q%)G%m#mAIbm#s`;qKw61f~8DHm`Srp_gu6B3?g=sf6CqG}piiz=3NL z!rO}aY0ySjak+k77E0z}^Sj*_ktiT zMhTkp{w>oFZIgvgQ}It*bX3LMqvpUrqLZS}IdpWvxxT>gdHheQFwrb2@y4R8z3q%r z=4l(aP+(GqX5o}U*?|p4{IJ0az?JfIUx#tpQj`-9;}X_6d;(|59Pp?PsCH6V?3_D8 zDJ%O|AEdh8PjlTa%#(j>p_{6O}vK|k(>KBn7SF6p^=?gXe40VK7)wP7*qX>}ah0c_g?}@;D5xH99wG+`AhUe~M~;D9 zmyDgDp?#f~O}<|RIE3jpCl3)e%TFdVaV&1Aznzhl;9lZ(2K}nlLKrA@*`H2~>i!E< z%TPt5wB6O4?h(vh--B|b)8YBKUsqX(OCm!`HzO2yI%GEE<(@D5!km`PYeEXj)T>sC zo6~xHS)R+BjXLFHp@;`7fbF=32}#ruJsK%vD&IsBShI01oMWf-mn(J*MChTtbQ2x3 zu`?eZ&YT@XGdl-3MEWiLZqEg=xI+!>@>pcWzBz`Q3iu;>xa9+KO=8F!LrgO zWQ~yDW}MN)II-HcM>zdW#_gMvm!~JIudnZ;WDORtvv&#pewrH%#-!y02*-h0ySZf}sU z(f(`VoONS!&Ci(>;GD6!hVy}CNuKdMvlgV6rLXG;u3L5z(9w$S+ey2MPOvfcc4m>T zklM7~e+{_eo9e}Wh+Zb0kk+Q1tzexj?i$Hio{b8mcER?m6+HOzR_=GHiR+rX^gdv` zLm7@_jDXq~>A{NaL7cD{c|EE(;ynC_H_)WKurm$mL$(zL)BtVZSof_^=>!s;DhDgq z$l~rwBj9vx1n$TG>y*Dw2O#clgRa_;-(JyP1BU@C?H-m3L^5YW(@|mX(y1k^^yt|v znny#H%BqvHQSB|L-0E(_d#q0}?euLwGoPuY? 
z;8c}Q5WHY9e+<0gX8+PXHR%3o;%Es|C`u{%0Icfw3M(d zlhP%4(s(er1KQ-!K@(uCCUr-PcTffn+{Vg((xhoi1HM7Mesk9ci4-+Ts!1gZ`j&2$ z+4$EoTaz8YR#{M1L04ffcZg+G4oWIdurE51A|eO;!7Ykz`3J-a6{kw6+?u9i z2Iyd^J2^s*w}oK)+jJWW)iIJhrb78EKMI*#ih=oADY=E>lEi|E?3k&gBS`U;FCc{g z17eqd@GvJ#OSPsOh!NbiROFX91ss_#^w+}rK`;)gUIE>>R z4GOU@CKM+Z=SvNH1}eu?FL2nzM9h&}AV73_SX6aQkAcn$eeHt7yoq9Pi`IvrHbzF5 z5EEjzSf4)vT|wJQqpF!IQm=jaTHmLydrwq zWU#qD`mMd)Ju)_WmXviFou33Hl?WOteFUPyKN)P(wA;#tu`Vex&cO|n3kp+B8DI%x zO7KN01JV?jQipdCH%KF5e`QL_VeaE>!#4hhY1VLWN+H2gV>9?#4)<}9p?f4w!w&pk zFyhP3dqSog)C$=gg}n=F?Lz?%iHb9}X%=Igdp=GAo!nN1%ITt(n7CAgZyH>OWkcnn zkrsV}W#!aLLxot>a&aJS0J1bX8DNC6XWN&Abj#<6SP?wuq}Bdlrk8JW-$qOLpCEZ- z3qz}8*AW|aYNE;^BUH~mS3+mnRZ>){5bgd{>DciY8lsP;t(?L0mEqgzQOQt(F%Ykw>1=>!R7qv!s7PAUSV6^J4)7CP1D?Dt4{x68E?`iDvnS|n zsa(mOdbF8Z^kUk37%snPDA{$Rr$^-+nXcb1+3&$uk)(HjInm1FOzhCWaStcR8eFR| z3&-t*QNuEp1fApML)6s9UscZmxTiK#x!2wNWrv6{ zQP<3Q&ORa2!Gp~e{hf-I9gz3vGinwX{o7EnL%OCq`|Vf7*W(HQQV;oA`ud+IU7gm+ zHo9u<%d#w*3!5~b$D9nV0fdZQdW0_Wk=7#5W02e9%ctSZaQfrMErHxQJ%*hXAxl$C z4z5`@zN_gDryFX>UZiI2Ui(0MCJLJ)EuUQ&+c5hl61{PDc>wOGkIy}a=m`6H zgyw-qNf#3C13-qalzpL9fKR(f+;OI~$wTkueL7t~5KLBdHinlS1TuNkc6ecuQGMMP zd51J~dxS2|-`+GSq*t0{eNgL2I!drQz^bYxu*9_TD;;-yRC6QaFA3jd$zjjB?mhgd zLGVcJL(snotx5Z~cmOLYr@^Be-TJba!1*lv9kY0W{ASd_T=!9LkodFe^zF%)4%S#< zu4?by33Y7Ge;!}Jo)3?JHT%d39E!3M>D}uFEF7bIawhu2d(de=KCx~2JHYNkW9#s# z_Tm`LnbHm2?NcD@N&N>sC>xj7XbkJ@$ko)KDYu)<4iwnW{{6msrI1m!%%lkD@?~O7 zTw=$=NZc)w{4#n;*cR8A&$4y~Z~BA&f45PxKPY3Q?jHf%x-@K6mFv7L>|bOgmXSPf zjl5Jl!`Y)%BB4Yo$u&8EZ)g$4L`3$eButa0K1%qJkJHOmV|2S*8=8(#!nRE^?$#XS zteFKMuP9?0Oy*dshF-&sb?hiYkZ;U_FU!P~kCG0`d}XXKJGGpOR(+ntgszOyZsH$p z5xBzeA=4OSkPHPS3r$TTA}=nQ6+G7v&2=}e*8NNTksq%vX^@rNLI5+w9anwFKmr-o zb3cRS^GuS8m@uq%=7GLYk>nBIIAmf~TVF3Ad6Ych7`1919Z%M%a5$kR?+Q{f+<}(Z zj+TYYxkNsFJ1~WyM8|2Od&MR=`SYIhPnK0n#>DdnwHURW6fa5W*t1tK^1#&abf1T>EW)0P$Spb#BSD z`lYLP681x`R zbm8|-a|E7z(#DGG7!1W)YDm!|!QT?fOY_rKW#Io!XY3i4^__0&_Enb%LE8y0Jfaha z?;Lrfd^0kj#2f?W>JniS9*4;~Tsz*$fgMrt_t#MvUU{$Kbq`p40vQnO~M>vKU68`AfC@}J(5QS~sleDOY&7dt?+}}SK zRV;O97te3QKRlLRb7SL4$#D`3CK^b%-4oCM^?_{ZqvGDZR5P$XAJu3xW~(?A;@mtg za6`LN{?80;NS+PVDy5nqzBU9voE4C+k2~=6ck|@#tiY0XghvTGL!1Tb0j|KNmZG#1iJrO@&Sv(_Fv+j%oK z#<@%JH%5qlo{WYP%GO?MBKhonbK7eU@_kgY{Tt5c<+FB?_UP@2F2SrVgCiNUugdSTMMo!6{D<(R8!zjL1~^gfIxtL zlgOZ|UNneIHNmOt{hS+(RI}yqg?J0YAxoG*(MNbXbA%nZu;>N52;k%s;;nkdNLEjo zCIh+SmylK**x$!%;*+B$4>6KL4Q<>Xg$16DjZ@j41@XX1Iln=}+_%GS(y|C8gT@&s zQz?{;0rnn&T?LX!EYUP0#V;xe&?y4z5)TRztZ`(5vBQ)SbGK^=$7s(EcSps)$%lDS zfQ+gQWWlP0%iMJMfsYYf?zIvmt^zBk63HZmq!(P8SzuufUsLu%?3`_b6{fB|PpW@2z;U!2e?zn{I?Vq^JXQ6eT65sZ)0( zv@F&7guh2kBsxSG>rh?`Lhq^C$#m z$L_D?Ff6VchAzPLMenYypWAUCtjc%|`}3V71GHpQ5w2QVmX+`sq6M6Y1;sLqNV&FY zF4L|Cx0LLJOM}1Ss!ItVEJG6r(wr7icLmkYj5zF}6jn!)Z_-YXo3YxFA}!0rZe?22 zv4U5p1f^*O5%0(Ae%T*{myyymRT$yX;G77;7Ccu_eVXl*S)s6+n-k``2*1jLiKb=kHIizf=1IvM5h zugsiliTwF_8hUkmOM7&9!U_CV;=W z7qzj}`LPD@^xXPLdgh?7ZxqY=gh`~6^|^OZ)SOylq~{Cjyo^czz2rL)TB*MkVx4<7 za}#K%9@D16`9^m*rIXK-PuBS3YP`M_|9aF7D>2 zHeIcE;s??^WjTTKtr0oB?6U5ia{E;4jQso$Qx@=jp($;RBl(|J4J~nk&cuuewSiCq z)IS)Bfh5k9K69w4L^@-*`YOLuJofXkJg3!0GKIDpF#&9L)M2t87?hz1X})DB2o5OL+4*)G4wjLWG@I;k2w9m84O_DK&*qO7lt}Q{#~!I(^taVz|g10zO_* ze+KK%idpd19^QQ>(}z#WxHCZ$5t2z~o>59wLC{N7X4KVt(3;Vh*9C}kaTUCj_+o8d z&fFjS`|lW{@mA;!FyssrV@%^+;1Blx(l7b2iAJ#dWHCMELO4oXY_Jn<-XQaMX-_72K2_$0*?S*Ud4IRc zMyRXg1fzng(>0n$QLZJGGb`L#;&azuz1-F4EteqYIG+h?jPi_xn0@%qlDYO&D6~vt zaS+vwS98d_QC=gz5w%rfKmwIo$TNB{K52!MOMAFdxeH}W68ie-y}YevRFD@oswN-?S97zb`AbKdt_7hrCiCs&B&G)bP8HGB1$c3)#iDX8kK?6?7}i zqk#E}yg%j>rNedxz~jNsV>xoS(l)4+aQ1(fY3l!1FCznY%nn=bCvMX=7O-T#H#JLU2 
zm1w5?3NXao7$FmtihD8s*)M)&B*%32y9E!yCuKes=-7mzYYM7?3}ix(5jX+V<+E(lP=3Wfyt6`bjgRi-Qk+C$<9gc^<;$U6 z41!wQcR{itt-JMr#rKxzmEesKu6a#SC+(N5HM9KlgJWp-Fb2gXvWyqx3-@}mtiut( zIA%FE-PxhXQ4u@I_7zhFFlT^XKf9pV%gFdawkA9nxC7H z@y`9wyND4F($z0pxjJ5ojoeM7i($>@dg{7tM>nZ9=0agY)o}3lgS*Y&&r=%xfz`5c zPfwAjk%>jw>h3q8_DJ&gi^?M>T%W#I-0m}G^^s%_hV)@$5$&pt`;Ajo*;L$}%G}=T z=}cH1c2n)6OsT0_zCgvoS+6qI;d zRr?8BLQkqDv)KxAmc0;%);%Rj=34N-T*)K1%OJ2F{1lz{?P+zCrX+WCr~Mfu|=L*mcy5W7X_%U zlIVXKZVE&(qL}Z&pwu~!LXf-jcjd(!?g`nnDK*tEE#GcAGd(+3YE2sb1MiI+`FJPH zG}hX+=k(Lowzm9GCy(lQ@ESe4J(vU5d~W9^kDlM-C*2et2W$YCX766#ojzNvxwccM#mr%Fr*ifoG?yR;%I0IY!*rg`C!<$FW7#GnV>k=L0(W z^v<7<_Dm}Y^z&J*?^-c9931=s=L_$97f)_npAf6?R~tWX$n5^jev*3<6MxFmFBV_Y z0#cRU#%mX5k9pyN$Eb#f=2eX+0f>692?mRuQtw zM37kU?m>PtM>Drb1RUj-f-Ss(8?MQYo)S~wQA$u33#8T6GlL3atstAP2aSojZP^1T ztQZv$ScFT1$Ep)q33G7_7dWn4HRdWpp3B$8rq)|%sfEm~sD_M&n<@7MPY@O`PN1`C z^Q^o}lMZ06><8Fwm&tK|MTf@yGUNCc)cO(&UpJOc_MGg{QYFuC3SVm!Lr*1S1dMV> zGa>LVNKEzDy5;0DO-IrBUkh}}e*8IrV&|ekUR+BHPsyY3#HI}$uEU&`Xs}pW!H`hM z*qaMxWor@2u}gGVmo7T&fslUdRNDJaYLAYBf4_D#*hoCsL#luZkNXj&hr~o z8~1YqWF>oRV5R%t=JQ`xQDyE3ihYVqH%^yB+gLPXn40BiY@=&VIl(?f#5{q3Y2s8w z?~m2p(Ug*w_pWbT5g#fr+C}!=)N;1*T{b8N$zGrfut|o0*}b$8T*1{!sg>DSUcWj* zOAEbUdIg@fi6&P1Uv@S%jX#W2EQP`!%MOx@2h9R`E#zG2lEZCU3Bl2ncoQNw=zG6> zBz5wi=6<9CG{s&}DJR-VfsW$<6Mx?%tdM|@ew%xy9d9*vwdg0arLttz8va66fwP)} zq2s{PbksQ-<1Za#BQSiI)wP;{7~Hp%I2}5H*QyFaAD!jJ+3koXfAp4~SnJ{*o5CYaq%N=C{@r+lXb85kf7DI6ykxUC=HMRej+p4+P~?oa5$HCl;Q@RCswlC+=5d); zT1oJfVw{B`V$P(MObz7#u3s_l>VM}>tZ*Oy9?(aDCgAt8oEQw~pw+oRZqhP)cAkGS z$&9iOzH($49c-KYgK0r<_bFs{bS^=GG;jSYgNDQroL8Pw=tUj$cjI-+iv3lJap+VRS1r7pf{3> zzI^&}3<}soaLe9-lQzvPF1B^wc%kb_AXgm@Dh4p-;eM183%Pbjpblq z2!&U(q)DgXMt}|g^fC7bc!h5aoivBel?(;_c4MRDkMV{}42@Q7Vl>WmqT}ccx#_D( z)H%qLRQ8IS!M;Q1;NJ0O(qqm)iBjsv6Zn@Zq+qtX}=kJTS-B$Mak8k||&SagBhu7B46>_6m`F`k1 z;>e+z;`n#Bn~u@^w+GQVy(ecKdv&bN2%NlfSuP_cPS?ef}A0r-Fr5y#y3N;{x}$oJhN2uG+w&{nUHK;i5rAstI?d zkzd}SMI;;H7lUiq;PXG3yN-0Y*__N-4N#h1=sG%C+4@c^Yy_ zc2&+R^Pu(;TOK`;5?fr62OC&KEr+qcXaq@SYXHebi?a?DCa2Cfc=lKUi@5yhW41?hhWh*8<-ZmcGhB4zL$HObp4uE9{ zRi_3VDGbF1eH{$eyzYyuxukLoK~0vib}1DQP$q*Ij-FYjen7c59Y7S)OLo!;i>g_A zmON7Sbg^~1W&cYi@~p9(Drq-4^;?$CCjlH&zh8FFlZGUcKDH3knF=D6VK2f7I?EKT zAE6mJ%QD1!_Hz`C;MxBQrgh*4e7wj2t_7iYjLFgrlN#;w)Ou=Z6SyBnuSuN^7A8+r zp}4h$ISzk{5IS(-SD#)9xnD~&_Y+q>7wA2!a-C*LiC!}+6^1rbRat4~2JQW~A%GdA z?6HK>ppx=usKN%K47+2=zO^L33)P^pGo>yfjLgBx+@)GARhS=50*BY$`T??&^N3Wb zO-uX8EoxbYJ$67v(Qg~KMUfw*Moj?D$3EhSOQJ$@L3jB?c-4K1T0WW&)?l@n6_K-1 z&f|^vvUSm#ih?qgfc=lE(s2WBR2B=%gd{%I7fJS~ zmnkh#TeAWzqt14CDu7^-u%otdHh(`H$u4|1qxf<3b(YgWBn%EQK2#wg9KbfNUWafr zMB68v1{d&TbBmY>BEy@dkFeJtDZt}#Nx(_9=MEZFK^R z6syxc=jP=Qs$rJKOLFMl0?3n}z^}pUwu?e(Z)CQm9gwAe?rJgEZ|2eUqeq5H&(b

[GIT binary patch for Axon.epub: base85-encoded payload, 419383 -> 418394 bytes]
zW`7V~HY_nUgmI0ni8j?vKN){Cc0+#@A}8>&Dihn0K}7;$EJQSbD3q3fcQAQ5e!HND zo9T>meNj0+w0)E7h3amHJGZNVoQB*as)|mn?C>@^_a^5}9T;XfHd3$ghxG-MFb#!y z+YY^5)3=3QP_BF__^mgsRa4mHcAso6DjUWYf|yLvgVxU)_Lzw@LwA4uOqHO#3o}ME zeE0zUF38m6m+wD5aeG-{E0{@)N%f@HTcG=Avmlboo@+9FvF;J93B4JYv)-y7jDtas zvzZ&@d7%jC`qArfI4XH%2*)RmIZ%|Ne9F_3;(r*ufp;=f5#2iG4zo6BeC?xwI?jK20ev2qjMMYf49!9`Mgxzw; z{le;7rc&cNM5O5|$KxiXn4zc+;7?&PX4s1I+inLF-R8FQ7|?&5p>EhYaWIk7Kb4um znV+JB2%M@nc@0FioVj#4XWUpLEV7DdgKXgG20b0mItqOP<}`O<^m-+ius`!CL70zW zDYdbdF6|COX3b?HH$6lBuws_{ZfAxPEuVzzJg?)#dB));p#pBC`fMq^L1#yzzGPHH zh=&MdF3;u{zJrR*jv3KDqBmsuONr-n<_w3&6dLL$UTXKKfIoc}ikS`xg0Esw8Yi0SiQ_rZhqd{GWH~<*f+j;f=Iz zy~|9nxuky;zuMNHLM`t4oGmhym#B`-bm(P>0#g9P1;w;EUQ-e!hXi$ywJq?|m~fF6 zGfrGLQxM`fF2-SwWx$gC>PyDZSS+L+q`Rzi2OxJT{2MRHQf9acAs$S&Vwf!o8WvdS zknY=h4mSZ`qVi#|K^%0dZcw{;P$6H)(zFwK+Eagh;PXGqloRv6plZM2@L@Bu>)`>s z%|ogoT;@z^b5|b5V_Z4u1iN;oZ;h$R*K|#6$JjDqAFO)7C(s*JdntSKM7<~=g8G&2 zwtC=eS&2>%^nlfAS9`Uq{x6@tyC&-=d;adK;%QH4!Y~{>>yO66Hc#Lyi-ryF6OS$m z@3SfB;>erJelQq5Kg+W^q}6ZMG9HJ|&h;{K5cG$mhIb3cX=r#|e4OxNml25tYJV|C z6c#z9sX)b-V4)lkf;3oPqXK7|D0+A+_&gvS$%r7qaEI1wRHX1OIpI2npRX}D5iUeQ zp)eA<4i)|P#sXL$C$l$+iXV^yAFro4%G#lV0hTeY*w0`gf{OQmZf_2)FPmgjb+)wg z-dvRy70zzoF8i~C2dPkTW82D>fq&LK3wgepqvB!s4Q5NB;$5(ijtNi% zM@bPtYCJ>;F2y8qVU zs)#+l3yj*NTuQ&ToJ;b5HR%qTg@0#V^}<>A z%9-we$+<+vBweCLqpy^D!sttKnNOELg@!W2k@LPXpVt4ZPwS{kTE3`Ldyo6Ht*h*_ z`EJ{T&OPFK^SZhX@60gZcplJI&8DB?ibh+62rww}#kj$IWh$>3gcsxrd!u%32e5YJ z9)b*O9L*QnCxC?=5xdxCmw$0Fe2{)y_Gi#34p&qDIhFuW%d>s@C^-F^D5`y&YlNSYErrHZ2+8DB1p>>k!k9>` zwBihE+>ab6v{Z;D4=TgP$@8b=FO`#o+oPX%Au{Sn`q#4C>7YZ>YO5jPl*m3fhV4Nz-eR{+`IYyWsR0=)I z<)^M|K)krxdHK6`KAqPK>%W@Tf38?EJU;Yyr#$Vy)#<{meU0ww%9QD_T$h{P|3VLq zh;KLj?{sC&!d1)t2~DzU?ez?u1OTn-Tid3kt8VUjclUp_ZhyC)u2EuIXVR5hENnGH zeY^2(?)js8j)F@dy)<9?6 zQUV9r2{%@}k)jyMk&4$e%(wS-0eoGK)(DpQ?!Oo)QpNTU5J=5aIdX51|5i5Q)lo z78^q#5yo*&3NfC*4{brP;_*p;EVPhRmOwHqY(hWJw7! 
z((3c8*UX_)25v;a4;H-xt^8b2=ScWLk8N0kqJQ`B$jweWAiF7VRBC0swvz<9jL*45 zFZ=_DxOV3TQ8#ER9#1}!_6TJpD+jDb0n3n-d|tznluvb9Iz$#fF1a(0OSfg-{4c|j zRFr8+BDHm7hfJFh=LP8y7mvEsQZ8N=NWxS7$XM4;7VDaV^+W%j5$qij-|aV5@;<*W zwtu~#4?uFhslLWv$N)fg4$G?EwPTbhxbq=6LySm)!TfVxb&(-|QBXKGb-8Uo)&}Yj z)mUL~-Td$pbl8B|%|7`+`6K++xeQP>|^IDa1fQ9Pp6-EWT+pGfBX@k#NDmQD{s#0i~d z!dRme*Cu{ds?gTq#7so*B&cVh_1bA!r|qP)<<=T0Q=yE~GF~Dov^hF4L;tKAh}Tc6 z1|sTEHPR%Loe#~WQ%^TOG9ibRM0RRC1|J+&KZ<{a>f8Sq$T&dcmL9tEv=zkid zy-$7W+q4HaIKe702uPP|+W$V&EZSOeramQE*E|sMao@iC-Pv=Jq;X7>8RMbPQ<8tX zy6D8Z$*+d1x2Qzd&B=F?zw^v*>|H~=LiEC4we_a)RWX0Uky|;s^J5l%&fHi+^7B>y zX-{~us0(TSn$}!P?hT(WYJGnZ`G0-cO6eDBEp?}AEh>L_QCFMZ+>yPsPA-Ofzm%d- zdbP7rc!qX4CudHq%KBcmex>x?-uG(%s#TBA{WbAKtAt;wqL6w{`gS9UQD5y`D<1#4 zM(90ZFSk9(Z=TEd{*rH8VF(6amfTz)Ju!G?&tfg^z+vrYK#wX&oGMXv59lSjnb!}ejX43<)9i=e>=Puvt1<9|s0H*-ZsyAM$+m|gZ z#^W&NG-cGMB!BNrWES^VXCjUviZDbo3{eaa8JGKG&(}+mhd~@=2}aatZD>gT(Pf=j zg(QGIgWfa z-JJ-6?Wyv0kZ5$BDZ3Ai1bAH@F0!Ud|9~{OTpq%fwIVrZD9wIiK<9%?bL5K9ce2&8 zdY8bF1tAbuT6q?tWSsyMAZWDrHDSh=@sR}-6vl~rkHBE4J)j>LMOl=w(7j(AWS1F| z1tBsqPf@Z?kSq|0>@<&f`d5nlJ3?+xO7XpRdkWAAk zMwe`o1sZ<_%@Cs(JM!#xt3>x!CNGZK9pw;G+i@_W(Ebd(oGdd5O%nIw1s+?x-u&dR zHa|HgKlyR@MojX2-qt$!A|7Nrnx~d58jfGw4T^vqvYn*3hRC|-gJ1>~F-#gPN}-Kr z5I?6>*ez<+c7WF=;^4Hf?A%QMeFm(s39{>LaD#s=$AYx!VgG%XlKi3Aaw0Vb(mphmE%M%T&pqeLRB+{n zpC*4@O&4i6v42U#sn$2{g1Mo8>_+O6m>TNMmCRC)#W!v*7+wwof86uE0K^IGJii8B5wA_e-UAWa#0(yc*(PfL5mDR39n`ga=7yJ@{?*mx&>s(a z!>eF07>)X^5}XjhchK=b)K-nF@l1H})VY6#XA#NhOd=Vd6iF~1_IhLA?+3%-An=1x zs|05d$>8Dk#xGWSA6r=~;ok|T|^s!U$rc)}8tw4G(G~Rq@VNh8(iP^H6-%EhTpf-J?BvF4b zyK!F}AiB4WGjZz@!wiQT_Z@y9MiP@SO=g+M>)gqSZT2db@ADQep^8y2VQ~!N#x*Jf zi<51)Z}uGg@9G{&qK;3u-b2h}M?FsX@d-x?#1%UzkWUb3txF z3E&rG%BUuvKa+R?S&e zV?0B)t~hBDjlOG zBQmoZ9Do>9o6hpyZ1F5%Yws$g87{GwJENKAO?-%rH9@5GAL?$lEPwLxz#tJ|=S6Ix zM1iFex8$zPh#7U7R@1dHG~2ilNw6}}z|-LS$h>vz*Rsdt#cc@^KNukHWRT69IL5lu zhU=ThYZvjO1-uDI#8SDXU{MrMR+vtHqZ z7XsU8fFE%^=kVoif$&J+mT=0Sz1E|?>-BL@^PE4jmOGlP*-l`6hGub05^>5e>wKI( zQvs(Z1hO6rrd{D1H=h+0%B2X&Y!AOnoBIqG&kY{EK6?iWw;5wNcY)8hLjjDR*4trt z)!Ps-(|9K?zTbReZq=K!RVrl>uA5{_QoTZQB6R-etBJ^~HlS6ZV+$ERn+L9UEwDteX*I;IE+kH#htp5#9hnDVf9?+8Xqco ztmW`kq!O_6mf8Xc!KK7Op2tZ*rKi|k$Oe~x=(hV8$*nXyu`yCw3lj(Sdu>8Dj)+1c zFztzD8Ap^eWaLtgx1aX%v7XDj*O)M~RF8-OX|SHtz7;FmWpEBBpx)JuIjcJA)Ul-` zkwFJBZ;5zi8p?~+#Vt6WI00&;5jbW_K8zJZ)+@o~DVhwBO-M|gmV+2JWQqbu^<~&Q z+Fyq$b38LTKZKmp|C@`buCZ}9dTJVSav(7eQwDQpM45|!cs}0K%xx<|EDnSdUs+(E zpAP#VC#HCse=*W_blT{b={XZYgL~V_kg~?ar&`SJY_&~km)~;X;gi?Fh!)68vdo99 zh;hkCy5=^(QYvJe75r;4xef@L1mbW}9&IqgtY0pGLY8i#TItW;qRsJ;30=`DPSHgA ziz?~VnZ}>h-|mMi$@r#uB15D?@?474NN}jD$qsnU-0QX+Tanh;{YS7&2=h^m$qwSd6rO9Mf=lGzhmRE_mTGY=s$SM?8{(37cfSrNYImA;ZVrzzN0M9oC zK!cEV%V2dY9(IQ2&Q1TlXEG4Cyf*uE;$){1zR%ju=L^Ys-Qs0yGt>Dg@V*+guR^6s};F&A&qNW1fsxpoLSZ zJ3oZ$Y&e(DIN%O*D{)(_O%gPFjl;wMXdTXylX08E7_>1p6`qeANb8A7l}XAS^@3vl z#fm7O_CP16X&X@vrYHHUiKSJj*juU=T4j~7X5g*RrbJ}_u;waB=eU>~d=I?iAcgQ4;qps}MN~8R1lkgJ}zz{`OkuNri zMww0`u$#Y-2>DFcJWHyI-VnkE91KfKt{=pCYDTx9S-xLG#dQXQi^V35)d-DAkp8V1 zbA?R)|d@PfwWxO)v@k|!mXVX8UUnOupNxI^tf+tD~B%Lwdmim2k zKb1IeF1!bp^s*5;>ziLNFx@I#8_Zozr9XChH*>vz*&Bpq(p?*@=o+pwb5=`)Z|^p4 zbjed%e1CT=1Ato9a={6apr`CAG#&Wa1ea2MYr$#$cZbv%^pz84041PPt}a2e_&;_D zhQV9%=4plnrdUS7idmB#rXsvwcaV~6#TL7q;*=6vha7bMI2s7z60dVU?;bY84001U zFi4;&UPP8c;>~f6mQqrGcWPIQSKcSgM|u2EqV_#vY=m`PsLOO^MJPn({lEu5o#_|= zT3S?Lyz&;Mr2@khY(^kpiCJ@WzmpOJwWQ~>tOqXGsxE$cq5WygY1)8E`Djvi zdeC;7DBsCV=O82D5>F{DBoyD5x=3|>kYWk=%=WlA(;dg^)iVJz%uleW%W7XQ^^%ItcyuAr0X-W zlGKtj_u`!N?j+8Ploi#xPzbt$L1H)johRU!VJd|H+wn`F7IC$P4wp9D0bo(V z%(_fB*C)(ldzeEq)G=tJYmA--w%cIXnpL%}So=BQgEaA$$^a!{b;tVvri}(6I;pIz 
zq3K3{0=N%OlUH2Own>!sKUmuzaU7s{9Y+FLugA59LR_Zt%B#+p797X3~7IhZe>Qo)N>QdcTC6L%pFSlkhQ*3 zRQk%`dZtOS<*LbRgAp9`OGtI)*CdKmsVE0DCNa#SBbgg6ChTZLXAwMifaMF)8uT-; z;qCDK5tJKZ20q8p*AtVi3$&9=QFVq4>a%2`Mzu<^{(AeO!wDOkTSmf1Ov8k;bO#L=;n*w#^sg?T_!3Y6o{`ai$qs*R z{7GDYLB`JZjnV%0fj#Af`HNMDFWq?H^i>@#(9`vZ8ML&0@x=jRvaxov-{P6>qliv@ zGHhl&DqfOl`_p85!Zb|Fi|?SE>0waBADB)dCGwD24zM1vXsFN(yH;~4@?}&|m2KJD zNR)N|WlHb|I1}5Zy$Cno`j^Jmjk~fR>CE(}mL*0rt|$AEtl=z5-n)l4whxxLihbqj zVNoz<%t*7Gl5s_ypgl)S5hiI_}7<&kFSwz z!yOoeU%>`E<%2SpIoHoY@_GdD11gMMZ2WSoZUCS|TP~Nizd?O>k(+=~|9=C!4cWiQ zD4q!^9O0xUE-CmogNo95+AFIfpZ{n>;8i*b`}4m=YRzcxTpLY4r@_b*QHOoaD8{Xlh;)^ z9~QavkZQTRTDTlo-HrFm--Pt7ZU80S({^Gp%dN`QWnfD?U$2XF{2?jkhz31rf^(70Lu=6Z)&wZGPBgH)(`t_Wxe=#2M z0|1KVS?MNg81(ke>DW)mur)?=EO-Q|HA#Y!XG;H11d>uJ;N zG#Vb6lUH2wXR@4T4OWlMv~_l>??lV{CCevCUaNC)6bVLveZh)5G6(!?u8O}jC1gecc1Xi~MjeAXi zNoSRUYt3xg-X5TRKj7AN575g}kRqZZ9w%LwCq>5^2uEXNxYwBv!?R@!Ph3-V88$ET z1kSt}EG9n4Y32HT+dDkG*|G3@O#{#n#&(4x0Zw(oo<6;g?&8<84lGK*e0iR578$a! zZbUTr#Vhlr@y&ud4XO}q5^OR*nd6iS`C)&lLG92`&)HCq>%UzemZ>NrZOZ~?=kR+W zCY2tXA$Z4)B*;As$wJ=3KM`G{kQ5cZWmi%hX=F3tfp?K!+Bp^KxqxU{mWqk5Fe5Bd%plB$f?)B^MhTW7AB{oe zy2s4(OhMO(4Q8s3wl`t}6%@OIZb7_n@gCq|TDQlG{}#VUPbKZk$8`ji9Hyi@%N7b% z*<38@zWH05W`xQHi9h7p1dbB}q^%C$I~9e`7=@+zIzFjgh@N`szC`xilN7kp2kLg4=vOGV#pJ(XE~RbqdK zYzX2dJam5gF=0oCCZ6wl7x<31@o$g1!LJh;Jab*f-XC;34x2(> zJvnL&c}4jrhe3?6Wp^e{hOnUxjdv1U)MQULL`4$H1xwzzXfy}+Jltgj_;nID94pY< zcMcJAL}r(}ELHtm5q9FYoe18C5@oX~*U54vY_}zo0qg^^n8M4WMJCmZLboSZ$h%VM zMjX$f-$rlO5?9*fKA>jhIgIkJ`uueWOM#;dx^5zWgEOWsjn!4Ks!TuMwh~)OWMsUx zt~hXcZ;>O4GzkTUKa#LYAW9m~Ky6MwQ37oR-t?d^1MIvlXOZUDhL*7?8mWALL3kuK z2TjmorWYOOQNl-w3W-}_s68CQR_ZX3I9Brkxo?~KuQVr&B!E4|a1kmZNh)zM2h1hI zUngMb>aKs)8lt<;;hAaVBYkls1=6@A7rLe>kt7%+bdznKf-K_V$P^cfqE?Z{v)D4t z6g0wt=8=wV>CLLoYj6CIRkQ>zNsx_0tO`wc$4|=)o#8(d0ep z@wfy7(PSQSf+LLUPbK3$YPoT z@FLGMk9b}&6#S!7xMn3W_O^_YzDyRw80CV|KR8hM)xa%2k3O0Eu`O+dF#I%oniH06(d{W;9!xHsLFD&!eI3SM=%3m=&MW`XxbbCLkOx*GmVymR{680|q~C-KFKS&7Sa6PGS!8UFMmu)79*<;8#Pz^{I$W zPbNRpK}W4h)4!VQQj9+AsQz?;CDlEwZjYYs+KZ6&$H;~`Gxg{>%8&dKl;Jj+tr5LD zKkxE?cTZ6F*=!)+2?SNk@m?uAmpS<pykTTKZ>V>_vUE=`=}P|@nfq_w(?X$RX&e5b1C0DeQ9x!Y z0!1M@33WN2;K6EgX^2oP5?5~d;b{0uNV#Q+IvLJfb=rD{n?<;HzF&D1xx$FTtr0Vq zY8!B&a)Tc_QCwx6gzp%PV#J}*UU#I6lo>B(*O3AL-o;Y$M-Aa`1j$#JK+IODM`B~* zly`xpE^}&_ZB-~NoXvH)1pxXA;~S}pObdv_P@~5nyK`x=LCIvJn^S)u9>O#|l^%bk z$&ky<=5Gy}ZbbMYLsQw6)tN-314(ISG_>s;<%}j_iKW-Yq4;MNm9ZvUi>nEPp+!1< zFtL_jH8t}+R8#Y>sqz#ObNs|b@#`*Wz&@ieCH)So1G5XukEbcz03gO2fY*uMz8H<% zRS>OMp>X(CO=bg$m0}QU#(Oqad_{<2vlyrCI__@lU>cM48)g)RP+e9x+u_YaQd(12 z>r%gTp|Z<2I#!`oHUKkH^eJb=fU}2yjEIX{%EN2Z6`|61dDA)b2|j=r(*Kq}Z{>Z1OP7*mRFzk$j_Ca`fy$c z3fv9dhUF;63$DjTQi(-~ixZwOgjP*LD!4}Oa_@@tv;J#Hjb)7{`ggX~Ny$v3Fc$6_ zew3)#cvO_{11i<${v=onrm(+2h#HL1Shlffx>mdU&UxOq-APYY0L_OglKsF*P2E-p zMcpFu4G5LQPyzoYVC{Y z+6YZGX_ex<5_-Zq_euPjpK}JSvf)7hI(34N#$66G-AIFBz1V=oGc85T=ID|d%Z1Zt z_%*fSW6_F%_vFE(ZrL#!T5jpZ<6Iz4opooNcuQih2}q!i?80D81-ABzC1=eKZ8ss0 z+GsAHg`SRm@0uRFt&dvYTel0|YHssbbOETGnb&qMi#NmHX%lSP5fSjt5y}Z;54dhr z(Z9K&N-R@pJ1pgPr1@Twbzy8b4>zP;jlb?sbzNVMdUuowOm^vPAe*)98g~c1$6WGT zjR{5(16IU!_TT)hkT2?~l74Z5l~b~X#eAN$=Gj)tFjB8Wjcc2ghG{X&>nu$QV0t+B zh;7WcVLvzO4S#+pRvx})j9woQ-MqyaWT#X$)>i`v*+Sjxlsr^6z&jDzMs%=WzO@|C zN8Koou8fxKZI-{{yoJ`aT7MG7vyd6z(4|DFgR5S!c{3`Y7Gxi+mX6x?y%8waYhENFKPI$h|tr`S6kRVU0BFlzB| zgAt49&2Mp#s%ZIxsg=V>0=o>U)K>Oje@D#!}C5@gf^QZ$*_1o>(yPIr7=IQ>NFz0U z2wWVPKXqpaoENqhA#!*XCOBqm12>go7@P{&K2>2DToP$42Vqg5r6eam19l~ht@t^$ zbod8hm|FY>-!(h%1nKNEm7XEid370tz9^kPW|evSt0{1MS@H&c&6v%|KyVadNIoc2 z0q!6CD1Ad- z1|og!J_A=_>Vb0IT6&KKidoP|gpS8&<|;xq3l__8J?X0Gx%$8!Tf)yB$+>f_Pov9? 
z-DRfA>?BC@d%j;SPwy4PpzLt^n0C$g@5NG%{RM_Mw$MeVEqFSHMA3k%I z7cVkUI0Ure??bb7--%OiN5JKgTUn2m9ZpjChoujl_Sz8<5mOaM!Px*uZ*Ps*tP}O| z&=PS?ftOqbBglNQ8&(e_p_^yHJ~X1Sz$d`hQAtkk80zuyaYSr>Xd=Yel$_Lh{g8?W z+7eL>`M=dcZ<|bP<5yWs?p)pfcG1*p`xnsF!!5w=s>$-T^KHHshJx!{#<7wGQI)UN z>n8Y67^acGFa(slMsWhX_H~A2|CH?vCssEsP2widTq>_%D-cxYEy<>XiQ6W!c_4f9 zTD2UgqJwm=c{2og$2<}}!LfMHrM@c2&m2=hkxt_y!;(Nh4PnF>Mg!_vs43KRDzoz$rz9ylQ1mgpI-d|25Y-Ml<;_L0? zG4%Z#fG2()TYMO+HUT5tgI-cVyScfPPq@+`66ao5JuYG_U;a%l z+h&KbzDtwzHRUXLVweKGTLrY`K3oMGdx8e8$7t7C#?_!v)b$>KxD-(@MHaaFae%2`3&Ny!KPsPo% zMH-kC2$&lO2o;$;S+xIL6+KQP-J}K#nQWIZ{Vs3v=epI{UCMbbGDyAU-#4+q20#Hw zSvjp?I|r}P4t$vLDte`3b#EptFG@I|Ut?&h-MhgD<#cS5XMZ=u+ml5S!I7a z6Uwz&cOg%d(iO6qUmx7Tf~;>#_ZqY&ubXXr@DmbO;5|3!dWNU!B)MTyu;JoNmaF#%QGR?&kXL83%89fHcg+iK!Ntr1Y zG`f5CD8?{@sPx{6UP5UV?0s6KihGbw`+IN^hJ$)BeU$vh$T_p1zhJ5O{HFk?kE=I6 zBYz~yA2g++sE{ndMOy~mP{X|MOfo8=+?bxN)`ahx!h}u-3qpP}qehqL{&}VdMm5~l z+mC~I>84s0#q2qmO*p26T)P9?{9ztiGGuabid$j&Em%YEs;o@O&I5W8hIIJDMhG?B z+C9$NW(J;LQ4UY*t<#`nl`#f{sbNs{J1_wUJwJkNXz{QF;>HA0X*3(3>J`Sa`?-tN zq{wbLYU%m)n{N2HPSQt-*#qaJ?WH0w>FBCyksE|&MB{j^4)BO-bVIQxRs`qZI)V}GVA*xC~N&=b#o;N5ii`6%Oy-NmFhfQgz^zT1B;g*^s|Pvh6y#N7X0 z)wp5o%HD9VND2E5&WlTi}JWB zs%|r~vM~3s^ z#NT!31I=yhEhS5ipR7_n{}^^9EJs2JD?^e`qSw^~4CQBH5?hQ$c^f!zseSD08F3gNeYprdyC_6E+^!DDr~i1T_UpkFRZOyjwrDaiKnA%*i^Z zX|;L)gj~db^czwzN^ANuqUp9%v|3{pjC8`}g>K_}Sc8-E4L~Lo>vc)fi@MTl={s@i zvt{O&x?`ay>HuYjfvKba$U3xX%N4V%n>0<;>-Klg>6RmXUIhK#KjBOKH4589!pDF@ zsF^Hy==*6tvZ~XXhQ^C4W!s2ngx$GahGPL?#0Zs{vP@G{7Jxuigev{b*qUKnCaoms zOm#)*?ok}3<{&cw>pS9 zq0!xC7Y{&b_RP=Hu-W?i1%q{)VcB&Kjd`qTOmSI}JQpdrt+OUTs(1Jlb7$HXY@r)y z7TPJ=la2IbcuxzAimP`s1%$KHkzb%iFbcYrP2|#^6!Nc?Kbf4_{f@e zG!w9P#1nE@s}A{^7dQ-!#4x$Tx<~m<1n`?2Y}CA0j!@hJ^k%)OoRlAPEn=oLsLbSe zX6mES@>DWH@nQ_?-mGhSm>men*2#xU?5r;tkmVEztMU?H)u!8LqssD`bie?-^yXJ! 
z4$1Jo?>qSHVT-ds@bP-E)VjQNHebcs@+PE#-x6wU)l_wF!Ti{QEJfh$H zJ&eEs?9B9O=+PO);0Er$AN=zM%s+kME+Lb3TV86=T1F2vrwv@5zwRWrGW)-b4m(Fu zO_ns1AYtAJ2zb|h|NRMofT#t;H4VS{l|#v|ph^GY1u0j~5U~&#rdMovEKUt}1v$*C z8~d4lO36%pcn2)U@$K>RnQxSiM87}&b(=T`O%NC-x-pA397vgDy{+!S#1ZjXpdOTZ zEeCoMV+8%qeQp6hHg}Gj;pe(2fNJf+uh>C6_E|6S_h(FA?kOZ!Jfoe?ud+I20v01UyQ1(Z=;-wg!IwpP%$m&XNx{}v-n z8HXRhZuhYU5}aa_pjmp$z61>{)hr*K;)RjUgEAVdb^$#J8hO0xUen5VHAK~Ml>=#?pMQ*)pU5UH~1gX;owvxJeBT%U2q^^hzO!{@=lit@>P1| z91cGdu_mNVMriA8Z)z}2#S3z_SZD)ba+pi29Tl5_ktdm>v}U0GeRVQ)$wQbCmVDjns_xDp}y&!F_R#Y- zzoTj`z0W4aEQHvU{pWhvqc|z5#D#MIjn}5e&HmGzf>-2L)=5uGP!DrlW7DBjH}P}z zXSb7TUI4J`oVs^6AukxEEeys?ir_fdagi&Y3S?4E_d{BPdLA21X12vh8u#?%J-Pm- z`{+BMbEb3V=QiB?8Rr)e-*>@*^t@cKyCh{v*-AUwZSXWNS1Wh4UbWP`p>%9}BWj|TJ zeaq#Kvd_8WNW|L|T(iaHou)s>owNY8e{&x*-d|tcraj3&z3LQ;%2erIwV z4D4T7)$&uos6f!mWc()i%*Qruc9C_6d)ACh3g_GO+>vxwM&!`>ahg|PY4tz9h2`6; z36pDLZg|zHEM{;wlc=q!!Z7Y%5QH|H2ja(A4@u9f36Px-$#LRU3da*tsiLmP=!pZM z8^ZC#vZ<0MY__)*lhG=Ll8XJL2EBaFP55-)X;Z2{)^`ng1o;aq@5rm+Hb5J?d<1gw z{R@?b`E-Rr!Hm|Rj9!5U6p}Uq5VeR&oCL41coO{-ym&y;JyI|LU}BKb=rU?I(hs(F zFYR38G@hDrT4c#oo)1w(h&={OWTSxkUTP&Cr$o}CbqT+n;|Uz=iRq3aKj-@yfB@TH zPi@{zx|-qpIY$orfk{Hx><6bLbb+DAvcS=Wzps(A+`sQf?6XhoRhgaiH$Li3Vox#` zzGWfzA{y0(H28`Pc(fH8le&+5$#l&NqQ)>~yMBiN=^EAY9w;ar=f3m8YOe!QmxLSj z_0qU?9Z8qV?5`ZJ?4L(I3IF#3(fJRg(eFQjFou?4k_|rY=CD|^@KlObnvf4e>@aIT z|2!M@&p+>S0@%(cn@E+EFB(gqV@Hf7$vJngK^Up&KOZH`RA@5PERR6fqUyz2r={K- zE$JIPi}lJj>R>;409+&-DvW^3o);f_%U7OIHGG^GJU^$z(M0jstyL9#(SiviqkC3Q zfcLGahYO$CnRyPN?$N-WYkeuueomy!=!N_D)=K`F_))0)+gbqg<*41(%?UzSDuV zs=!Xyyxv7D0-q;Vv&6n1JdnkMD$>!HLgn|iBc`Kn5y1fqR-mIdF}5w@me~^@E-s?y z@~-<3X4jdS*BxFJIXT>L8LE#Z2e$b?2qcEH(JSw>ev~Bb!z*jX^j7rd>_qRz0Ldbc zGG|Rk0Z%mGcv`qhum2=eI@3Y76L}+>mD&ATTYmdiSN!-?X1u)QKNW>0%vQha8qrO1 zK4gbOzFznb)2y&&QqWt(AMc0M@Wa3ZMFL|hlu>n#wjyCrI!~A-6y0aG%fj^j^rC23 z?nH{2-~5pw^MCG=rFwrK^i1HZLy^k0p`dJnT0#LHu7D(f*=_GvMxF8?e0DzZCD)(U zqh<6&vY^M22P3qhJ|Z1Sh{J)iMAEDKgRf@dsECkKB&jkKBgwIVfDs4Z1lN<563Zp& zbzr3gA+S~;#@X?JvCBrhmx_{T29bqKg1iYtbAYbDv$N!MSRb6FIUXkq_qkqS?OQFD zUmF3O?tAIhe5s^@AcD~ON3$R;G|w~PAj5|_T`c#+X;LJ2i)bdYZ_on zbyc#Wzh(yJWvnhG-v=em4Ihbjwib3--ug9L{At-qsC4RWDflT-x}Ch67QEVaHSPYA zCzO4!tNcuWV|v8&L8IzO=T33;N?!^B2dk539_IhHxh5)-H&swYe&Ld4oNwfkDb2;D z!D{^nMRk3}7*lygmR}vCl(aTx0D(}sP`?$^6NyxAT5(E{?NO2&ft4QZW{kR}IQR>s zcrS0;cTv6M=&3VTwRuHc+S#+f_U3q=Z|#%h%fYGGHb409OzkKGFXo5#82eCh-N%2k zyw{k+nCtLe4eT9Dp*zXdtIg_QwSfBOg>U&M5r7%|A7S|~0RH#mr^Zw5 zTERr^rP5|*ZT1Q2+x5;G?fvE9RcEKrxA1l6*ZQSRu(r`gxsape!AXCv($K_c)p?qV?vkygoNuf#B z4cZh$sP06ZVHpxJBM#vdWz_`h2F0&+Pb~7_dD-uI-(t-=0>gr0a+B1@z@*$p-pI>3 zrb)1y^b@6+Hv2ndybl?R6b(=Tg1NckQsn-aKWDRpX)0d~vH{?r4Fu^`KzwJ4wcunz zLL648d954%&3?yA&_vvCc-VHU9C=5-=@SVnuc_GYVWmFP^iDF_t^4fwUoaxk=9%0l zo{{}iv+h+e4N)mUp9NWL7ac(UJscu4)108k8%)YShEa(2qA$Hb#hfUQY~H&^&Ki_w`mR?5U@ELLS2MBC zm}2wOylr2iJXwBXA;#3Z@C5oKY^CU6lo79h7#RpP!ZcfuUW6% zm)mu=ngOQWslq!pjNLyI-f8m|5B4&-E8o(Cn#k6`%tnZp>iKobgHA6`pKRV%{N=+a zzD3nj^Y`V3R(LqM8Jsmh!4q5nW;{hnFqa70>U1^rG*psSbG3bz@ZbGE38H}6G)X~q(cJfZHYC7VEU1%kG}nE>~! 
z;^1B0orBtxXIrkM75gsSaD!`8kE&q~dbTvC?qh@Vo6(^Q#7@|HN1UhntG~$BIj<=b zS7Q}$psseZn2DfSm7rwY7X1nipnWFgaovK|v71pRlwyHPj44su6yn!=uk^{T1CMdY z&+`oNcZiVYD^4kzL21tB}aq zC&HJ@pGr_}ey&O_2z-TE{r%l_rL|NIXN3EQ_BG2u`&l1;2`E=Cl}fjEjN-%p?|r2A zWAsMzy{;9kVRN1L&nOCZ0k?`vtl+bq*Cz#)ud}u_&E(i@`sPn8vEnr%BQ2e(=p-|2 z8+o2sKHfufj4#&62Z*{R>bdsl3ne5`w}GZFb50A2V<|;o=}E`yG3a3n z)VKPbje4p3U##=I#A=S&Wd2%M^$fbMe(Yv4H9Uskg1FpI1BlJi7U{Xnb9P&Irbx;D z&R3ML*H8BNG%Dstd|8-JXb^^PX(8okQwFVIHdL3#2Ft`ZA4DXxypHi6WBrGa{zHzr=tJ)C|CEm<6&myOi{> zm+WR3WM_n51!U>1X}-lXgwuQ2NLqFjo!&^3Bold7LS4tqQ{g9DY6YM%U`9vb%qd-M z?CYCSK~1yF^9hAG!ij{;8QF7X>%SkD&5F))a%b>Xb}$hXuK}b~b{DA?E_W^>tXr|R zz*RyhmINXj1X0fkhqMfR@|sEg2yM})vxfbS*q+9A0qWBSl3MsmORotH{HI(gWz%t@ zkB1UuDx_?5;E)`d9%oe|Tke&gG;-d@RiBN=ql>(quM$zCYR@-o2izH4;*!xeX2dov`Rv2)zJ{4p5EE+_(cMwej*3s$1}rPN^pKcEXu%{| zx>o3im{OA9Hn9rB@X%CLOTBO=H2M^?#72t4KvN5lOGR3bHM<^QHpU_$}%Lx(^+1T8V(#+$WR zEra`07v&MDAS@n4bb`U%Q#z6yItg%mjZT=5#T^ zjwnz=Sz@E+2tTIw10#3ELblXWOU=7wEhorD>%Ajb!Z0gStdqG-XRVHZ&f`J6a#Z*V zQQCA?e%;`f{v03#t?qjmP$}QWa>tN|8r8M?|VM^5TIgdr_Vs{_O^#~Tyd zVcRK%&icmSwcG$^%)}xwqK*~xq+K#zMcaAdoZcEqm zuq#?Y-i!;~uVmGC;S^?Ji$6(ou%E8=#I~$+b$7-xj^Zq6IRDY-JpebEDdl0nz1P@$ zdRjEAUrw09!);Cb!jq}sDgk8vW_vS_R78x=U8*Io@QxZpxYRazvu{m$xpaA~SLy2c z{5}zk?)UzyZ|HT@7WAQ&yY>$gNxOoZd`8B3|Bt_JgFEY+d&Ft$;dG$bJ3sfSwGxa5 z0*w7}r{gk$Q`l~KcB@9F6t?)qCE@BHpI+Qtojd&l^DQq)n9m1JtR1svY~O zG?Vy3oacQm#+|R#kARN^al$Br;An4mf;x{d!@=y!5f4jFZGMMSQ3ezY?WaG50{V~5 z=$5;Y*dR6T5S$IHcrcbVb>t9SQZ{$($?0!XN=4|Pp@Zmuw;e2br87t%yPByn#aB5qtYwD#}LM3 zIasA!s93Y_p9!~mz78jqJCh9wm@;4owPsEKdfcx0 z-k&X}E?M;UH$D>gEGO2jpfhZ-!7ft)!kNSBk`U6^z;l5F==pGnaF#?pHie~f>k9)? zsxFBG@_@2UWg6mz)r$gJ8s|$?=BNikCQkS(5OKaH&gL5HYe^4Ofln0s8qDnMFtZmt zBgOh87fQ7mk5ZjO??>I>$*|--qru6{GO+!`yaIu@Ey7DYi5bA9oIHaCM6?*Vel*9Q zugfZCR`9%7O5d#mC9W`LQf4fGqc89Q;N9>lNyV}qK}j*u*(?UhH7EvcWn?=lJ~O11 zdMR|N_3-qeRTQIGCTS=cCis82T93lTQVX+7_rCsgH*6Fw3*ZeTu0bEO0cs`JV2yS+ zspG#@fDM%5W7ZPLSHab)OaJKT%qiPY72+H!agR7>wOGZTmEUONTb<1V3X(i4qONMX zID+sH<7u2GWurN*)lHGW#+A%IDR8ZZCd&3$Q9*?!2F*nW;)|FJHHC^on#`3LRFPO% zWlTGtC{xXALdFGz>CO8nibQD|+KtsOZd? 
z@|LK4vMdgal2l4KWbM`g#8dibS$UDWa{q(k=ROpGBJ0x*r zvj;Lv*S&x9%Oh$+H53jiY+G0wnHjvd8=QXt&Hs<5a|+BP+O~Daw(X>2+qToOZCihA z+qP}9W81dv+xzTu?qj{Ix00qdYn+%ZkiWNUjQ4%fW!XNdLPv1YPve%}-f4y?{dZ#9yi%4qCI@N~ww4r?DkJYp5%B7foSc;;O zhJT;&ZHfsqfnci{RF1hk7AqgQj0s^NozdEB`)St&3lJlQlpxm1=@3uQ4_a$f24t$g z^ruY@kre_SB$C8woyAPs!tyJKoLdv%4U*(G=0C3V6VO6T1+NjOXs175PCUxbHF=zV zlBonYs|J*HxT2a4Xd1iz#$VfL*(@I9<#9pl=t%wE(>?jWQIIE(+}(rC*}AWD=s#MQy>c@AfSE* z<}9SnAOjgU)bxaf0=n?`un@A#UW(;*X4_vqS*i*qWWEZH2Yk^8-~I?kaz%tQqDQh@ zx$f`9)=IWr%;~Y^kFb#`QrMl_B%PB| zpGaF9;`kgEEhqd`tZ!aAXUq_!zKRqg{U1fDp%X}Eco1Mi8_6NY1d}9U8oVgO z0n+-xlVx_n3)^L0BDS1^*vo;ca6u#Vpy@kJ9k`}kV4%ZxLB@V7(wLamg2Cn ztxdLK^_#l%k&$ z(;4Fz!bmXIf;;tUInMS) z8Bptur;bNzXEBd;Pp-?i5>EEIrol1y=D7PuQ7@j&2WsY~;lQ&7-x)*xhEwZ6whN}& zp6jnm2p#7WTTJ(U4Mf<$#5;RT8L`rXu`^w!WdQ`S!aIE?>|B}MxJHLiD>p$tUS6!I zfW|p{mP>g#fY^q3zUz37T{Vhrsq-e{w4TKRdx`Y=qIacgxy}gXLk*#jB5|fJoDPzbd2vTdNDLZL2N?u!=w1^h zZJCrHW59FZ7ekOh8(EQR(@gYk@blejp9p0s664n%KvuR!loFj8`mf>JY=63xM|zQj zZ*>HQwl)w>ai0^_Fl@T2vgKX}(}2d93hD!HkEVZ=rT-_j#}hmr)Rxo?r9z!7t+`YM zJ_k;Sg_ob6OoIu6I1(i)P0Sc>AlLJ(mu|qei3IW%u5q3{w>uDYB+`=jiHjOxHl=R2 z*eM1Ez~lM@ECeTRJ^Ou|C+^Evt`DsKQLVxAtai=|t^?;E9T1$u+Vg`(xU$B`24o43 zi=m+wyv3$mmWkGHa6Ak1>^u1=<1i{OS5Nmm&g=15sg)FLN;UOHza=P*5}J->7EYJ4 z{IN4g?U1kXvk%A1AA#TY*nr9!Zg=HRA{j>&00AXL=L>$n7GH=fN6a|=1X1K76bV={ zek`YltQEXImZ7<)zSHX+Y?LRjs~Pe6%LIFYeoBhFzUP-pu4(Qlc;&e6$KLjcEN>;h zrRnpqxdK0AGcM6In;DrAvre6Rtl05)u za+JL-J6tE+2`PR3QVojb3=C;GEDmA`Kl6s4gCr@bimTY2^qydAv?h_Zp0v&KxO&+3 znZHip^*TKJiE;V!`juH#xnH(Uun<@;{aCzRl$xg1jnjK_8%RmH$WIKq*z+@NQldVT ztJZr*Ti?amJ(K4P)Mg&+Wq)6-u{jC&vTC|&ymA?~*Vi)_Tt)tz+SFn0yOmERlr`lk zL{ie+st0M9hV0SwMZXI6=53$BbT3v^tkmVnBZxwxRkNeG_PnX386BKEkW@a@qiQ8&xuDx%H42asU%d`3gWPM(QA=1wtH>gz|eCM9PIHT^i z8*eNz?<<0|>Q^BrVHz=JV2~kD)Tre3+sLSjb^=-)JP+;6sEa2E`2R%OBh0G)U_5sv#SLPEmKH2(>|T^e{4+N(n%A zZqxY1<{`M|FQ`J8Uq`X14^>+Gl~@#p`s!CohZ{vm_PS7GbM2%BcjxxOb(Op z&+0;CUR)3bILTmSyeL}?u?lZgg0=ykmDhE|>yK0?0`4DLYd}XxBnS>$CZo$m*6vDT zH$~9VBO+%Bp#Na{IBUoa*fqGTBWkoszX5@Q5D(!dA@SRTq;@b)|GUD#^`r9f_up{m z%E|XZ%TI4L$zZx=w5ph;R5R4hN`{g!;L!Qm5_(Mcj4Iy#Q2SK~V~L~(D1-sR^wN%j zBN@`M__Q*@c2P&UWM$KsETYm z0D>)puR3wgmH5EL4XuFi2!l8bp@e#hANjBoM9S6Kz9 zbR+@rGUqYN6DChad*_Opg_t%c&QQJiq`)3#H?T0N($q&wqNHckKLK$PXx*#`g^yE0 zU4Lprx>|hF-!oq1ra=G)TZ24h9TeG^$?tDP6v@yI%viGb+}fSuph!Y z(;o{R?tG;XVuXlCG<+N(y0Wld$^Q;0CgVajctqs#W8s)A{m>4c@Q2o$;)ZGr= zih(g1x13RlRBTQf>_*#}sj8Lb@Ln!@qe@;J@vf`)Kh9osz6bbL&y?S3HI5{Y+l9Gn zK>(@9b4|g#cL(~wS-ty&v)U~VX_`b|3~pOZTQEe?7%??@y4o?}!ixpgvpmcP4l<2Q z_c?$U_Q3iu;crY&csai#7qH^WWX-u-KWX;A{v1}68qikau)Y-Zv{S=*68QLB55 z$g1W@P9?4Vi~!W(OqHc+9S%jyC1J{)m%6?~!CH1AbzU;QN{{0MJYK0-Ey{tjCZ4rL z%SM((y_S+Dc64+=F&gudk^i!mUUz@XnZL-EEbL8OmQ9%~U$d{9CTl*ps9LbEo0$nF z&ci-iH-+Aej&JUDo8D+uend~QZdd-d1NTxxOMU|&f$Dpr76mJ$uyjZ3fD4^ck;1;1 z?ArP<@2r++j2xR5wRMLngY@Gwv+5wxfS*_tLRWdz?|_@T;YQ~)O*D<1C`SidYxUu} z`_h$j_A2&zv?+DR12waR7L6sh0CTVmiGcXI^p%2X9ze!sMqIPEx?&#{JzM3)my4ah zb+-l3*Z;7s)zy7fEi|h3ok{@SaRNCZBZcb#S%G@TmXz0jbrJu1epJo7OKIpkTea~k zQg6Sizjv<6VQ563MQ+=AdxQIQJvqkOyOZgugsgVpA-au+Y!ky*s*Q!3pjjam8XCYo zNGcK=<|Z?eG_n-~mH(ZajBbDm#njCJj{^xPd<}t2RP@yeMyU2hZgRJsh6@54HWi^j z8K8%yWHR)#Yzl(IAg~DV5dAGl7k#TFDICng0PU+}% z$smuFayYOXr(;Dpjhu9s#88J3i+L(2Sz}-ML_mPE_rY_V{o*$brwUn|fqK0-kCP7w zu;n)E*b-d9U(LBlk3q^geSAy0Df1`gi5{NjYQx6E4(|$)(r-=ep;uJNsNQUDSRR*G zzMa+)V(FT8-CHWNUxB=Ff&RDg_fHT~ba_#{G|2tSV*}$mnE=`94=@U1FZHZu>=4!m zPrjSNalY4`Y~b8$C`%|ktzrP3YE%IfAPM41y}1z^ig3Q$dpu$7nE+Mt6K*bn07vj4 z1;x7Y5?G$*+_=BGP+A{3+wW!6-)7C(m+O$4B;;tIqjPqj4H=q}C6*sOUfGUbyMfe# 
z=tbWOPm&#JWEEe+`xGq%v=^O638Z=;KkVZOt0ke(%$muWR3ve?CegqIrSJj-!j<<^e*aC6m7q>HR>=rL3W;D@S91^P6Pqb7nJLz=P?-%ItNx3XR+EZ1;dj%Z=@S z2f(dJj7W&Fh%Q87tkb?;UgnDrS1KTxPOfDmEk~5gP;tw4wS{Mqv1U8aFbjS=L%_xZ5U2`;R%n3|iPmtMfG2>g%3z>~<@@ z-7*T#!;c}<;?8IUNQ@%Bw*^5#4G2IQj3LkP|85uU9ZttgB#XFUsL*CsMyZk{38wUb zR)i~}PEhryiKiJD3gDmwF=amAcc2nY@0=A1t`wXa&zmMJQ=`vKX@fh6J z+#hQmhz4^>-@b)n~!;+oh zjmLNVnM#We$=amaHt&a$!hd_f-}Vt8ca)=sCd6dAmqmh|i@O~QIO|uWbW>a%e+SJk z-0U<_tSt5D33!%PdP)VjELFij4e$LKuvN_?C@-kB|iB}w0p5R0b3kfa> z4s*4)vpboe&&Zb)iF}x#&aB9B_|u55kpWcA8NzBL>U4j#*uxHB3bZ&?NF19|$-SSx z`5>c1BxoG1o-2WRvZ5_RS+EK1M$_Qka{OgF`pviL>yFbpAlEwRYskIPs56T9=&6%N z>bV_hozvN~bLE8(8z;E?IzvDA(kIfng7^AGShum}@uS!NXz&`yy%&9$bC%fY+j|Sm zmGj-3cL4Q#<_-?XAYSkuTJ=gQNnzTjc#-@tjti(vzOrsaJ?Z`6uZ3fFyD0*%HRSKH zEZJxxBxr6STESb9FKao9@b2ci+MalBJu~?M|1XSM%dS!k@t=oFJ-$q;$Weo#l|?LK z@sV;|cw-^8qI%igMz!D1bp>i}?(N3x*G9cg3g&Fq4TM$T5DTY9$M0gRQ6b`rIg1uElv+gflL^ITEfvnp= zL2@FUI2jbK1M6`j%sE?-8x|L4?Bt9yYW2@2!LOv52SJtV^c{9hxrg-{WlDcqLvtSm zFinU@-D-r*$&<%Q1lHN!0wAlQZQ?XG*aAn9AHq4fbh)->c2rWJ;VXfL6sxok&s0XAQHLp4mM;#f#C`5W;sobBgIT z7cEV3@=BcJrep)$=B?9_+K*9Z9TUOa-`k^)FrlQ z<{C4J*JoDMcHq>Xk$Ae>IJC4=0QJ-?qK`8(_Af`5`vtyj)sTNI&zgb~;&XHn)`NR= zSlc$zxGKP0EGh#UDlZS?YhV^h$9Lwh@>1wvnx;Wz`xTj_fj|pQ{_t&Nj;cr65_Lyy ztUM}tkHLvPKxJjC&eN{EWY4w$@T!FK+f@2JetQAO@pfGC!nOblm&P&Yc`5tGk6M+8 zofCA(M6F(_AC$lE@~#?~K3!R8UBTpwVrk4Ro4}6OeEhx;smdUr23<*BMU1A1i3`1W zwUhv|MC0lf+)_Ty=~1))z2sE6tKdE>*FV7N(6G~3oZ(-|Q8~CtKUd)Z*dYkG+%lTT z;aW29Dc4AxMev?@0N#NK=u?HO5k4n!)gE;)xXuY$eHI=zokpCBfyf&Sx8h_Q*>g`5 zr|sf{?{fp}hpgU{C(a4HhoA(dT&y7;N}d{uUL*Lyo*}l?90c1A(~k%{US*@)E3&Fr zR0>uO#Lklc>uY6W#GNMx7)mQjbrj$FT{{*9ztX#<3Uif2)F^6}*it|LxWWAE68P>` zOFk)dslG4H+v&2XW<%kp?p$6oOu`gkb~^W;L~5cZumV-cA!4;g#-cFdWU($db)a%l zfB1++?N1{C!)H_8b%R55KUHyfe>>=Du4ylzcJV|&{}RVP0upQGPO|jF^itxw(|?3q zPb*Bg*RR@9+~^spb9xWp6!TGa_hfy8{_l8`{!du%pD!I!N`ch;iSS7-^$-l42oMo) z%?2T!!de?UxaW;cbo_2wJlYbD%YMsC;KeK*CyOMWm}zku6RI>tFvkr+#ka?zIcx{0@YwS!_BU<9Jg0iJFBD5wn znbTEam?_!Nr_`s~;#bhiBR4gThX**grM#wpMb4kmqM;oWJz8jL8Cabg@o4D(5bFG@n4E%S`nya zk-pp>?cwullU#{k<=ig~KFxi_?(5O*t%PP~Mpjl<$Dk6BH3(X*0x+=9AcEYcZJFDj z?mUS3Q>7xg$2WquO=jwXwoJC5mJ^nhVgYOE>xHo8A$yH$eT}A!2w+j(1%Jw$p`K~v z<*7zGK%K#+2$dTC_CbkVlt?(ESMtmqxW%O$caAn)dcsGgW#3xNAg-}05=RyBcn0C?`A}Z3Wb-U!E<3WzRb)?> zL@=Lg+ySO=HhWzE#>Xh<+U&Fs?6_^{?~A9^KbOHHfg57hQDR`=UmvfgGC=33o*o3Q zMX>92(xR*vsqYGzfGBER*^^XWS6_N7*?iR+T%OgGoyO^NfC+wzIS%;MH|C7)RyIL7 zj2Nn0nMn)G;S_?f`J@kmXg^EVQIz7!gLUT}PhY$Z_uNLNi0-KuK6uLc?q3(+6C56R zYi4KvtWF@u_#5zPmbNAc*~4~QR4_~rj=I9Xx!0TF#Ps*pQT5S06e33V&&fsik|r9J z)Y7E`Hw7oKfL{L`X6ywg;wA1>UrLsO4B17wuhHuBwC75HtAy{$9q4NhpXZ-5lT3zC zc1MY)1{)a|n%<j60T*;=mN*Tq43@X_1m1^CHRlBmZQ%?^hz;v(*my{%)iCE98VnCQIL<2Ncj<(teW4Y9!$=&^F%MMg(*AmBc`Q;DO8bW zSdqDKe9w@zYuOsN&7$Z70rR(L|Cv>X8nXY%&~$&OAxa_sn^iUwN`^3}MCJ&B^qpJM zTj)(9(jvxE(n|FaeW_j0;Fy3I(>5cgg4Y{^Y0MB%ZRHFl`#`7wRBz%mX8Z5&_hXdK zw6odVGbnTdkNwzRX=kutxTQk`c!26&e?tGLsboZlFch#|`? 
z(cL;34qql)ssUxrrc5@NyDeb}Rxt(T%!ZW@JoO&E(E zkUUTY|C2%be80q$gQdgt$3bY@pFzbjx*kA2E7IBlLSN#U*;ub>{S;>2yJDjac zr;T(d%S!rskGc^gf1Xka5osa=s6%4I1gdX`fRkaZI8(!TbeP+>NcFdw)Q=6f_<>60alWIx+{^3Z&B}gWR zL^w2c&o9w%`+$T?flED1n?3>yIS}W(2cld-Y~ti{Vp{an!-A8tWcz;#aoSwOe73!F zewLI_Y;rnyl(I~LQdr~U51~64YwfQ1{kqJ|r-n7_Z*@^HZ8HFrA%w0a2DgpsUq=hR z=9mR;5$f<|&r|Y#O>u;@{j%K%fKKPS6KRlP_)HWrK$ULT_ZfS;fCnU*C+D}_20qv)el&AUZ&9nHm#gyga zZikI`=eO@cMss_e?oKdgR3!2^CcI@|8Tfvk%kW0>lL_Qc6eXX()nZ#N*s2Le-E!Dy zLbrmLy++29V3Pf8Pb~dAu0XU{K$xN#gJNB_gj}O*Wh@~0*{pX^8Z(#dQz`}6obL$@ zV4{`n!%2HVqjzj8q-8Xm`y5`SNbd-(xmS`P0>RhltNMGQ?3-qWm1d)1WKb~H>NNZW z)5wem3x2KKs73VPP_AU#{;zGYbw-YfdkScqj;xZy^ZHG^cP~5ew-Q?Z2Z~CNZf?XZ z3u&oXXEI=|Fb=nD^(_`=jBi*`a)oe7u@j^62<-MlCBr0RYobc&BoM9a(B6SOe9s4Jj$HsmrwMlIpo^rzC?Or-XRmwTzC z=B#~rm1*k?v>e2hsCLJdH_>_J#f;yRRzp-X*w*WMH3xn*K(?!#*`W#usWj(coYx$Kg=MIv@5fxPSmTEY8VULZr-DNR zxt`=+xbMz@94KxSi5A@7jrnG2bCXT$4gxRV^0M60BI6Nt3eF6Q9v0NRFn4A#_+{7* z;i*fm7dFWC2lkTo-n_h@oM+Dq0a&yx~a!-=+2Fleu_Wc{jZ zaCjJmnH0z}paTOd2Zh~HaaS`As=a*I;LunrC+Uq$2&&|e|7-9FfCrF$0LMkCHU$YJ zrQN~8G=$KkTDFOB!fv7K&v1-M(AXzQlKd{GYuVS-R7B|HPa9_N);Y3%=R$u(hsj{x zW|yN%e;yxX7&f^NUa;PI>GC7&2iDqP+QC4VU1mt@)b)!C^oHrbRWe!puq?s~V8}@= zw6RBJgN1Hq_$*=o%$#@jZuHG&Prp9}0iL#c0}XBh;L7cNd!dcRMB!MOAxvheg!yo_ z@Am$_TMO}@Fc`awd$Kozc4C9Dncr#GV_3q}STf>?1S5pJ+)-zPE>e@?F=D~B`0b?= zXTnK$*Om}GCZwYF{2gkxgwEqX8ni#)Ds-WRSk~yCJOH)+H3B%w?671(0V(SN|-s-*{5!Aib`iQsE9SI7OYmF zh|=#ql#_h@2;gCzF2bTgvUjA!b+E8hFDM?S*DvNNE7KXE}_dq zFwXw=?J7Sh>~lLt?$<)J`j$ycN{JB9Vt>Y6)*=@N{8?)$>GY=`#U^!>K&Cy!1aU43 zZY+kriTB9d7C1I1^y~w<>KG;joVyBJWs`a(bg<5jPF$bo5fK3|zy@G0 ziS*Z6dr6CS#FjYpG;UnS3{&AqQ`v}X;IFkdo;gNbMu}a?ekSV4OS(Ky-*p#$m}fD= zw88_EjTIp$poz=-79GNWX5UPt;eNU8_`7fb)Hf6UAy2Mk@6+hfrg`R^&j*%6Wz$xf zFm*g8MJ3l-JZKez?21VQsieP|#+k7}&K775fIlAtN$;7GB5yyv8``USX4UlJ zY>69+FTJ8T_xOJSV=!&j3W8<4U_fp%II77bs8hrmiz@%{^Hp|;w@XNRh;st52a(~) z9v^;oDQ0pZy!eZDq-;BAbYQXJ3}t{wM5LN zs1?Hl3VwQuNcBjl;wx%0yIzUCrYE!7%|ix;&Jb!YgKCH!i}m71EfQoLg2p;4E?&~| zDlRI)$Zg#egi%oM%Zrwo;1~WHexq^+&duU+9*oUm8{QYgd`Sj`=TS?*>Zi)~S<>>2 z9yS8T0;Fk|!RPt}qvImSHb#VE1SOCh<}okTWaKJ?f?k3aN2`7?!bqbw%De}Uyb@u` zm@Buni5R4{zOQ-bWoz%14ocbdT;cxM`dN{xB|csnM!n?9 z+%GfbkFc`6?ZQ_rkflr5j24YzQJvZ(S*~6|4?2-S3$BT2^r=#4;DmtHT}%HJaEE&* zc>Yn9J5U|jo31P1gACKPmFAT862)U~s-jNb8BX;_Yx+mQZc)ah<#HRjD^t$LW1}~V z>V61GtWgOgr@U4KcE;O9*QbKCQZZyjhu2E%2DGAgqMwE2nd|4he|uYmJFd>((1z+V zcV{^PAV||gv|K;NYVyrq)TBZ0*?Ey-GmXv4>I(nsK5v2{)CmqS z8Sc!89d0Sz!Um)$ zESdQ;dW(>V9AvrN&Y((SW5qV~>9F%2J1}Z&_9^wN$mz06zBYgv502Iw@UO@oBPTX` zy7{a5toRfcNrHv6nh9%!Upy&e!c%L16TcsQUz30Tha2 z)+*@+0eykPWH=_jSJY4m{viRM*m9<>-dGk>xr0uRfCyC`;!F5)nEckQ;L1~F-{*x^ zeMFy!MEmny=7akrU@N`#a4j785MIbN^X6WZn~ZxRVmk`{vPR*^Ei{O}P<)4bB#Kwv z{r>Q2l$8azGFz5*SMM}(5J#|jLCS5XyIHB+42DVFowvmGkfoe@V`AxF(pW}TdWJSobu#4< z8EKAyOAvLMw|>Bp%sYE^gjw)qH*^60X#WZClS=b;X!hL=^P~ms4PVZ~5PDjKJI!yb zp%yH#k>KOiy~q8-?a-m@A>V^3+gX$Osn&6bTu82RFI$E6G+{|X#n)pS<57yF(xv-c zK?s(1N=xTMiR46k`$v19{>gps6pkX`4swBcf>G?iCfxvnVXqn3)0VZa=}=EMA*)!} z-dTO|tsInvW}<^4i64*_BP<|gms|f30iE06*(+>~FK|pxgMrDPSVcp=Pk1ajCbaDKlJw8t8c1Ks&EHK=rsM;2c#~{x9vF z-e3X6;lIE={~AV;f1xQuEo0F)thUcjpKhzp4gjV%g#5sUb!F732QU=EWvIKXa#hKh zhW}tGxkH)l5Lv<%Uaa(PIpN*V?dLU=;KckP51Urir^8a^GOBqo(^&VYA=r0N=$rq| zt4fvpj~?l7~V2;}Fv!=ObIZ$$Xd1>sEvoo8j5A?V)wx)3x%VB*xN77Et zLDB$hfl*6nj8zgHi*U7n7!#I|%8mSZumyB>qD^ z&`K&TIF}zwluL?3_miC$zdR-5W3}L`6K9s*@-m|lzqUI=l^4x*;ne}ml@@22 z;Ca0=eT_bLoz;!5wmT>D!^w$1LW8Z~``5qT3~*+kq*QQRa5~7p^{m!hA(5!9Jh^-Z6#TV0HX{gb z69}L&OhuuVwOMyCj~0xrv+i;1L>@9(A%I5-sly|jXSV?;%Klp6x#Sm-NHi;q;lK+P znmIG_3DO?01C8r>;-@XN;-auKVVSVdUyqEPa?3vVHa+?;^5eDG_rKAL8j4dPtaUQLLDf;E~B)oSX9!M#MI 
zR9myu2vno0TBwX141BlVxq>)$Fd(JwRl|1U!?8I-Inq1-162juGk=tTyVIhb`(brn zc4Z_ZS_kZ_4oEH%D3`y_BKi?370F`BLoGEPi@Jj?xAMhz=JfXkuZ)al-^gsAaf?T| zuWjNo&@kmTLWcZIFkiOiO~h7(UwStO_%SI$f{#~VY>v%RFaj%F@+PlBHr1m zP>aDuN=Us`kE_U>X&V=l^`ZVClgvso?BQ{g`jI(ji%mv>Y;u{ybl4LY@dT<*;=!pM zVgq{R-YF!>7VBJ+tm7GB9>6Q=>tg3wcb4(5ZflRps6x*Y_70Q8&NjRjuUGdg-v4bE z{+D_qQzzkfZX>N~33F;-_fN1}mENE=Pfz*JpRiK-fBu9irG0-RC5OdFyU@o7d6@RR z9UoX_`7;Pc(dt8k*w`{OjRkl-wd3qm8R(yxu2jMTz;8N6dIx_wN52Hu$rB932mhiT zibp@Ph`0Ec7*An-;@x<()@!uJ^u_{F2cSZL=gP@^hmlezo=J>dHa3V+0U0su@-xxg zx$x_9QZT$y3fuQ;$0~skC+$%Qez2maquMZV^JXfK9hR+$@I;MDo%RUjgDMCvlfzzv zYrP=@NaB~4>>=r3YdWjUUlZ0wm40xPQ;wwselxL@^C`j6ULn#h_3tq@b=&twzdPtYna|6UTlH4!(zc^BOdB1>8VFib3cpc1g zNT*?j;I@L$q>U+){_;qNPgaQZoXBlC4KYywm`Cr-P{fP18<-bxObZrs*U=I2T7Hqp zsHuOBId0|+5>ZfYb!_vq$#+6hPa;VYmU&ZHJSG=g^#~Rd!C$I1->Zk5Hm(FTO$kg3id(C-vaZ60e<~cF`+;%0&Er8zk|d9 z4UJP{%W@k6evSc6X;rFebc8c7+Ww?oi8jQYfB06EGCutT&C+x%pC*N;gsrctB}I_v z9cgBd!x}&x{L`R`@Q>WIdO$2juwFm0yx)mqaO#&7JVhiXJw~qKNQimpQasaaVYA{A z4zX$BR7w`r$>Yat*w- zKZu`rpULPRVK3G#XH~{UjIzeV`w?27$c@OBvBY>Nn#>)^(L~P5FsJ2I3vqW@vt>$q zJTUn45?1D5p_?xGPXb-t?d_U7DSBgg>c3=FcF|EYb6@os?=P;;k4-qD1t+e+Y=*;O^EmrwpF1gUK<%f1|kT#y@v)~lTz2yak}C=yo?9i znue2USvSOxqi1SiRmEoi<7wXlh)%9Rz_kMMS0t8W6)bDDRT9mIY$G{yDy46Lnv-0h zrm{eGw{@q@t8q`7S4Pq6B2N!-5ntFG!JBk7@%6j{ZL7m-ZMvFL?pxV(;ip3$bu?9l zH|S`Mifbk`1=D?aQyIBYbD$eq^&;V~jjPjo;6|2)(^s5qW-``F&Zfx$rQ35QC-5Ac zZUQHg0^;)&xDSMm*I}wC4#(~&3opE*Ts{!{79x^F9~6JJHo&jQXcFiRvf=5^T6TXB zY+;sakq2vGQmV{W>`bUxjR2uqA>(6jEc~MX{WjhFU#4b;*$POp0ECyxKoKWbs9yr~#uIGDkW3;KGfFVT>v@>%@CnBDX7et{M3@idMjOvo zS>s^39>_Lr^Fn1hul@jczu4>m0`V2v9YdUsDzyDs(N`hoIi#af zSomLBg`_Hd>o`MmUULwGYdiURI*?-XUTUHHw=9|ere2YPYXih#GDB=l@vzFDHu#Cu zU^K0S&K<6v+0}|P-xqFASD|AWA7PhN%`!>64ksz*2N7Xp)qWdyJgoioc@v0%HI~N} z&0>aq-il@&g|>+Lzl}5vL4nnpS&!W-GK|>Weux3i1Zx z4yxWE5nRHA2QrdxUS&jAq7{}x@p5U4!KcK$QggD% zdwD~GY9ODWBTzSgg@V_9ktgn(hRlU1BVXte9Dqy%SZay~$;b!~3OSBZjyRoOR}>kP z%%k49DK&!ckx!yHoghX^xvHb2V39}3ZLNaMA^u?fp!^Ec(Wd=R{(IWX5P--tX_XZ= z=AUmNz5;BnnUgZ}moPdNJ-%TdcqDIDCHwZX1(j9Z_Y_PEu4S0G4qoVl7pQD32L4mV z!|MRNpc9a%X=vWDwYNuLp2bol|18V0C4%)PjQno1dLn^yk{H+D9G&)+&GHVyqa^(3 z(uHtlQRdMW3g(vqI_ZTkX21*>1u*0yu(``?2z1bzyaM5*3p8ladA76GS=K(p`iE|N z&FmUAMCavc%>HM`i!6o3^NvfrSm*`Xb>?~4R_;a58R)uJn_Y$Qb=9}-f1dOK8kM#& z(>AJuXo(WF86q=O6!Esa9&awnyqapF9!b;x4VnFW_nbD^0RLIE>Fdjj*$N{QzMe^B zjS;o7q?$TftizKE5|g8j5fO!^%lvo=DkhMn=ZRulo@>)Y>|gij@X$SDd9cu(Q9(2Y zr5u4CGj!l+J!Kz!=ox^nU)OR(UA7Q7LLRFq7-<+^Z0^2dno%rrSz*yD2aX zzl+s2mza=PWJ#{or_2^y-Gi4Dz+b4)GE*L+ZVSWAfy*Ym4Yg77drLZKx}sqgNVf zxb;PFRGN4}^@N+q*YL*L-4|<}$<-d`(dNpPo!@ZjLRc%o#`4LEilc!Np-Wcdtwf<@ zYjd-QNpfpds9^G`tdS!gkC|0O?h@-smr;j+tIW*G;j;J4odBpff?mIwfRejYrvzGY z%unc5y{Fh#HFkEKz*WB8(taw*m!Ra20UJhZ$n_xK$(d1A2iKX1kWwwB$Ai5T8F4by zDr5%FR=8&B9OF3JpXHmQHr+xWy8^vcE(ka~{JEPlBpRnWIq8(YGG{FDgdCOUZoSO_ zh+<~U?qqA`9lko*x%(;AkC2ZOR?__rUqtsXLgAK}c<7nX3JT!j2Jwn2M>F*O8h5Df3rxlx~*U^H+B=?arG>cA-u8s)Y{_OCV-S zM>^Ec!M2qM$!`v;Y=qHzWC(?K9*v!wF6ba>Z+c^j0LTWjiBu3bD#mqMy!r#rn47j$ z__~YN8ljN0jfFUi&X&kJlvlofBBJdh5@ZovtT}NG`>-p6Va=~u9kg`+lL7{SeH{kj z%CmQYT`xQya)%)1d(RaoyyK(IPClrt_TffDRgNK9C+COEu+RLTl=GVT^IK7w>%AX+ z$5HdYr!n;flrCCpAe!kV<&O~ZpTZ@p9om{BtJlA&u%4YHfvc85*g~}Z|0ub&7 z;uwr9uPP6vUKo5Ud@&{2R&HK#Ygy|1|Dr7pHU7_x^zYIK)cKztg?^UN68?T}nKID2 zG;yoXGZ`lqk_MLK)I|NdAv4jaM^TJH1+&?Tq@Bn}w+q>c3c+oI%R5PH+%xW~<5(x( z7V-hkC7w=Y@ZYvs`Vv3&ulSJK=*wzX&gQBU1IEHZFAW!5mgc~XrC2DpLa;H~6}agf zSw$;jQLCH*q%WOX)l3C%hep315t_f^%uyt}P^qARs~#|JEIlzlT6%L?qfHlqkd% z&X+tVhR_gmlqTP$U(&i~Jl=%vN$o4ivGt_DAFWpZkI4J6HlNl*H~}`eS!r zI868ebVhF2Jfc8>IP(zr6%zYS4vV1of9D-upYJBl3?zLo!|$^c>u117=EUyIpoihuJpDx 
z4-rBGjYhCJ60rmr=|ME~$0<8QmHJmx4Gp>ggVr;f0pwF@8nYLqCBSCDo&+$;q2^Lj zaEGPiOTn5=4chq-S>*ciMHoRml<=7%AqhrQI-=B6f{0n=o(dYN!~~vh3BJc$C|?yh ziDHvdZ^SsdYQ@$Y>9AX4i#b>gS&zh#kUaei+f!)V2LionDE#e*jD#G(s}F_g;!JA2k~5L5H%aPo$h~9l|?q$?Z|SD z*2HqnlA(Wau0{{_W_@Kgnh2O>M^;texFmNA+7I^BX8NT zxO7(eu>8Naz5*zYruq90Iovf6+}#NlG!TNjySsaU!-BgVPVhjG5G+7&4;tJZg1ZI% zF1*kCe3iHUbG5a#wZHC}?(LqPo9&rSIO(cl)BC zjhPsnqsn%!*(I(q7jU6q z5+rAQZ7=nT1A)K6T<*g{=0aY+nWm?o`b9{$|hBk3a za9%oB>e0%gcy-h7l6&z{@V%r$N38fV6cF$`j?C4Cj%z}(=rK8Ei27F5x|M9XyN^mD zPs+dk%Ea>sdk||RyH=k&7d%L$;(8ez{hLzx5Q zroeH5X`>l1^qU`*zrHdGZIG?$G^jc#_*S6RrUsZoWKHQ4(;+qc`t$7YUd6KIL);-R zo4s$xl>8K~(r<^jrKvIN16#eCk*UE&=AXQ%%}KloDciC<@O4f}kr5u>EwhCtckO;8 zWx(00*X-|Qu)zCb**M%1d}`Dt=6MUk!!vZ0)Cq3~$J(r8M78tsnC%{Tl z$nmvk7}9oTm(7T89fIY`rB57ZS=?(fRSiJ3UO0&@Jh~`l<8V3pMrjfH7F9~POcANF z7rQl&5$Nwama*XTRI0z%AM&Ug?_EAvnf~&mOzpQT(=}Y!NU&T%zuge7=086RyBNc` z22%*We?=;P&CDr+jX&%y-t;h~5YYEt{K?oUO!$rUdRINK>FriQDjm22V|kfQ z*@xBd{g-w1EuN~GYDHx+rsWpz)Pj6PWx)!s3`CVfS>4PRcV@PTTJwQA$ltg=wk(Cy zY&lD_t*zW00JX#Q4q2kLIPl4XVCp^n{ZF!pZSMFK@=+5Wy*OX)VQn7+TJK!giR3)C z^IX;J@Nps5cSS5(^Qo!|Bj+|g4tP=x@tf-Eg-$hP3@T4QDRYPHbMJT0^JaoiXGvcO z)co8TUAiLaIXQ3edn@&{a`m7ubM#>_PpGC9781e(l7h>hpm%|c@PN3%=ns6wKzku{ z=d-Zk;3%o`I4Km++Yow%g>&AK~cwpukmxySg-Tsw-7 z!E?+`Ur~1k>es2f>q~yuitD0eMc;XOd@(q{e5wAb9*)s@{iT~v1kQS|sYoOp4w0NR zp&ZLePJLJ$ar7s}bv{aG@M@Toq`@*J1vTQmGhgfjJKr# zFZO3uqcHl&R9lN^NFbCgM04ctPt%ZY&uC#XBj{F%%q|2RKIkNJlHb4vEfUMkTZY>1 z`F`1*Sxmm~2{VK_zpZe;&1ORw0HR8EpVfMI)-}&a()25{a#+$1ha!I>|g6Ub-jrd zwCk~9>FK0GTL8hMIJdNw_oRMiQMg_`f+MSNRF;%V%Q9S#JAr)>AxF!tqanH9Cp9zh zseuxMV&+GJq`Nl2T5YvP#)p^5>sS;qqq3oWcY1ZA>@x~2h)I&-=QR9Fu#WTBJqU<3&W<6 zDYeg?M%5my)`0Q0hV%DiNBGf(hp{Q$$>{IjR=t9`>&WlO5cvnz)4Cuvj$v}VWsv1Um*UZp5aZ3Zq(=UP z8ta9oFS#$3frp}{^^yN=O6b%VN^@_WEqxy)RTShva94q0A&l5GA$Z2B(K_ZJBzbZk zSdz@_c%{xunGapU zT|3I_5k4<_v($UDf`8Xa=R@K!>Z_H&5YfeVvayXf;wdoMd>ttKq;zaRzr*oSC(guR zYIiY!u1~M4RlsX1%<+B7CF3&1%6p+n!Ohn(qvnh6?`|3qc;2J!*kafSK=K7Z(%@Zz zz^$&GtDlv##o?q2ikKzkW!vmg6k41EN)5I)QisHwBt6x`xSVGudBjPb%qnIV5|>s4k?i?sqZ zX?OIe^OK%_QH0AG?Oeg&SfzMt)Ob758!V*^HGYi{1yV-ANH|GvanR{@OC`Zd$f4LA2bGLv?u=nLz zQw#c}jNPh&FB)GUi7V_k|pYiBYO73PZ0q)jHr|ZekEO^!a3@! 
zz^f_-POkd37W;YT%;k3UQ{xV>Symd+Sg?PWH)z#M`*Y5d@eh+JVs2~ow-Z_LK9D+5 z)s`L;9?2t#BNJ(Xg2NuKjbOJ&6FwPBD!DtFVcy@U+)1HG%EL;Yzl+UK5ndCbhbz)| zf?C4P4EkMRk{D7{-Czm!opUIR4s--!Y|0n1Nrlok;FAdpKVTDO&xGq%F*D; zaff8OT?T6Slg%r9e-*HRV=2@-VkJJwFpZ8ovhT4J^j7A)4$nPoquwE8nSPPvK7co$ zXs-UFP&{j^0!x1XPKsp$X;w#=9cbD~}8N9%Cl=1G6(71#i6p6|?tEhfdNd znTa+Jf+Z&KYUOm_P^*xaz7ZRHYLyuHx}oO6tgbynFqrD?u+kWRFXXr#lkJT;gkRrn zjn8^P7m3Zh^&#Pf)dxJ@S3$qh-)Rzr9!F=lpJqkaD#VeglqNHj?B|lHdP^r4w@|MQ@$xiEd{R zQZ;|y$SmOV5iv>6?aFw0y-S&5hNClXsb~?OA^eA(Pi|$*SY4Kd#M*vpkd@pi2hQ?& z=E;Jb5Kz+;$KCzl8(tL+18k58d4EgCg-(ZW{s_Xmz~r%yo-eF_x(n+G_)&d#JYV%~ zB&7x;;4)I}Y|TUR+RgZAVz_*?xcgX!mGdIhaWU}58n5yp!ApZg7LSU4yq~4=40M}N z-rq*0RvaT$4|d)btZ0-J8PXmiAWi!6v3G7YDK;X!wfUvB>&9rf6lK7+;v?6!n-D%f z*zjJ|pKV45BWu|Eb*1ApwoOr0T(5UrSDY!>yTgq$yK#B5QIX#0N3Qo%42|ajC{_?@ z-tINfbcUqSu=9I8mjhlGZYro|m)uJIZ3PfVpX8f`Y1?oc-8yMgN@z1mQygosN4F*% zX?noutb|WvYt&pNLw^QhCnaUf(#N2Ea=oLWv{gLwOh)o(6D=R(?^ckg-)LAC;&bj` zefio)*|D>hbRtBQ$lD19*1B000e!l4f5c(c?K03oML~0WCB(r*Qe%h291i7z zu0STAhhQgh6Nc_YsXepi9+4h^!3P^Jm1QjbJgn0R%K{R2BW4BSZ6fQ)KyBI$^Zm$FNh zuh1h57MzNkhD1n2mWAjM1`P11yev(Fq16S{Hwn;rkcsLWcWyE_a&<~^xod%^L^~JQ zs+*IFi9^*{5xtn$laMr4IfAX5Vh+_$iB3@yz1=?u`v;wC9XJ^K0^aB|5|^#}T1yJm zy%R@al+&2~wmM`$>ubHjr(x6>KWu)9eLR2>h0C@qPXb1uj`?Wo1doSJ4WyzT@JM@< z9ALR^cl`t*Neolk=_{=V_-#pw!D%^(eEwYZQczQKI1Wz; zKwERPPEF2MEp6G1%qo~S=zP^gQcX_Zsf=nLNh6b|aydyXc3hf9uF?*Ell0XsvciZ$ zLN>?+>9et9ND{qbnI45cHH@=G%(8e`5{&IeRb6WHc)x~3)w|?r zDm;VnkTStxF7=))Sqt`k2V7By5*r%qF4#8}C zA;PQ`8Qh~3coMHp*wRYwwLmAfRHwsSneIb-64Oc07UGg{r8`?1@4TH=sMCC3jiV*c z)!zK<_gJ@y;7Wc4=*Hb_nZIMXJc5mm1YJFaRx7unTiQJ>mb$_(1{Ym~{lq?6P%hpFtj2E2?Nf$oQEn!=lgFFz;rN$H!62_|vWIr4;MIUD18cR&t zpC@8=zP?`W`q_si>1%i@zs9U$ppNkoa5{Qz;9Bzji7Gt1^)3@H{2?8KB7*V_;icaW z)pl2_C7p?>G&f@heLHmk#aBcUtVnv3WX^4bywL+Os%UWyaoA0+82M_l`QTzgWqj-q zt5qu8HGeb90X)wc^HvtUMqfahatOOIYU3EfsbT1MO$EnoF(F>SE28M2n<>J`d?qycIv z&e5I4A55PZUyF<2bq2Uw9=q?oIIk%UYX1G>hhV(U3pW+(r+G@Q4dS1I$_wUPU~bgd zRQXE$h>Fmo3`SZ!KCx+^aqw&Fmc5va+OJ)R-7}$6$8xjHK35CfQwnd}WQOk|9uH#; z^nZP32(pCTdx1Sl4}4Yd^9zcw>VnrAIrt-0O+eo(Zhzt^W=B&j?%Mfvd9gRJOcl%h z`)ERv-TAwS?6onwZ&7iu$LP5Oa=^Rb0~3J+>xrw2PFjE5dITEhV(@}#M^4Ftd?#J; zi0-Cr9vH7^Rx$C>$rsv* z5PZj=fw6i^efCb2u?C}ES*2(l_$z)kK?2&Q+T@EYMo$5mH3+xckB|EV6 zh805j9#FPuL+o=rU@EPCV#=hxyRChT)ZJpujc_~HCAxYS1xH%2C9q+mi>z-yP{X(- zqX`}EvDs%dS6r55x0CQ>U*Qx*?v0#JtI#a`wo7mnO8B9Z>Y(vnFVn;^XWR8y8O0gY zqBgRSc^{>k6LEWMFa0y&HMp^#3(|1US0wF@oq+$A5)4@{I2T??MR6{y8Z<0`Z*XxZ zRv(XN7*V!fJ)@pr2xj>pnBb*DQ=yk5!(^ydmOSU<*{;-md@UqkNHjyMhtd%4=ur76 zcYev@=I_$Iaw&3lIW;_d1V-|ql-HcpIV}AYt9LkYR6A#}580kcfQU+i@W3SkPQ@RB zFGek_mS0tE^NE~dnCS9V=(VaWdy2$bhd0|9MEdr#n+O{pc?!nzm#&>nq$QD0*u2^m zU6&;JtR<9tawygU9o1BREr(|KmFV+Vsoo8DF6=p?G_Xo;+PD!9Udj&0*{xD+@85zB z-XCbH3kvfV&FJ+&dlK*2z5 zk=d1YdSq$<$IiJSL+VYFN<$X3Sp4_sw`&MeILV&GSlYD+8Ja`xWa-$)#>Vq*t57;CYKT?(ZKK zGp7y`$n%I8e*<5{N@Bh&;Yr9vdPEYYU0`nK@w0zKX0ejPvHtRj97jZ|C}dEokT#eG zYX5}dG(~#oMJaI6mLZiWJo+V%eUWdSa=)=o?nULYvW|2P-V1so;jh}J<*jP?ertYU ziEZluGZ`&q>+2n`#vH&F+3*pp$8mfv{j79Qr~^9I*pnEfZ|u-6os}W&!qpxqlcetB z4|{B8oXio=qtbmDC3xASRobMJN9L6or1BagWR?+aAJ>kdtGXYb#S|E{`u!7V$xj3D z&>-%5yYPPaU9X+~c*$XeV5Qt#&a1lY2rhSSu(BMhd7AJhtnvwP$r?78rDM!?GkR7z zA$D}s=B?z~bV%vQ6xRy7HIwYzNFP$zC1`zF`{8D{>F~6lq12Czj9;$-Weg{5KePr| z7l)bJg!cF+XZ3=(@UW@d$HF8J<~O~^ za;~zH-!Uweo4BBZ^9J32`ew{Ij$pJB%!ljV_h{`*Da-=u!3(;GA>6PCII(xi%Qp`(T5!SFDmFFO ziYlr?Ar_HRmDGpdZ9IWpAbMrC9u z43`0>eRJuRJv0^aiFVz|sV6L@Xd%Tf?p^B#FQ<4BXj(lVA=i+- za;7IoI@Tjg`Wwoo*S-9%U+sH{h?+`XDifH-w=``yG^!4-cTuGsRI`lgGbij1`7NlN z2yTu28no^+@RIF2_Zw`$JyxbA;ri8ou3wu5T7YKKUh#tfl|f|SVv*vNH)WdiT^d$z 
zXfi*g+>Y4qrA>b>v`t*L<%@iM!h7&E(U)qy_sUuN^2|=|j)|1Z<@AVPAoPLE{EpZG zy4S`WUUsv*3(VmPnQf|9W^)i7;L_ahvC`<;m}ki@zlO=ngx6o-H>f%2(8|$X^8^n1 zTDWP_@A%Jr3Ajad2|b7$xAmG~v2lk{m0LVDhrn7tO{>wGuTIA?SEsTrki}IWh|o&b z3QMS_Y>?)Ruda@$Da!R_0l9VR9Cu9B9@i-HQ*5~e!{~>UNX&2vB;BS-Xm05MUA(dc z4!NQ+_FRl_KhI-~H;JT>tQdl4VFgg6(iQc}Qo9;^^TFfGUoZ{%7fqiq*I7^9#mQjQ z>b=^RziBka7ON*Un4Rr_qrSD=Otwi6UcV-W&m!4#$EY=BNVK5;vAN3{v9FdRu;)MD z641wlxeFiTe=1<)Hk*yxfbUg)jv6EQcg7L?x|+pG=)m9}df-D3c}X=fNVW>-2Xs$+ zh_x!H^Us+@4aEJtwSE6&fZry>Mh*0i{7<@))CuZEWNZLnEf@fxg%ba_6atMXA%SWj z9LTmBhyw8sRAles5rUx(!iTBke!i8o%#8_=Q3s(uGj=ei8@+}Ys)O)ET15c>MM-fb zRW>h27gjGTH+wrJSs)B9?Eg=b@=E(5c%KCT0DA)gl>e<1dM*U6D<#1xQ$rfH|KVFf z%7KdwIZy{Fko{=|V#~lpD3qcV3kab3i_ik1tpVbG1~q^;bg9t+09j1{fbK648>CzV z#QO~TQ6vNJ2+ic@00RJE{RK*c05w76&!89rnXh+H{qTbU0O%5oe@h{d7b2<&Vtxii z>bmtG5di?@aR30}zmPw|*g>K+L3GccqLw1~7B~RF7Rn9dFHk(>rzVK>85A2jUV}ve z0GMC`0Ze~^d?4srAc1F1697WC1R%;GNF^ZU zTA)`3f3z2`d8)bz6*f%me`QV#2mbTS>>aFK-Tv>VX1luq&7&9;!i_n$hmRnprE)u0N~|cpoc{ugnkJ~g8sM3 zK(KT`FM*g4Rvi$~pNPLXNI)EPKupi(JulSw(gA99@itK4U)8*j zf;j4c@E{{PAd){0a`=b+00dbV#QEn^SQmOZ57E{AM<~)PKuJF|s_xna2!Ja2-%BmnT@U&tRjxXXsz>w@T>MW0yRZ~0>;IA#B9Qm3^r5aK!* zQl`H}FVjLZyuqA<+D;TyV}F|}GbBduA5*nPbSNK#-pz-~3N^-mOCgXOGNcD$c-Gos z2~V?(1^|Ey^$h4HK>wCPpb!LCA4L7^>3k=pN)SX0O7%o3c*2?_~2eVcOTCz zz^C8>5`6$5x&a12@K?p3EgxAygv7=Gz~mtCe^h8c0GT)Zr|EC?q&Uc-9L=E||8|vTMhJ-!h~Zh&spLHM5CtO; zJ}fRfJmiT9o)7{y`llX8VQ$fB9;hGL0{^G$$*O^THUcp{3;&;dQ!7RwJeZ1I$dwU@ z_Kzk>jX^JA#ZTZNYzOe95MASc`qKa8HE}YAYO3%Y9+GejPs04aw)VfTlmF_h5SXU{ z`DqNIepVP4L_*6GdMh7#K)dmOOCc~=69n;51+n5Q$-+Z-`~(2lp}$2m06 - + @@ -144,53 +144,53 @@

    Model Creation

All Axon models start with an input layer, optionally specifying
the expected shape of the input data:

input = Axon.input("input", shape: {nil, 784})

    Notice you can specify some dimensions as nil, indicating that the dimension size will be filled in at model runtime. You can then compose inputs with other layers:

    model =
       input
  |> Axon.dense(128, activation: :relu)
  |> Axon.batch_norm()
  |> Axon.dropout(rate: 0.8)
  |> Axon.dense(64)
  |> Axon.tanh()
  |> Axon.dense(10)
  |> Axon.activation(:softmax)

You can inspect the model for a nice summary:

IO.inspect(model)

#Axon<
  inputs: %{"input" => {nil, 784}}
  outputs: "softmax_0"
  nodes: 9
>

Or use the Axon.Display module to see more in-depth summaries:

Axon.Display.as_table(model, Nx.template({1, 784}, :f32)) |> IO.puts
     
+----------------------------------------------------------------------------------------------------------------+
|                                                     Model                                                      |
+=======================================+=============+==============+===================+=======================+
| Layer                                 | Input Shape | Output Shape | Options           | Parameters            |
+=======================================+=============+==============+===================+=======================+
| input ( input )                       | []          | {1, 784}     | shape: {nil, 784} |                       |
|                                       |             |              | optional: false   |                       |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| dense_0 ( dense["input"] )            | [{1, 784}]  | {1, 128}     |                   | kernel: f32[784][128] |
|                                       |             |              |                   | bias: f32[128]        |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| relu_0 ( relu["dense_0"] )            | [{1, 128}]  | {1, 128}     |                   |                       |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| batch_norm_0 ( batch_norm["relu_0"] ) | [{1, 128}]  | {1, 128}     | epsilon: 1.0e-5   | gamma: f32[128]       |
|                                       |             |              | channel_index: 1  | beta: f32[128]        |
|                                       |             |              | momentum: 0.1     | mean: f32[128]        |
|                                       |             |              |                   | var: f32[128]         |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| dropout_0 ( dropout["batch_norm_0"] ) | [{1, 128}]  | {1, 128}     | rate: 0.8         |                       |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| dense_1 ( dense["dropout_0"] )        | [{1, 128}]  | {1, 64}      |                   | kernel: f32[128][64]  |
|                                       |             |              |                   | bias: f32[64]         |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| tanh_0 ( tanh["dense_1"] )            | [{1, 64}]   | {1, 64}      |                   |                       |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| dense_2 ( dense["tanh_0"] )           | [{1, 64}]   | {1, 10}      |                   | kernel: f32[64][10]   |
|                                       |             |              |                   | bias: f32[10]         |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
| softmax_0 ( softmax["dense_2"] )      | [{1, 10}]   | {1, 10}      |                   |                       |
+---------------------------------------+-------------+--------------+-------------------+-----------------------+
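Beyond tables, Axon.Display can also render the computation graph itself. A minimal sketch, assuming the optional :kino dependency is available (as_graph produces output meant for rendering inside Livebook):

template = Nx.template({1, 784}, :f32)
# Renders the model graph; evaluate inside a Livebook cell to see it
Axon.Display.as_graph(model, template)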

Multiple Inputs

Creating a model with multiple inputs is as easy as declaring an
additional input in your Axon graph. Every input layer present in
the final Axon graph will be required to be passed as input at the
time of model execution.

inp1 = Axon.input("input_0", shape: {nil, 1})
inp2 = Axon.input("input_1", shape: {nil, 1})

# Both inputs will be used
model1 = Axon.add(inp1, inp2)

# Only inp2 will be used
model2 = Axon.add(inp2, inp2)

Axon graphs are immutable, which means composing and manipulating
an Axon graph creates an entirely new graph. Additionally, layer
names are lazily generated at model execution time. To avoid
non-deterministic input orderings and names, Axon requires each input
to have a unique binary identifier. You can then reference
inputs by name when passing to models at execution time:

inp1 = Axon.input("input_0", shape: {nil, 1})
inp2 = Axon.input("input_1", shape: {nil, 1})

model1 = Axon.add(inp1, inp2)

{init_fn, predict_fn} = Axon.build(model1)

params1 = init_fn.(Nx.template({1, 1}, {:f, 32}), %{})
# Inputs are referenced by name
predict_fn.(params1, %{"input_0" => x, "input_1" => y})
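For completeness, x and y above are ordinary Nx tensors matching the declared {nil, 1} input shapes. A minimal sketch with hypothetical values:

x = Nx.tensor([[1.0]])
y = Nx.tensor([[2.0]])

# Returns the elementwise sum of the two inputs
predict_fn.(params1, %{"input_0" => x, "input_1" => y})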

Multiple Outputs

Nx offers robust container support which is extended to Axon.
Axon allows you to wrap any valid Nx container
in a layer. Containers are most commonly used to structure outputs:

inp1 = Axon.input("input_0", shape: {nil, 1})
inp2 = Axon.input("input_1", shape: {nil, 1})
model = Axon.container(%{foo: inp1, bar: inp2})

Containers can be arbitrarily nested:

inp1 = Axon.input("input_0", shape: {nil, 1})
inp2 = Axon.input("input_1", shape: {nil, 1})
model = Axon.container({%{foo: {inp1, %{bar: inp2}}}})

You can even use custom structs which implement the container protocol:

inp1 = Axon.input("input_0", shape: {nil, 1})
inp2 = Axon.input("input_1", shape: {nil, 1})
model = Axon.container(%MyStruct{foo: inp1, bar: inp2})
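When you execute a container model, the output mirrors the container's structure. A minimal sketch using the map-based model above, assuming init_fn accepts a map of input templates keyed by input name:

{init_fn, predict_fn} = Axon.build(model)

templates = %{
  "input_0" => Nx.template({1, 1}, :f32),
  "input_1" => Nx.template({1, 1}, :f32)
}
params = init_fn.(templates, %{})

# The result is a map with the same :foo and :bar keys as the container
x = Nx.tensor([[1.0]])
y = Nx.tensor([[2.0]])
%{foo: foo, bar: bar} = predict_fn.(params, %{"input_0" => x, "input_1" => y})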

Custom Layers

All of Axon's built-in layers (aside from special ones such as input, constant, and container) make use of the custom layer API described below.

Axon layers are really just placeholders for Nx computations with trainable
parameters and possibly state. To define a custom layer, you just need to
define a defn implementation:

defn my_layer(x, weight, _opts \\ []) do
  Nx.atan2(x, weight)
end

Notice the only stipulation is that your custom layer implementation must
accept at least 1 input and a list of options. At execution time, every
layer will be passed a :mode option which can be used to control behavior
at training and inference time.

Inputs to your custom layer can be either Axon graph inputs or trainable
parameters. You can pass Axon graph inputs as-is to a custom layer. To
declare trainable parameters, use Axon.param/3:

weight = Axon.param("weight", param_shape)

To create a custom layer, you "wrap" your implementation and inputs into
a layer using Axon.layer. You'll notice the API mirrors Elixir's apply:

def atan2_layer(%Axon{} = input) do
  weight = Axon.param("weight", param_shape)
  Axon.layer(&my_layer/3, [input, weight])
end
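The wrapped layer then composes like any built-in layer. A minimal sketch, assuming param_shape above was given a concrete shape such as {8}:

model =
  Axon.input("input", shape: {nil, 8})
  # atan2_layer/1 is the custom layer defined above
  |> atan2_layer()
  |> Axon.dense(1)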

Model Execution

Under the hood, Axon models are represented as Elixir structs. You
can initialize and apply models by building or compiling them with
Axon.build/2 or Axon.compile/4 and then calling the produced
initialization and predict functions:

{init_fn, predict_fn} = Axon.build(model)

params = init_fn.(Nx.template({1, 1}, {:f, 32}), %{})
predict_fn.(params, inputs)

You may either set the default JIT compiler or backend globally, or
pass a specific compiler to Axon.build/2:

EXLA.set_as_nx_default([:tpu, :cuda, :rocm, :host])

{init_fn, predict_fn} = Axon.build(model, compiler: EXLA, mode: :train)

params = init_fn.(Nx.template({1, 1}, {:f, 32}), %{})
predict_fn.(params, inputs)

    predict_fn by default runs in inference mode, which performs certain optimizations and removes layers such as dropout layers. If constructing a training step using Axon.predict/4 or Axon.build/2, be sure to specify mode: :train.
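As a sketch of the difference, building in :train mode keeps layers such as dropout active and returns both the prediction and updated layer state (for stateful layers like batch norm). The map shape below reflects recent Axon versions and is an assumption worth checking against your installed release:

{init_fn, predict_fn} = Axon.build(model, mode: :train)
params = init_fn.(Nx.template({1, 784}, :f32), %{})

# In :train mode the result carries :prediction and :state keys
%{prediction: preds, state: state} = predict_fn.(params, inputs)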

Model Training

    Combining the Axon model creation API with the optimization and training APIs, you can create and train neural networks with ease:

model =
  Axon.input("input_0", shape: {nil, 784})
  |> Axon.dense(128, activation: :relu)
  |> Axon.layer_norm()
  |> Axon.dropout()
  |> Axon.dense(10, activation: :softmax)

IO.inspect model

model_state =
  model
  |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adamw(learning_rate: 0.005))
  |> Axon.Loop.run(train_data, epochs: 10, compiler: EXLA)

    See Polaris.Updates and Axon.Loop for a more in-depth treatment of model optimization and model training.
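Once trained, the same model_state can be fed into an evaluation loop. A minimal sketch, assuming test_data is an enumerable of {input, target} batches like train_data above:

model
|> Axon.Loop.evaluator()
|> Axon.Loop.metric(:accuracy)
# Evaluate using the state produced by the trainer loop
|> Axon.Loop.run(test_data, model_state, compiler: EXLA)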

Model Serving

    multiple prediction requests and run the inference for all of them at once. Conveniently, Nx already has an abstraction for this task in the form of Nx.Serving. Here's how you could define a serving for an Axon -model:

    def build_serving() do
    +model:

    def build_serving() do
       # Configuration
       batch_size = 4
    -  defn_options = [compiler: EXLA]
    +  defn_options = [compiler: EXLA]
     
    -  Nx.Serving.new(
    +  Nx.Serving.new(
         # This function runs on the serving startup
    -    fn ->
    +    fn ->
           # Build the Axon model and load params (usually from file)
    -      model = build_model()
    -      params = load_params()
    +      model = build_model()
    +      params = load_params()
     
           # Build the prediction defn function
    -      {_init_fun, predict_fun} = Axon.build(model)
    +      {_init_fun, predict_fun} = Axon.build(model)
     
    -      inputs_template = %{"pixel_values" => Nx.template({batch_size, 224, 224, 3}, :f32)}
    -      template_args = [Nx.to_template(params), inputs_template]
    +      inputs_template = %{"pixel_values" => Nx.template({batch_size, 224, 224, 3}, :f32)}
    +      template_args = [Nx.to_template(params), inputs_template]
     
           # Compile the prediction function upfront for the configured batch_size
    -      predict_fun = Nx.Defn.compile(predict_fun, template_args, defn_options)
    +      predict_fun = Nx.Defn.compile(predict_fun, template_args, defn_options)
     
           # The returned function is called for every accumulated batch
    -      fn inputs ->
    -        inputs = Nx.Batch.pad(inputs, batch_size - inputs.size)
    -        predict_fun.(params, inputs)
    -      end
    -    end,
    +      fn inputs ->
    +        inputs = Nx.Batch.pad(inputs, batch_size - inputs.size)
    +        predict_fun.(params, inputs)
    +      end
    +    end,
         batch_size: batch_size
    -  )
    -end

    Then you would start the serving server as part of your application's -supervision tree:

    children = [
    +  )
    +end

    Then you would start the serving server as part of your application's +supervision tree:

    children = [
       ...,
    -  {Nx.Serving, serving: build_serving(), name: MyApp.Serving, batch_timeout: 100}
    -]

    With that in place, you can now ask serving for predictions all across + {Nx.Serving, serving: build_serving(), name: MyApp.Serving, batch_timeout: 100} +]

    With that in place, you can now ask serving for predictions all across your application (controllers, live views, async jobs, etc.). Having a -tensor input you would do:

    inputs = %{"pixel_values" => ...}
    -batch = Nx.Batch.concatenate([inputs])
    -result = Nx.Serving.batched_run(MyApp.Serving, batch)

    Usually you also want to do pre/post-processing of the model input/output. +tensor input you would do:

    inputs = %{"pixel_values" => ...}
    +batch = Nx.Batch.concatenate([inputs])
    +result = Nx.Serving.batched_run(MyApp.Serving, batch)

    Usually you also want to do pre/post-processing of the model input/output. You could make those preparations directly before/after Nx.Serving.batched_run/2, however you can also make use of Nx.Serving.client_preprocessing/2 and Nx.Serving.client_postprocessing/2 to encapsulate that logic as part of @@ -417,16 +417,6 @@
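For example, here is a sketch of attaching both callbacks to the serving above. The preprocess_input/1 and postprocess_output/1 helpers are hypothetical, and the exact callback signatures may differ between Nx versions:

serving =
  build_serving()
  |> Nx.Serving.client_preprocessing(fn input ->
    # build the batch on the client before it is sent to the serving process
    batch = Nx.Batch.concatenate([%{"pixel_values" => preprocess_input(input)}])
    {batch, :no_client_info}
  end)
  |> Nx.Serving.client_postprocessing(fn {output, _metadata}, _client_info ->
    postprocess_output(output)
  end)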

    -
    - - -

    Wraps an Axon model into a namespace.

    - -
    -
    nx(input, fun, opts \\ []) @@ -1258,21 +1248,12 @@

    -
    - - -

    Deserializes serialized model and parameters into a {model, params} -tuple.

    - -
    -

    Freezes parameters returned from the given function or predicate.

    @@ -1289,21 +1270,12 @@

    -
    - - -

    Serializes a model and its parameters for persisting -models to disk or elsewhere.

    - -
    -

    Unfreezes parameters returned from the given function or predicate.

    @@ -1538,7 +1510,7 @@

    block(fun, opts \\ [])

    - + View Source @@ -1552,28 +1524,28 @@

    block(fun, opts \\ [])

    of operations in a neural network. All parameters in the block are shared between every usage of the block.

    This returns an arity-1 function which accepts a list of inputs which are forwarded to fun. This is most often used in situations where -you wish to re-use parameters in a block:

    reused_dense = Axon.block(&Axon.dense(&1, 32))

Every time reused_dense is invoked, it re-uses the same parameters:

    input = Axon.input("features")
    +you wish to re-use parameters in a block:

    reused_dense = Axon.block(&Axon.dense(&1, 32))

Every time reused_dense is invoked, it re-uses the same parameters:

    input = Axon.input("features")
     # unique parameters
    -x1 = Axon.dense(input, 32)
    +x1 = Axon.dense(input, 32)
     # unique parameters
    -x2 = reused_dense.(x1)
    +x2 = reused_dense.(x1)
     # parameters shared
    -x3 = reused_dense.(x2)

    Subgraphs in blocks can be arbitrarily complex:

    reused_block = Axon.block(fn x ->
    +x3 = reused_dense.(x2)

    Subgraphs in blocks can be arbitrarily complex:

    reused_block = Axon.block(fn x ->
       x
    -  |> Axon.dense(32)
    -  |> Axon.dense(64)
    -  |> Axon.dense(32)
    -end)

    Blocks can also have multiple inputs, you can invoke a block with multiple -inputs by passing a list of arguments:

    reused_block = Axon.block(fn x, y, z ->
    -  x = Axon.dense(x, 32)
    -  y = Axon.dense(y, 32)
    -  z = Axon.dense(z, 32)
    -
    -  Axon.add([x, y, z])
    -end)
    +  |> Axon.dense(32)
    +  |> Axon.dense(64)
    +  |> Axon.dense(32)
    +end)

    Blocks can also have multiple inputs, you can invoke a block with multiple +inputs by passing a list of arguments:

    reused_block = Axon.block(fn x, y, z ->
    +  x = Axon.dense(x, 32)
    +  y = Axon.dense(y, 32)
    +  z = Axon.dense(z, 32)
    +
    +  Axon.add([x, y, z])
    +end)
     
     # invoke with a list
    -reused_block.([x, y, z])

+reused_block.([x, y, z])

    Blocks prefix subgraph parameters with their name and a dot. As with other Axon layers, if a name is not explicitly provided, one will be dynamically generated.
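For example, a minimal sketch that passes an explicit :name (assuming block/2 accepts the same :name option as other layers):

# parameters inside this block are prefixed with "shared_dense."
reused_dense = Axon.block(&Axon.dense(&1, 32), name: "shared_dense")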

    @@ -1589,7 +1561,7 @@

    block(fun, opts \\ [])

    constant(tensor, opts \\ [])

    - + View Source @@ -1601,9 +1573,9 @@

    constant(tensor, opts \\ [])

    Adds a constant layer to the network.

    Constant layers encapsulate Nx tensors in an Axon layer for ease of use with other Axon layers. They can be used interchangeably -with other Axon layers:

    inp = Axon.input("input", shape: {nil, 32})
    -my_constant = Axon.constant(Nx.iota({1, 32}))
    -model = Axon.add(inp, my_constant)

+with other Axon layers:

    inp = Axon.input("input", shape: {nil, 32})
    +my_constant = Axon.constant(Nx.iota({1, 32}))
    +model = Axon.add(inp, my_constant)

Constant layers will be cast according to the mixed precision policy. If it's important for your constant to retain its type during the computation, you will need to set the mixed precision policy to ignore constant layers.
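For example, a sketch using Axon.MixedPrecision (the except: filter is an assumption here; consult Axon.MixedPrecision for the exact API):

policy = Axon.MixedPrecision.create_policy(params: {:f, 32}, compute: {:f, 16}, output: {:f, 32})

# apply the policy everywhere except constant layers so constants keep their type
model = Axon.MixedPrecision.apply_policy(model, policy, except: [:constant])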

    @@ -1626,7 +1598,7 @@

    constant(tensor, opts \\ [])

    container(container, opts \\ [])

    - + View Source @@ -1651,27 +1623,27 @@

    container(container, opts \\ [])

    Examples

    -
    iex> inp1 = Axon.input("input_0", shape: {nil, 1})
    -iex> inp2 = Axon.input("input_1", shape: {nil, 2})
    -iex> model = Axon.container(%{a: inp1, b: inp2})
    -iex> %{a: a, b: b} = Axon.predict(model, %{}, %{
    -...>    "input_0" => Nx.tensor([[1.0]]),
    -...>    "input_1" => Nx.tensor([[1.0, 2.0]])
    -...> })
    +
    iex> inp1 = Axon.input("input_0", shape: {nil, 1})
    +iex> inp2 = Axon.input("input_1", shape: {nil, 2})
    +iex> model = Axon.container(%{a: inp1, b: inp2})
    +iex> %{a: a, b: b} = Axon.predict(model, Axon.ModelState.empty(), %{
    +...>    "input_0" => Nx.tensor([[1.0]]),
    +...>    "input_1" => Nx.tensor([[1.0, 2.0]])
    +...> })
     iex> a
    -#Nx.Tensor<
    -  f32[1][1]
    -  [
    -    [1.0]
    -  ]
    ->
    +#Nx.Tensor<
    +  f32[1][1]
    +  [
    +    [1.0]
    +  ]
    +>
     iex> b
    -#Nx.Tensor<
    -  f32[1][2]
    -  [
    -    [1.0, 2.0]
    -  ]
    ->
+#Nx.Tensor<
+  f32[1][2]
+  [
+    [1.0, 2.0]
+  ]
+>
    @@ -1685,7 +1657,7 @@

    container(container, opts \\ [])

    input(name, opts \\ [])

    - + View Source @@ -1720,7 +1692,7 @@

    input(name, opts \\ [])

    layer(op, inputs, opts \\ [])

    - + View Source @@ -1739,49 +1711,9 @@

    layer(op, inputs, opts \\ [])

    the layer, as long as they are declared

    Note this means your layer should not use these as input options, as they will always be dropped during inference compilation.

    Axon's compiler will additionally forward the following options to every layer at inference time:

    • :mode - :inference or :train. To control layer behavior -based on inference or train time.

    op is a function of the form:

    fun = fn input, weight, bias, _opts ->
    +based on inference or train time.

    op is a function of the form:

    fun = fn input, weight, bias, _opts ->
       input * weight + bias
    -end
    -
    - -
    - -
    - - - Link to this function - -

    namespace(axon, name)

    - - - - View Source - - - -
    - -
    - -

    Wraps an Axon model into a namespace.

    A namespace is a part of an Axon model which is meant to -be a self-contained collection of Axon layers. Namespaces -are guaranteed to always generate with the same internal -layer names and can be re-used universally across models.

    Namespaces are most useful for containing large collections -of layers and offering a straightforward means for accessing -the parameters of individual model components. A common application -of namespaces is to use them in with a pre-trained model for -fine-tuning:

    {base, resnet_params} = resnet()
    -base = base |> Axon.namespace("resnet")
    -
    -model = base |> Axon.dense(1)
    -{init_fn, predict_fn} = Axon.build(model)
    -
-init_fn.(Nx.template({1, 3, 224, 224}, {:f, 32}), %{"resnet" => resnet_params})

    Notice you can use init_fn in conjunction with namespaces -to specify which portion of a model you'd like to initialize -from a fixed starting point.

    Namespaces have fixed names, which means it's easy to run into namespace -collisions. Re-using namespaces, re-using inner parts of a namespace, -and attempting to share layers between namespaces are still sharp -edges in namespace usage.

    +end
    @@ -1795,7 +1727,7 @@

    namespace(axon, name)

    nx(input, fun, opts \\ [])

    - + View Source @@ -1807,8 +1739,8 @@

    nx(input, fun, opts \\ [])

    Applies the given Nx expression to the input.

    Nx layers are meant for quick applications of functions without trainable parameters. For example, they are useful for applying -functions which apply accessors to containers:

    model = Axon.container({foo, bar})
    -Axon.nx(model, &elem(&1, 0))

    +functions which apply accessors to containers:

    model = Axon.container({foo, bar})
    +Axon.nx(model, &elem(&1, 0))

    @@ -1828,7 +1760,7 @@

    nx(input, fun, opts \\ [])

    optional(x, opts \\ [])

    - + View Source @@ -1839,38 +1771,38 @@

    optional(x, opts \\ [])

    Wraps an Axon model in an optional node.

By default, when an optional input is missing, all subsequent layers
-are nullified. For example, consider this model:

    values = Axon.input("values")
    -mask = Axon.input("mask", optional: true)
    +are nullified. For example, consider this model:

    values = Axon.input("values")
    +mask = Axon.input("mask", optional: true)
     
     model =
       values
    -  |> Axon.dense(10)
    -  |> Axon.multiply(mask)
    -  |> Axon.dense(1)
    -  |> Axon.sigmoid()

+  |> Axon.dense(10)
+  |> Axon.multiply(mask)
+  |> Axon.dense(1)
+  |> Axon.sigmoid()

    In case the mask is not provided, the input node will resolve to %Axon.None{} and so will all the layers that depend on it. By using optional/2 a layer may opt-in to receive %Axon.None{}. To fix our example, we could define a custom layer to apply the -mask only when present

    def apply_optional_mask(%Axon{} = x, %Axon{} = mask) do
    -  Axon.layer(
    -    fn x, mask, _opts ->
    -      case mask do
    -        %Axon.None{} -> x
    -        mask -> Nx.multiply(x, mask)
    -      end
    -    end,
    -    [x, Axon.optional(mask)]
    -  )
    -end
    +mask only when present

    def apply_optional_mask(%Axon{} = x, %Axon{} = mask) do
    +  Axon.layer(
    +    fn x, mask, _opts ->
    +      case mask do
    +        %Axon.None{} -> x
    +        mask -> Nx.multiply(x, mask)
    +      end
    +    end,
    +    [x, Axon.optional(mask)]
    +  )
    +end
     
     # ...
     
     model =
       values
    -  |> Axon.dense(10)
    -  |> apply_optional_mask(mask)
    -  |> Axon.dense(1)
    -  |> Axon.sigmoid()

    + |> Axon.dense(10) + |> apply_optional_mask(mask) + |> Axon.dense(1) + |> Axon.sigmoid()

    @@ -1890,7 +1822,7 @@

    optional(x, opts \\ [])

    param(name, shape, opts \\ [])

    - + View Source @@ -1924,7 +1856,7 @@

    param(name, shape, opts \\ [])

    stack_columns(x, opts \\ [])

    - + View Source @@ -1970,7 +1902,7 @@

    activation(x, activation, opts \\ [])

    - + View Source @@ -2001,7 +1933,7 @@

    activation(x, activation, opts \\ [])

    celu(x, opts \\ [])

    - + View Source @@ -2031,7 +1963,7 @@

    celu(x, opts \\ [])

    elu(x, opts \\ [])

    - + View Source @@ -2061,7 +1993,7 @@

    elu(x, opts \\ [])

    exp(x, opts \\ [])

    - + View Source @@ -2091,7 +2023,7 @@

    exp(x, opts \\ [])

    gelu(x, opts \\ [])

    - + View Source @@ -2121,7 +2053,7 @@

    gelu(x, opts \\ [])

    hard_sigmoid(x, opts \\ [])

    - + View Source @@ -2151,7 +2083,7 @@

    hard_sigmoid(x, opts \\ [])

    hard_silu(x, opts \\ [])

    - + View Source @@ -2181,7 +2113,7 @@

    hard_silu(x, opts \\ [])

    hard_tanh(x, opts \\ [])

    - + View Source @@ -2211,7 +2143,7 @@

    hard_tanh(x, opts \\ [])

    leaky_relu(x, opts \\ [])

    - + View Source @@ -2241,7 +2173,7 @@

    leaky_relu(x, opts \\ [])

    linear(x, opts \\ [])

    - + View Source @@ -2271,7 +2203,7 @@

    linear(x, opts \\ [])

    log_sigmoid(x, opts \\ [])

    - + View Source @@ -2301,7 +2233,7 @@

    log_sigmoid(x, opts \\ [])

    log_softmax(x, opts \\ [])

    - + View Source @@ -2331,7 +2263,7 @@

    log_softmax(x, opts \\ [])

    log_sumexp(x, opts \\ [])

    - + View Source @@ -2361,7 +2293,7 @@

    log_sumexp(x, opts \\ [])

    mish(x, opts \\ [])

    - + View Source @@ -2391,7 +2323,7 @@

    mish(x, opts \\ [])

    relu6(x, opts \\ [])

    - + View Source @@ -2421,7 +2353,7 @@

    relu6(x, opts \\ [])

    relu(x, opts \\ [])

    - + View Source @@ -2451,7 +2383,7 @@

    relu(x, opts \\ [])

    selu(x, opts \\ [])

    - + View Source @@ -2481,7 +2413,7 @@

    selu(x, opts \\ [])

    sigmoid(x, opts \\ [])

    - + View Source @@ -2511,7 +2443,7 @@

    sigmoid(x, opts \\ [])

    silu(x, opts \\ [])

    - + View Source @@ -2541,7 +2473,7 @@

    silu(x, opts \\ [])

    softmax(x, opts \\ [])

    - + View Source @@ -2571,7 +2503,7 @@

    softmax(x, opts \\ [])

    softplus(x, opts \\ [])

    - + View Source @@ -2601,7 +2533,7 @@

    softplus(x, opts \\ [])

    softsign(x, opts \\ [])

    - + View Source @@ -2631,7 +2563,7 @@

    softsign(x, opts \\ [])

    tanh(x, opts \\ [])

    - + View Source @@ -2673,7 +2605,7 @@

    bias(x, opts \\ [])

    - + View Source @@ -2704,7 +2636,7 @@

    bias(x, opts \\ [])

    bilinear(input1, input2, units, opts \\ [])

    - + View Source @@ -2714,7 +2646,7 @@

    bilinear(input1, input2, units, opts \\ [])
    -

    Adds a bilinear layer to the network.

    The bilinear layer implements:

    output = activation(dot(dot(input1, kernel), input2) + bias)

    where activation is given by the :activation option and both +

    Adds a bilinear layer to the network.

    The bilinear layer implements:

    output = activation(dot(dot(input1, kernel), input2) + bias)

    where activation is given by the :activation option and both kernel and bias are layer parameters. units specifies the number of output units.

    All dimensions but the last of input1 and input2 must match. The batch sizes of both inputs must also match or at least one must be nil. @@ -2741,7 +2673,7 @@
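For instance, a minimal sketch combining two feature inputs with a bilinear layer:

input1 = Axon.input("input_0", shape: {nil, 16})
input2 = Axon.input("input_1", shape: {nil, 32})
out = Axon.bilinear(input1, input2, 8, activation: :tanh)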

    bilinear(input1, input2, units, opts \\ [])

    dense(x, units, opts \\ [])

    - + View Source @@ -2751,7 +2683,7 @@

    dense(x, units, opts \\ [])

    -

    Adds a dense layer to the network.

    The dense layer implements:

    output = activation(dot(input, kernel) + bias)

    where activation is given by the :activation option and both +

    Adds a dense layer to the network.

    The dense layer implements:

    output = activation(dot(input, kernel) + bias)

    where activation is given by the :activation option and both kernel and bias are layer parameters. units specifies the number of output units.

    Compiles to Axon.Layers.dense/4.
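For example, a minimal sketch of a small two-layer network built with dense layers:

model =
  Axon.input("features", shape: {nil, 16})
  |> Axon.dense(32, activation: :relu, name: "hidden")
  |> Axon.dense(1, activation: :sigmoid)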

    @@ -2776,7 +2708,7 @@

    dense(x, units, opts \\ [])

    embedding(x, vocab_size, embedding_size, opts \\ [])

    - + View Source @@ -2822,7 +2754,7 @@

    conv(x, units, opts \\ [])

    - + View Source @@ -2861,7 +2793,7 @@

    conv(x, units, opts \\ [])

    conv_transpose(x, units, opts \\ [])

    - + View Source @@ -2898,7 +2830,7 @@

    conv_transpose(x, units, opts \\ [])

    depthwise_conv(x, channel_multiplier, opts \\ [])

    - + View Source @@ -2939,7 +2871,7 @@

    depthwise_conv(x, channel_multiplier, opts

    separable_conv2d(x, channel_multiplier, opts \\ [])

    - + View Source @@ -2978,7 +2910,7 @@

    separable_conv2d(x, channel_multiplier, opt

    separable_conv3d(x, channel_multiplier, opts \\ [])

    - + View Source @@ -3029,7 +2961,7 @@

    alpha_dropout(x, opts \\ [])

    - + View Source @@ -3060,7 +2992,7 @@

    alpha_dropout(x, opts \\ [])

    dropout(x, opts \\ [])

    - + View Source @@ -3091,7 +3023,7 @@

    dropout(x, opts \\ [])

    feature_alpha_dropout(x, opts \\ [])

    - + View Source @@ -3122,7 +3054,7 @@

    feature_alpha_dropout(x, opts \\ [])

    spatial_dropout(x, opts \\ [])

    - + View Source @@ -3165,7 +3097,7 @@

    adaptive_avg_pool(x, opts \\ [])

    - + View Source @@ -3196,7 +3128,7 @@

    adaptive_avg_pool(x, opts \\ [])

    adaptive_lp_pool(x, opts \\ [])

    - + View Source @@ -3227,7 +3159,7 @@

    adaptive_lp_pool(x, opts \\ [])

    adaptive_max_pool(x, opts \\ [])

    - + View Source @@ -3258,7 +3190,7 @@

    adaptive_max_pool(x, opts \\ [])

    avg_pool(x, opts \\ [])

    - + View Source @@ -3291,7 +3223,7 @@

    avg_pool(x, opts \\ [])

    global_avg_pool(x, opts \\ [])

    - + View Source @@ -3325,7 +3257,7 @@

    global_avg_pool(x, opts \\ [])

    global_lp_pool(x, opts \\ [])

    - + View Source @@ -3359,7 +3291,7 @@

    global_lp_pool(x, opts \\ [])

    global_max_pool(x, opts \\ [])

    - + View Source @@ -3393,7 +3325,7 @@

    global_max_pool(x, opts \\ [])

    lp_pool(x, opts \\ [])

    - + View Source @@ -3426,7 +3358,7 @@

    lp_pool(x, opts \\ [])

    max_pool(x, opts \\ [])

    - + View Source @@ -3471,7 +3403,7 @@

    batch_norm(x, opts \\ [])

    - + View Source @@ -3504,7 +3436,7 @@

    batch_norm(x, opts \\ [])

    group_norm(x, num_groups, opts \\ [])

    - + View Source @@ -3537,7 +3469,7 @@

    group_norm(x, num_groups, opts \\ [])

    instance_norm(x, opts \\ [])

    - + View Source @@ -3570,7 +3502,7 @@

    instance_norm(x, opts \\ [])

    layer_norm(x, opts \\ [])

    - + View Source @@ -3613,7 +3545,7 @@

    conv_lstm(x, units)

    - + View Source @@ -3635,7 +3567,7 @@

    conv_lstm(x, units)

    conv_lstm(x, units, opts)

    - + View Source @@ -3665,7 +3597,7 @@

    conv_lstm(x, units, opts)

    conv_lstm(x, hidden_state, units, opts)

    - + View Source @@ -3677,7 +3609,7 @@

    conv_lstm(x, hidden_state, units, opts)

Adds a convolutional long short-term memory (LSTM) layer to the network with the given initial hidden state.

    ConvLSTMs apply Axon.Layers.conv_lstm_cell/5 over an entire input -sequence and return:

    {{new_cell, new_hidden}, output_sequence}

    You can use the output state as the hidden state of another +sequence and return:

    {{new_cell, new_hidden}, output_sequence}

    You can use the output state as the hidden state of another ConvLSTM layer.

    @@ -3700,7 +3632,7 @@

    conv_lstm(x, hidden_state, units, opts)

    gru(x, units)

    -
    + View Source @@ -3722,7 +3654,7 @@

    gru(x, units)

    gru(x, units, opts)

    - + View Source @@ -3752,7 +3684,7 @@

    gru(x, units, opts)

    gru(x, hidden_state, units, opts)

    - + View Source @@ -3764,7 +3696,7 @@

    gru(x, hidden_state, units, opts)

    Adds a gated recurrent unit (GRU) layer to the network with the given initial hidden state.

    GRUs apply Axon.Layers.gru_cell/7 over an entire input -sequence and return:

    {{new_hidden}, output_sequence}

    You can use the output state as the hidden state of another +sequence and return:

    {{new_hidden}, output_sequence}

    You can use the output state as the hidden state of another GRU layer.

    @@ -3787,7 +3719,7 @@

    gru(x, hidden_state, units, opts)

    lstm(x, units)

    - + View Source @@ -3809,7 +3741,7 @@

    lstm(x, units)

    lstm(x, units, opts)

    - + View Source @@ -3839,7 +3771,7 @@

    lstm(x, units, opts)

    lstm(x, hidden_state, units, opts \\ [])

    - + View Source @@ -3851,7 +3783,7 @@

lstm(x, hidden_state, units, opts \\ [])

Adds a long short-term memory (LSTM) layer to the network with the given initial hidden state.

    LSTMs apply Axon.Layers.lstm_cell/7 over an entire input -sequence and return:

    {output_sequence, {new_cell, new_hidden}}

    You can use the output state as the hidden state of another +sequence and return:

    {output_sequence, {new_cell, new_hidden}}

    You can use the output state as the hidden state of another LSTM layer.

    @@ -3876,7 +3808,7 @@

    lstm(x, hidden_state, units, opts \\ [])

    mask(input, eos_token, opts \\ [])

    -
    + View Source @@ -3918,7 +3850,7 @@

    add(x, y, opts)

    - + View Source @@ -3949,7 +3881,7 @@

    add(x, y, opts)

    concatenate(x, y, opts)

    - + View Source @@ -3980,7 +3912,7 @@

    concatenate(x, y, opts)

    cond(parent, cond_fn, true_graph, false_graph, opts \\ [])

    - + View Source @@ -4005,7 +3937,7 @@

    cond(parent, cond_fn, true_graph, false_gra

    multiply(x, y, opts)

    - + View Source @@ -4038,7 +3970,7 @@

    multiply(x, y, opts)

    split(parent, splits, opts \\ [])

    - + View Source @@ -4067,7 +3999,7 @@

    split(parent, splits, opts \\ [])

    subtract(x, y, opts)

    - + View Source @@ -4112,7 +4044,7 @@

    flatten(x, opts \\ [])

    - + View Source @@ -4146,7 +4078,7 @@

    flatten(x, opts \\ [])

    pad(x, config, value \\ 0.0, opts \\ [])

    - + View Source @@ -4179,7 +4111,7 @@

    pad(x, config, value \\ 0.0, opts \\ [])

    reshape(x, new_shape, opts \\ [])

    - + View Source @@ -4213,7 +4145,7 @@

    reshape(x, new_shape, opts \\ [])

    resize(x, resize_shape, opts \\ [])

    - + View Source @@ -4250,7 +4182,7 @@

    resize(x, resize_shape, opts \\ [])

    transpose(x, permutation \\ nil, opts \\ [])

    - + View Source @@ -4292,7 +4224,7 @@

    build(model, opts \\ [])

    - + View Source @@ -4312,16 +4244,16 @@

    build(model, opts \\ [])

    init_fn

    The init_fn receives two arguments, the input template and -an optional map with initial parameters for layers or namespaces:

    {init_fn, predict_fn} = Axon.build(model)
    -init_fn.(Nx.template({1, 1}, {:f, 32}), %{"dense_0" => dense_params})

    +an optional map with initial parameters for layers or namespaces:

    {init_fn, predict_fn} = Axon.build(model)
    +init_fn.(Nx.template({1, 1}, {:f, 32}), %{"dense_0" => dense_params})

    predict_fn

    The predict_fn receives two arguments, the trained parameters -and the actual inputs:

    {_init_fn, predict_fn} = Axon.build(model, opts)
    -predict_fn.(params, input)

    +and the actual inputs:

    {_init_fn, predict_fn} = Axon.build(model, opts)
    +predict_fn.(params, input)

    @@ -4350,7 +4282,7 @@

    build(model, opts \\ [])

    compile(model, template, init_params \\ %{}, opts \\ [])

    - + View Source @@ -4375,49 +4307,6 @@

    compile(model, template, init_params \\ %{}

    It accepts the same options as build/2.

    -
    - - - -
    - - - Link to this function - -

    deserialize(serialized, opts \\ [])

    - - - - View Source - - - -
    - -
    - -

    Deserializes serialized model and parameters into a {model, params} -tuple.

    It is the opposite of Axon.serialize/3.

    - - - - Examples -

    -
    iex> model = Axon.input("input", shape: {nil, 2}) |> Axon.dense(1, kernel_initializer: :zeros, activation: :relu)
    -iex> {init_fn, _} = Axon.build(model)
    -iex> params = init_fn.(Nx.template({1, 2}, :f32), %{})
    -iex> serialized = Axon.serialize(model, params)
    -iex> {saved_model, saved_params} = Axon.deserialize(serialized)
    -iex> {_, predict_fn} = Axon.build(saved_model)
    -iex> predict_fn.(saved_params, Nx.tensor([[1.0, 1.0]]))
    -#Nx.Tensor<
    -  f32[1][1]
    -  [
    -    [0.0]
    -  ]
    ->
    -
    -
    @@ -4429,7 +4318,7 @@

    deserialize(serialized, opts \\ [])

    freeze(model, fun_or_predicate \\ :all)

    - + View Source @@ -4437,6 +4326,10 @@

    freeze(model, fun_or_predicate \\ :all)

    +
    + This function is deprecated. Use Axon.ModelState.freeze/2 instead. +
    +

    Freezes parameters returned from the given function or predicate.

    fun can be a predicate :all, up: n, or down: n. :all @@ -4449,18 +4342,18 @@

    freeze(model, fun_or_predicate \\ :all)

    cnn_base = get_pretrained_cnn_base()
    +in code here:

    cnn_base = get_pretrained_cnn_base()
     model =
       cnn_base
    -  |> Axon.freeze()
    -  |> Axon.flatten()
    -  |> Axon.dense(1024, activation: :relu)
    -  |> Axon.dropout()
    -  |> Axon.dense(1000, activation: :softmax)
    +  |> Axon.freeze()
    +  |> Axon.flatten()
    +  |> Axon.dense(1024, activation: :relu)
    +  |> Axon.dropout()
    +  |> Axon.dense(1000, activation: :softmax)
     
     model
    -|> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.005))
    -|> Axon.Loop.run(data, epochs: 10)

    When compiled, frozen parameters are wrapped in Nx.Defn.Kernel.stop_grad/1, +|> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.005)) +|> Axon.Loop.run(data, epochs: 10)

    When compiled, frozen parameters are wrapped in Nx.Defn.Kernel.stop_grad/1, which zeros out the gradient with respect to the frozen parameter. Gradients of frozen parameters will return 0.0, meaning they won't be changed during the update process.

    @@ -4477,7 +4370,7 @@

    freeze(model, fun_or_predicate \\ :all)

    predict(model, params, input, opts \\ [])

    - + View Source @@ -4502,54 +4395,6 @@

    predict(model, params, input, opts \\ [])

    -
    - - - -
    - - - Link to this function - -

    serialize(axon, params, opts \\ [])

    - - - - View Source - - - -
    - -
    - -

    Serializes a model and its parameters for persisting -models to disk or elsewhere.

    Model and parameters are serialized as a tuple, where the -model is converted to a recursive map to ensure compatibility -with future Axon versions and the parameters are serialized -using Nx.serialize/2. There is some additional metadata included -such as current serialization version for compatibility.

    Serialization opts are forwarded to Nx.serialize/2 and -:erlang.term_to_binary/2 for controlling compression options.

    - - - - Examples -

    -
    iex> model = Axon.input("input", shape: {nil, 2}) |> Axon.dense(1, kernel_initializer: :zeros, activation: :relu)
    -iex> {init_fn, _} = Axon.build(model)
    -iex> params = init_fn.(Nx.template({1, 2}, :f32), %{})
    -iex> serialized = Axon.serialize(model, params)
    -iex> {saved_model, saved_params} = Axon.deserialize(serialized)
    -iex> {_, predict_fn} = Axon.build(saved_model)
    -iex> predict_fn.(saved_params, Nx.tensor([[1.0, 1.0]]))
    -#Nx.Tensor<
    -  f32[1][1]
    -  [
    -    [0.0]
    -  ]
    ->
    -
    -
    @@ -4561,7 +4406,7 @@

    serialize(axon, params, opts \\ [])

    unfreeze(model, fun_or_predicate \\ :all)

    - + View Source @@ -4569,6 +4414,10 @@

    unfreeze(model, fun_or_predicate \\ :all) +
This function is deprecated. Use Axon.ModelState.unfreeze/2 instead.
    +

    Unfreezes parameters returned from the given function or predicate.

    fun can be a predicate :all, up: n, or down: n. :all @@ -4578,14 +4427,14 @@

    unfreeze(model, fun_or_predicate \\ :all)true if a parameter should be unfrozen or false otherwise.

    Unfreezing parameters is useful when fine tuning a model which you have previously frozen and performed transfer learning on. You may want to unfreeze some of the later frozen layers in a model and -fine tune them specifically for your application:

    cnn_base = get_pretrained_cnn_base()
    +fine tune them specifically for your application:

    cnn_base = get_pretrained_cnn_base()
     model =
       frozen_model
    -  |> Axon.unfreeze(up: 25)
    +  |> Axon.unfreeze(up: 25)
     
     model
    -|> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.0005))
    -|> Axon.Loop.run(data, epochs: 10)

    When compiled, frozen parameters are wrapped in Nx.Defn.Kernel.stop_grad/1, +|> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.0005)) +|> Axon.Loop.run(data, epochs: 10)

    When compiled, frozen parameters are wrapped in Nx.Defn.Kernel.stop_grad/1, which zeros out the gradient with respect to the frozen parameter. Gradients of frozen parameters will return 0.0, meaning they won't be changed during the update process.

    @@ -4612,7 +4461,7 @@

    get_inputs(axon)

    - + View Source @@ -4634,7 +4483,7 @@

    get_inputs(axon)

    get_op_counts(axon)

    - + View Source @@ -4651,13 +4500,13 @@

    get_op_counts(axon)

    Examples

    -
    iex> model = Axon.input("input", shape: {nil, 1}) |> Axon.dense(2)
    -iex> Axon.get_op_counts(model)
    -%{input: 1, dense: 1}
    +
    iex> model = Axon.input("input", shape: {nil, 1}) |> Axon.dense(2)
    +iex> Axon.get_op_counts(model)
    +%{input: 1, dense: 1}
     
    -iex> model = Axon.input("input", shape: {nil, 1}) |> Axon.tanh() |> Axon.tanh()
    -iex> Axon.get_op_counts(model)
    -%{input: 1, tanh: 2}
    +
    iex> model = Axon.input("input", shape: {nil, 1}) |> Axon.tanh() |> Axon.tanh() +iex> Axon.get_op_counts(model) +%{input: 1, tanh: 2}
    @@ -4669,7 +4518,7 @@

    get_op_counts(axon)

    get_options(axon)

    - + View Source @@ -4695,7 +4544,7 @@

    get_options(axon)

    get_output_shape(axon, inputs, opts \\ [])

    - + View Source @@ -4718,7 +4567,7 @@

    get_output_shape(axon, inputs, opts \\ [])<

    get_parameters(axon)

    - + View Source @@ -4742,7 +4591,7 @@

    get_parameters(axon)

    map_nodes(axon, fun)

    - + View Source @@ -4766,24 +4615,24 @@

    map_nodes(axon, fun)

    instrumentation between layers without needing to build a new explicitly instrumented version of a model. For example, you can use this function to visualize intermediate activations -of all convolutional layers in a model:

    instrumented_model = Axon.map_nodes(model, fn
    -  %Axon.Node{op: :conv} = axon_node ->
    -    Axon.attach_hook(axon_node, &visualize_activations/1)
    +of all convolutional layers in a model:

    instrumented_model = Axon.map_nodes(model, fn
    +  %Axon.Node{op: :conv} = axon_node ->
    +    Axon.attach_hook(axon_node, &visualize_activations/1)
     
       axon_node ->
         axon_node
    -end)

    Another use case is to replace entire classes of layers +end)

    Another use case is to replace entire classes of layers with another. For example, you may want to replace all -relu layers with tanh layers:

    new_model = Axon.map_nodes(model, fn
    -  %Axon{op: :relu} = graph ->
    +relu layers with tanh layers:

    new_model = Axon.map_nodes(model, fn
    +  %Axon{op: :relu} = graph ->
         # Get nodes immediate parent
    -    parent = Axon.get_parent(graph)
    +    parent = Axon.get_parent(graph)
         # Replace node with a tanh
    -    Axon.tanh(parent)
    +    Axon.tanh(parent)
     
       graph ->
         graph
    -end)
    +
    end)
    @@ -4795,7 +4644,7 @@

    map_nodes(axon, fun)

    pop_node(axon)

    - + View Source @@ -4805,7 +4654,7 @@

    pop_node(axon)

    -

    Pops the top node off of the graph.

    This returns the popped node and the updated graph:

    {_node, model} = Axon.pop_node(model)
    +

    Pops the top node off of the graph.

    This returns the popped node and the updated graph:

    {_node, model} = Axon.pop_node(model)
    @@ -4817,7 +4666,7 @@

    pop_node(axon)

    reduce_nodes(axon, acc, fun)

    - + View Source @@ -4839,10 +4688,10 @@

    reduce_nodes(axon, acc, fun)

    Internally this function is used in several places to accumulate graph metadata. For example, you can use it to count the number -of a certain type of operation in the graph:

    Axon.reduce_nodes(model, 0, fn
    -  %Axon.Nodes{op: :relu}, acc -> acc + 1
    +of a certain type of operation in the graph:

    Axon.reduce_nodes(model, 0, fn
    +  %Axon.Nodes{op: :relu}, acc -> acc + 1
       _, acc -> acc
    -end)
    +
    end)
    @@ -4854,7 +4703,7 @@

    reduce_nodes(axon, acc, fun)

    set_options(axon, new_opts)

    - + View Source @@ -4881,7 +4730,7 @@

    set_options(axon, new_opts)

    set_parameters(axon, new_params)

    - + View Source @@ -4921,7 +4770,7 @@

    attach_hook(x, fun, opts \\ [])

    - + View Source @@ -4934,20 +4783,20 @@

    attach_hook(x, fun, opts \\ [])

    Attaches a hook to the given Axon model.

    Hooks compile down to Nx.Defn.Kernel.hook/3 and provide the same functionality for adding side-effecting operations to a compiled model. For example, you can use hooks to inspect intermediate activations, -send data to an external service, and more.

    Hooks can be configured to be invoked on the following events:

    • :initialize - on model initialization.
    • :pre_forward - before layer forward pass is invoked.
    • :forward - after layer forward pass is invoked.
    • :backward - after layer backward pass is invoked.

    To invoke a hook on every single event, you may pass :all to on:.

    Axon.input("input", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :all)

    The default event is :forward, assuming you want a hook invoked +send data to an external service, and more.

    Hooks can be configured to be invoked on the following events:

    • :initialize - on model initialization.
    • :pre_forward - before layer forward pass is invoked.
    • :forward - after layer forward pass is invoked.
    • :backward - after layer backward pass is invoked.

    To invoke a hook on every single event, you may pass :all to on:.

    Axon.input("input", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :all)

    The default event is :forward, assuming you want a hook invoked on the layers forward pass.

    You may configure hooks to run in one of only training or inference mode using the :mode option. The default mode is :both to be invoked -during both train and inference mode.

    Axon.input("input", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)

    You can also attach multiple hooks to a single layer. Hooks are invoked in +during both train and inference mode.

    Axon.input("input", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)

    You can also attach multiple hooks to a single layer. Hooks are invoked in the order in which they are declared. If order is important, you should attach -hooks in the order you want them to be executed:

    Axon.input("input", shape: {nil, 1})
    +hooks in the order you want them to be executed:

    Axon.input("input", shape: {nil, 1})
     # I will be executed first
    -|> Axon.attach_hook(&IO.inspect/1)
    +|> Axon.attach_hook(&IO.inspect/1)
     # I will be executed second
    -|> Axon.attach_hook(fn _ -> IO.write("HERE") end)

    Hooks are executed at their point of attachment. You must insert hooks at each point -you want a hook to execute during model execution.

    Axon.input("input", shape: {nil, 1})
    -|> Axon.attach_hook(&IO.inspect/1)
    -|> Axon.relu()
    -|> Axon.attach_hook(&IO.inspect/1)
    +
    |> Axon.attach_hook(fn _ -> IO.write("HERE") end)

    Hooks are executed at their point of attachment. You must insert hooks at each point +you want a hook to execute during model execution.

    Axon.input("input", shape: {nil, 1})
    +|> Axon.attach_hook(&IO.inspect/1)
    +|> Axon.relu()
    +|> Axon.attach_hook(&IO.inspect/1)
    @@ -4961,7 +4810,7 @@

    attach_hook(x, fun, opts \\ [])

    trace_backward(model, inputs, params, loss, opts \\ [])

    - + View Source @@ -4998,7 +4847,7 @@

    trace_backward(model, inputs, params, loss,

    trace_forward(model, inputs, params, opts \\ [])

    - + View Source @@ -5037,7 +4886,7 @@

    trace_forward(model, inputs, params, opts \

    trace_init(model, template, params \\ %{}, opts \\ [])

    - + View Source @@ -5051,7 +4900,7 @@

    trace_init(model, template, params \\ %{}, expression with the given options.

    The returned expression is an Nx expression which can be traversed and lowered to an IR or inspected for debugging purposes.

    You may optionally specify initial parameters for some layers or -namespaces by passing a partial parameter map:

    Axon.trace_init(model, %{"dense_0" => dense_params})

    The parameter map will be merged with the initialized model +namespaces by passing a partial parameter map:

    Axon.trace_init(model, %{"dense_0" => dense_params})

    The parameter map will be merged with the initialized model parameters.

    @@ -5084,7 +4933,7 @@

    t()

    - + View Source @@ -5126,7 +4975,7 @@

    bidirectional(input, forward_fun, merge_fun, opts \\ [])

    - + View Source @@ -5158,7 +5007,7 @@

    bidirectional(input, forward_fun, merge_fun

    blur_pool(x, opts \\ [])

    - + View Source diff --git a/accelerating_axon.html b/accelerating_axon.html index c0dcc0ef..d399e1c3 100644 --- a/accelerating_axon.html +++ b/accelerating_axon.html @@ -14,7 +14,7 @@ - + @@ -136,103 +136,103 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"},
    -  {:exla, ">= 0.5.0"},
    -  {:torchx, ">= 0.5.0"},
    -  {:benchee, "~> 1.1"},
    -  {:kino, ">= 0.9.0", override: true}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"},
    +  {:exla, ">= 0.5.0"},
    +  {:torchx, ">= 0.5.0"},
    +  {:benchee, "~> 1.1"},
    +  {:kino, ">= 0.9.0", override: true}
    +])
    :ok

    Using Nx Backends in Axon

    Nx provides two mechanisms for accelerating your neural networks: backends and compilers. Before we learn how to effectively use them, first let's create a simple model for benchmarking purposes:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(32)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    -  |> Axon.softmax()
    #Axon<
    -  inputs: %{"data" => nil}
    +  Axon.input("data")
    +  |> Axon.dense(32)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
    +  |> Axon.softmax()
    #Axon<
    +  inputs: %{"data" => nil}
       outputs: "softmax_0"
       nodes: 5
    ->

    Backends are where your tensors (your neural network inputs and parameters) are located. By default, Nx and Axon run all computations using the Nx.BinaryBackend which is a pure Elixir implementation of various numerical routines. The Nx.BinaryBackend is guaranteed to run wherever an Elixir installation runs; however, it is very slow. Due to the computational expense of neural networks, you should basically never use the Nx.BinaryBackend and instead opt for one of the available accelerated libraries. At the time of writing, Nx officially supports two of them:

    1. EXLA - Acceleration via Google's XLA project
    2. TorchX - Bindings to LibTorch

    Axon will respect the global and process-level Nx backend configuration. Compilers are covered more in-depth in the second half of this example. You can set the default backend using the following APIs:

    # Sets the global compilation options (for all Elixir processes)
    -Nx.global_default_backend(Torchx.Backend)
    +>

    Backends are where your tensors (your neural network inputs and parameters) are located. By default, Nx and Axon run all computations using the Nx.BinaryBackend which is a pure Elixir implementation of various numerical routines. The Nx.BinaryBackend is guaranteed to run wherever an Elixir installation runs; however, it is very slow. Due to the computational expense of neural networks, you should basically never use the Nx.BinaryBackend and instead opt for one of the available accelerated libraries. At the time of writing, Nx officially supports two of them:

    1. EXLA - Acceleration via Google's XLA project
    2. TorchX - Bindings to LibTorch

    Axon will respect the global and process-level Nx backend configuration. Compilers are covered more in-depth in the second half of this example. You can set the default backend using the following APIs:

    # Sets the global compilation options (for all Elixir processes)
    +Nx.global_default_backend(Torchx.Backend)
     # OR
    -Nx.global_default_backend(EXLA.Backend)
    +Nx.global_default_backend(EXLA.Backend)
     
     # Sets the process-level compilation options (current process only)
    -Nx.default_backend(Torchx.Backend)
    +Nx.default_backend(Torchx.Backend)
     # OR
    -Nx.default_backend(EXLA.Backend)

    Now all tensors and operations on them will run on the configured backend:

    {inputs, _next_key} =
    -  Nx.Random.key(9999)
    -  |> Nx.Random.uniform(shape: {2, 128})
    -
    -{init_fn, predict_fn} = Axon.build(model)
    -params = init_fn.(inputs, %{})
    -predict_fn.(params, inputs)
    #Nx.Tensor<
    -  EXLA.Backend<cuda:0, 0.3278685746.4275961901.179470>
    -  f32[2][1]
    -  [
    -    [1.0],
    -    [1.0]
    -  ]
    +Nx.default_backend(EXLA.Backend)

    Now all tensors and operations on them will run on the configured backend:

    {inputs, _next_key} =
    +  Nx.Random.key(9999)
    +  |> Nx.Random.uniform(shape: {2, 128})
    +
    +{init_fn, predict_fn} = Axon.build(model)
    +params = init_fn.(inputs, %{})
    +predict_fn.(params, inputs)
    #Nx.Tensor<
    +  EXLA.Backend<cuda:0, 0.3278685746.4275961901.179470>
    +  f32[2][1]
    +  [
    +    [1.0],
    +    [1.0]
    +  ]
     >

    As you swap backends above, you will get tensors allocated on different backends as results. You should be careful using multiple backends in the same project as attempting to mix tensors between backends may result in strange performance bugs or errors, as Nx will require you to explicitly convert between backends.

    With most larger models, using a compiler will bring more performance benefits in addition to the backend.

    Using Nx Compilers in Axon

    -

    Axon is built entirely on top of Nx's numerical definitions defn. Functions declared with defn tell Nx to use just-in-time compilation to compile and execute the given numerical definition with an available Nx compiler. Numerical definitions enable acceleration on CPU/GPU/TPU via pluggable compilers. At the time of this writing, only EXLA supports a compiler in addition to its backend.

    When you call Axon.build/2, Axon can automatically mark your initialization and forward functions as JIT compiled functions. First let's make sure we are using the EXLA backend:

    Nx.default_backend(EXLA.Backend)

    And now let's build another model, this time passing the EXLA compiler as an option:

    {inputs, _next_key} =
    -  Nx.Random.key(9999)
    -  |> Nx.Random.uniform(shape: {2, 128})
    +

    Axon is built entirely on top of Nx's numerical definitions defn. Functions declared with defn tell Nx to use just-in-time compilation to compile and execute the given numerical definition with an available Nx compiler. Numerical definitions enable acceleration on CPU/GPU/TPU via pluggable compilers. At the time of this writing, only EXLA supports a compiler in addition to its backend.

    When you call Axon.build/2, Axon can automatically mark your initialization and forward functions as JIT compiled functions. First let's make sure we are using the EXLA backend:

    Nx.default_backend(EXLA.Backend)

    And now let's build another model, this time passing the EXLA compiler as an option:

    {inputs, _next_key} =
    +  Nx.Random.key(9999)
    +  |> Nx.Random.uniform(shape: {2, 128})
     
    -{init_fn, predict_fn} = Axon.build(model, compiler: EXLA)
    -params = init_fn.(inputs, %{})
    -predict_fn.(params, inputs)
    
    -15:39:26.463 [info] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
    +{init_fn, predict_fn} = Axon.build(model, compiler: EXLA)
    +params = init_fn.(inputs, %{})
    +predict_fn.(params, inputs)
    
    +15:39:26.463 [info] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
     
    -15:39:26.473 [info] XLA service 0x7f3488329030 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
    +15:39:26.473 [info] XLA service 0x7f3488329030 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
     
    -15:39:26.473 [info]   StreamExecutor device (0): NVIDIA GeForce RTX 3050 Ti Laptop GPU, Compute Capability 8.6
    +15:39:26.473 [info]   StreamExecutor device (0): NVIDIA GeForce RTX 3050 Ti Laptop GPU, Compute Capability 8.6
     
    -15:39:26.473 [info] Using BFC allocator.
    +15:39:26.473 [info] Using BFC allocator.
     
    -15:39:26.473 [info] XLA backend allocating 3605004288 bytes on device 0 for BFCAllocator.
    +15:39:26.473 [info] XLA backend allocating 3605004288 bytes on device 0 for BFCAllocator.
     
    -15:39:28.272 [info] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.
    -
    #Nx.Tensor<
    -  f32[2][1]
    -  EXLA.Backend<cuda:0, 0.3278685746.4275699756.253533>
    -  [
    -    [1.0],
    -    [1.0]
    -  ]
    ->

    You can also instead JIT compile functions explicitly via the Nx.Defn.jit or compiler-specific JIT APIs. This is useful when running benchmarks against various backends:

    {init_fn, predict_fn} = Axon.build(model)
    +15:39:28.272 [info] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.
    +
    #Nx.Tensor<
    +  f32[2][1]
    +  EXLA.Backend<cuda:0, 0.3278685746.4275699756.253533>
    +  [
    +    [1.0],
    +    [1.0]
    +  ]
    +>

    You can also instead JIT compile functions explicitly via the Nx.Defn.jit or compiler-specific JIT APIs. This is useful when running benchmarks against various backends:

    {init_fn, predict_fn} = Axon.build(model)
     
     # These will both JIT compile with EXLA
    -exla_init_fn = Nx.Defn.jit(init_fn, compiler: EXLA)
    -exla_predict_fn = EXLA.jit(predict_fn)
    #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>
    Benchee.run(
    -  %{
    -    "elixir init" => fn -> init_fn.(inputs, %{}) end,
    -    "exla init" => fn -> exla_init_fn.(inputs, %{}) end
    -  },
    +exla_init_fn = Nx.Defn.jit(init_fn, compiler: EXLA)
    +exla_predict_fn = EXLA.jit(predict_fn)
    #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>
    Benchee.run(
    +  %{
    +    "elixir init" => fn -> init_fn.(inputs, %{}) end,
    +    "exla init" => fn -> exla_init_fn.(inputs, %{}) end
    +  },
       time: 10,
       memory_time: 5,
       warmup: 2
    -)
    Warning: the benchmark elixir init is using an evaluated function.
    +)
    Warning: the benchmark elixir init is using an evaluated function.
       Evaluated functions perform slower than compiled functions.
    -  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
    +  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
       Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs
     
     Warning: the benchmark exla init is using an evaluated function.
       Evaluated functions perform slower than compiled functions.
    -  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
    +  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
       Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs
     
     Operating System: Linux
    -CPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz
    +CPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz
     Number of Available Cores: 4
     Available memory: 24.95 GB
     Elixir 1.13.4
    @@ -264,26 +264,26 @@ 

    exla init 9.80 KB elixir init 644.63 KB - 65.80x memory usage +634.83 KB -**All measurements for memory usage were the same**

    Benchee.run(
    -  %{
    -    "elixir predict" => fn -> predict_fn.(params, inputs) end,
    -    "exla predict" => fn -> exla_predict_fn.(params, inputs) end
    -  },
    +**All measurements for memory usage were the same**
    Benchee.run(
    +  %{
    +    "elixir predict" => fn -> predict_fn.(params, inputs) end,
    +    "exla predict" => fn -> exla_predict_fn.(params, inputs) end
    +  },
       time: 10,
       memory_time: 5,
       warmup: 2
    -)
    Warning: the benchmark elixir predict is using an evaluated function.
    +)
    Warning: the benchmark elixir predict is using an evaluated function.
       Evaluated functions perform slower than compiled functions.
    -  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
    +  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
       Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs
     
     Warning: the benchmark exla predict is using an evaluated function.
       Evaluated functions perform slower than compiled functions.
    -  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
    +  You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.
       Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs
     
     Operating System: Linux
    -CPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz
    +CPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz
     Number of Available Cores: 4
     Available memory: 24.95 GB
     Elixir 1.13.4
    diff --git a/api-reference.html b/api-reference.html
    index 2e3c45f1..ccc90c05 100644
    --- a/api-reference.html
    +++ b/api-reference.html
    @@ -14,7 +14,7 @@
     
         
         
    -    
    +    
     
           
     
    @@ -248,6 +248,15 @@ 

    Utilities for creating mixed precision policies.

    + +
    + + +

    Model State Data Structure.

    +
    diff --git a/complex_models.html b/complex_models.html index 7c0c3c28..01c5ab14 100644 --- a/complex_models.html +++ b/complex_models.html @@ -14,7 +14,7 @@ - + @@ -136,26 +136,26 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"},
    -  {:kino, ">= 0.9.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"},
    +  {:kino, ">= 0.9.0"}
    +])
    :ok

    Creating more complex models

    -

    Not all models you'd want to create fit cleanly in the sequential paradigm. Some models require a more flexible API. Fortunately, because Axon models are just Elixir data structures, you can manipulate them and decompose architectures as you would any other Elixir program:

    input = Axon.input("data")
    +

    Not all models you'd want to create fit cleanly in the sequential paradigm. Some models require a more flexible API. Fortunately, because Axon models are just Elixir data structures, you can manipulate them and decompose architectures as you would any other Elixir program:

    input = Axon.input("data")
     
    -x1 = input |> Axon.dense(32)
    -x2 = input |> Axon.dense(64) |> Axon.relu() |> Axon.dense(32)
    +x1 = input |> Axon.dense(32)
    +x2 = input |> Axon.dense(64) |> Axon.relu() |> Axon.dense(32)
     
    -out = Axon.add(x1, x2)
    #Axon<
    -  inputs: %{"data" => nil}
    +out = Axon.add(x1, x2)
    #Axon<
    +  inputs: %{"data" => nil}
       outputs: "add_0"
       nodes: 7
    ->

    In the snippet above, your model branches input into x1 and x2. Each branch performs a different set of transformations; however, at the end the branches are merged with an Axon.add/3. You might sometimes see layers like Axon.add/3 called combinators. Really they're just layers that operate on multiple Axon models at once - typically to merge some branches together.

    out represents your final Axon model.

    If you visualize this model, you can see the full effect of the branching in this model:

    template = Nx.template({2, 8}, :f32)
    -Axon.Display.as_graph(out, template)
    graph TD;
    +>

    In the snippet above, your model branches input into x1 and x2. Each branch performs a different set of transformations; however, at the end the branches are merged with an Axon.add/3. You might sometimes see layers like Axon.add/3 called combinators. Really they're just layers that operate on multiple Axon models at once - typically to merge some branches together.

    out represents your final Axon model.

    If you visualize this model, you can see the full effect of the branching in this model:

    template = Nx.template({2, 8}, :f32)
    +Axon.Display.as_graph(out, template)
    graph TD;
     3[/"data (:input) {2, 8}"/];
     4["dense_0 (:dense) {2, 32}"];
     5["dense_1 (:dense) {2, 64}"];
    @@ -169,43 +169,43 @@ 

    6 --> 7; 5 --> 6; 3 --> 5; -3 --> 4;

    And you can use Axon.build/2 on out as you would any other Axon model:

    {init_fn, predict_fn} = Axon.build(out)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
    - #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}
    params = init_fn.(template, %{})
    -predict_fn.(params, Nx.iota({2, 8}, type: :f32))
    #Nx.Tensor<
    -  f32[2][32]
    -  [
    -    [-4.283246040344238, 1.8983498811721802, 3.697357654571533, -4.720174789428711, 4.1636152267456055, 1.001131534576416, -0.7027540802955627, -3.7821826934814453, 0.027841567993164062, 9.267499923706055, 3.33616304397583, -1.5465859174728394, 8.983413696289062, 3.7445120811462402, 2.2405576705932617, -3.61336350440979, -1.7320983409881592, 0.5740477442741394, -0.22006472945213318, -0.1806044578552246, 1.1092393398284912, -0.29313594102859497, -0.41948509216308594, 3.526411533355713, -0.9127179384231567, 1.8373844623565674, 1.1746022701263428, -0.6885149478912354, -1.4326229095458984, -1.3498257398605347, -5.803186416625977, 1.5204020738601685],
    -    [-15.615742683410645, 6.555544853210449, 7.033155918121338, -12.33556842803955, 14.105436325073242, -4.230871200561523, 5.985136032104492, -8.445676803588867, 5.383096694946289, 23.413570404052734, 0.8907639980316162, -1.400709629058838, 19.19326400756836, 13.784171104431152, 9.641424179077148, -8.407038688659668, -5.688483238220215, 4.383636474609375, ...]
    -  ]
    ->

    As your architectures grow in complexity, you might find yourself reaching for better abstractions to organize your model creation code. For example, PyTorch models are often organized into nn.Module. The equivalent of an nn.Module in Axon is a regular Elixir function. If you're translating models from PyTorch to Axon, it's natural to create one Elixir function per nn.Module.

    You should write your models as you would write any other Elixir code - you don't need to worry about any framework specific constructs:

    defmodule MyModel do
    -  def model() do
    -    Axon.input("data")
    -    |> conv_block()
    -    |> Axon.flatten()
    -    |> dense_block()
    -    |> dense_block()
    -    |> Axon.dense(1)
    -  end
    -
    -  defp conv_block(input) do
    +3 --> 4;

    And you can use Axon.build/2 on out as you would any other Axon model:

    {init_fn, predict_fn} = Axon.build(out)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
 #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}
params = init_fn.(template, %{})
predict_fn.(params, Nx.iota({2, 8}, type: :f32))
#Nx.Tensor<
  f32[2][32]
  [
    [-4.283246040344238, 1.8983498811721802, 3.697357654571533, -4.720174789428711, 4.1636152267456055, 1.001131534576416, -0.7027540802955627, -3.7821826934814453, 0.027841567993164062, 9.267499923706055, 3.33616304397583, -1.5465859174728394, 8.983413696289062, 3.7445120811462402, 2.2405576705932617, -3.61336350440979, -1.7320983409881592, 0.5740477442741394, -0.22006472945213318, -0.1806044578552246, 1.1092393398284912, -0.29313594102859497, -0.41948509216308594, 3.526411533355713, -0.9127179384231567, 1.8373844623565674, 1.1746022701263428, -0.6885149478912354, -1.4326229095458984, -1.3498257398605347, -5.803186416625977, 1.5204020738601685],
    [-15.615742683410645, 6.555544853210449, 7.033155918121338, -12.33556842803955, 14.105436325073242, -4.230871200561523, 5.985136032104492, -8.445676803588867, 5.383096694946289, 23.413570404052734, 0.8907639980316162, -1.400709629058838, 19.19326400756836, 13.784171104431152, 9.641424179077148, -8.407038688659668, -5.688483238220215, 4.383636474609375, ...]
  ]
>

    As your architectures grow in complexity, you might find yourself reaching for better abstractions to organize your model creation code. For example, PyTorch models are often organized into nn.Module. The equivalent of an nn.Module in Axon is a regular Elixir function. If you're translating models from PyTorch to Axon, it's natural to create one Elixir function per nn.Module.

You should write your models as you would write any other Elixir code - you don't need to worry about any framework-specific constructs:

defmodule MyModel do
  def model() do
    Axon.input("data")
    |> conv_block()
    |> Axon.flatten()
    |> dense_block()
    |> dense_block()
    |> Axon.dense(1)
  end

  defp conv_block(input) do
    residual = input

    x = input |> Axon.conv(3, padding: :same) |> Axon.mish()
     
         x
    |> Axon.add(residual)
    |> Axon.max_pool(kernel_size: {2, 2})
  end

  defp dense_block(input) do
    input |> Axon.dense(32) |> Axon.relu()
  end
end
{:module, MyModel, <<70, 79, 82, 49, 0, 0, 8, ...>>, {:dense_block, 1}}
model = MyModel.model()
#Axon<
  inputs: %{"data" => nil}
       outputs: "dense_2"
       nodes: 12
>
template = Nx.template({1, 28, 28, 3}, :f32)
Axon.Display.as_graph(model, template)
graph TD;
     10[/"data (:input) {1, 28, 28, 3}"/];
     11["conv_0 (:conv) {1, 28, 28, 3}"];
     12["mish_0 (:mish) {1, 28, 28, 3}"];
    diff --git a/credit_card_fraud.html b/credit_card_fraud.html

Mix.install([
  {:axon, "~> 0.3.0"},
  {:nx, "~> 0.4.0", override: true},
  {:exla, "~> 0.4.0"},
  {:explorer, "~> 0.3.1"},
  {:kino, "~> 0.7.0"}
])
     
Nx.Defn.default_options(compiler: EXLA)
Nx.global_default_backend(EXLA.Backend)

alias Explorer.{DataFrame, Series}


    Data processing


The first step is to prepare the data for training and evaluation. Please download the dataset in CSV format from https://www.kaggle.com/mlg-ulb/creditcardfraud (this requires a Kaggle account). Once done, put the file path in the input below.

    data_path_input = Kino.Input.text("Data path (CSV)")

Now, let's read the data into an Explorer.DataFrame:

    data_path = Kino.Input.read(data_path_input)
     
df = DataFrame.from_csv!(data_path, dtypes: [{"Time", :float}])

For further processing, we will need a couple of helper functions. We will group them in a module for convenience.

    defmodule CredidCard.Data do
       import Nx.Defn
     
  def split_train_test(df, portion) do
    num_examples = DataFrame.n_rows(df)
    num_train = ceil(portion * num_examples)
    num_test = num_examples - num_train

    train = DataFrame.slice(df, 0, num_train)
    test = DataFrame.slice(df, num_train, num_test)
    {train, test}
  end

  def split_features_targets(df) do
    features = DataFrame.select(df, &(&1 == "Class"), :drop)
    targets = DataFrame.select(df, &(&1 == "Class"), :keep)
    {features, targets}
  end

  def df_to_tensor(df) do
    df
    |> DataFrame.names()
    |> Enum.map(&Series.to_tensor(df[&1]))
    |> Nx.stack(axis: 1)
  end

  defn normalize_features(tensor) do
    max =
      tensor
      |> Nx.abs()
      |> Nx.reduce_max(axes: [0], keep_axes: true)

    tensor / max
  end
end


With that, we can start converting the data into the desired format. First, we split the data into training and test sets (80% for training and 20% for testing).

    {train_df, test_df} = CredidCard.Data.split_train_test(df, 0.8)
{DataFrame.n_rows(train_df), DataFrame.n_rows(test_df)}

Next, we separate features from labels and convert both to tensors. In the case of features, we additionally normalize each of them, dividing by the maximum absolute value of that feature.

    {train_features, train_targets} = CredidCard.Data.split_features_targets(train_df)
{test_features, test_targets} = CredidCard.Data.split_features_targets(test_df)

train_inputs =
  train_features
  |> CredidCard.Data.df_to_tensor()
  |> CredidCard.Data.normalize_features()

test_inputs =
  test_features
  |> CredidCard.Data.df_to_tensor()
  |> CredidCard.Data.normalize_features()

train_targets = CredidCard.Data.df_to_tensor(train_targets)
test_targets = CredidCard.Data.df_to_tensor(test_targets)
     
     :ok


    Building the model

    Our model for predicting whether a transaction was fraudulent or not is a dense neural network. It consists of two dense layers with 256 neurons, ReLU activation functions, one dropout layer, and a dense layer with one neuron (since the problem is a binary prediction) followed by a sigmoid activation function.

    model =
  Axon.input("input")
  |> Axon.dense(256)
  |> Axon.relu()
  |> Axon.dense(256)
  |> Axon.relu()
  |> Axon.dropout(rate: 0.3)
  |> Axon.dense(1)
  |> Axon.sigmoid()

    Training our model


Now that we have both the data and the model architecture prepared, it's time to train!

Note the class imbalance in the data samples:

    fraud = Nx.sum(train_targets) |> Nx.to_number()
legit = Nx.size(train_targets) - fraud

batched_train_inputs = Nx.to_batched(train_inputs, 2048)
batched_train_targets = Nx.to_batched(train_targets, 2048)
batched_train = Stream.zip(batched_train_inputs, batched_train_targets)

IO.puts("# of legit transactions (train): #{legit}")
IO.puts("# of fraudulent transactions (train): #{fraud}")
IO.puts("% fraudulent transactions (train): #{100 * (fraud / (legit + fraud))}%")

As always, we define our training loop. We use binary cross-entropy as the loss function and Adam as the optimizer, with a learning rate of 0.01. Then we immediately start training, passing in the training portion of the dataset.

    loss =
  &Axon.Losses.binary_cross_entropy(
         &1,
         &2,
         negative_weight: 1 / legit,
         positive_weight: 1 / fraud,
         reduction: :mean
  )

optimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-2)

params =
  model
  |> Axon.Loop.trainer(loss, optimizer)
  |> Axon.Loop.run(batched_train, %{}, epochs: 30, compiler: EXLA)
     
     :ok


    Model evaluation


After the training, there is only one thing left: testing. Here, we will focus on the numbers of true positives, true negatives, false positives, and false negatives, as well as on the likelihood of denying legit and fraudulent transactions.

    batched_test_inputs = Nx.to_batched(test_inputs, 2048)
batched_test_targets = Nx.to_batched(test_targets, 2048)
batched_test = Stream.zip(batched_test_inputs, batched_test_targets)

summarize = fn %Axon.Loop.State{metrics: metrics} = state ->
  legit_transactions_declined = Nx.to_number(metrics["fp"])
  legit_transactions_accepted = Nx.to_number(metrics["tn"])
  fraud_transactions_accepted = Nx.to_number(metrics["fn"])
  fraud_transactions_declined = Nx.to_number(metrics["tp"])
       total_fraud = fraud_transactions_declined + fraud_transactions_accepted
       total_legit = legit_transactions_declined + legit_transactions_accepted
     
  fraud_denial_percent = 100 * (fraud_transactions_declined / total_fraud)
  legit_denial_percent = 100 * (legit_transactions_declined / total_legit)
     
  IO.write("\n")
  IO.puts("Legit Transactions Declined: #{legit_transactions_declined}")
  IO.puts("Fraudulent Transactions Caught: #{fraud_transactions_declined}")
  IO.puts("Fraudulent Transactions Missed: #{fraud_transactions_accepted}")
  IO.puts("Likelihood of catching fraud: #{fraud_denial_percent}%")
  IO.puts("Likelihood of denying legit transaction: #{legit_denial_percent}%")
     
  {:continue, state}
end
     
     model
|> Axon.Loop.evaluator()
|> Axon.Loop.metric(:true_positives, "tp", :running_sum)
|> Axon.Loop.metric(:true_negatives, "tn", :running_sum)
|> Axon.Loop.metric(:false_positives, "fp", :running_sum)
|> Axon.Loop.metric(:false_negatives, "fn", :running_sum)
|> Axon.Loop.handle(:epoch_completed, summarize)
|> Axon.Loop.run(batched_test, params, compiler: EXLA)
     
     :ok
diff --git a/custom_layers.html b/custom_layers.html

Mix.install([
  {:axon, ">= 0.5.0"},
  {:kino, ">= 0.9.0"}
])
:ok

    Creating custom layers


    While Axon has a plethora of built-in layers, more than likely you'll run into a case where you need something not provided by the framework. In these instances, you can use custom layers.

To Axon, layers are really just defn implementations with special Axon inputs. Every layer in Axon (including the built-in layers) is implemented with the Axon.layer/3 function. The API of Axon.layer/3 intentionally mirrors the API of Kernel.apply/2. To declare a custom layer, you need 2 things:

    1. A defn implementation
    2. Inputs

    The defn implementation looks like any other defn you'd write; however, it must always account for additional opts as an argument:

    defmodule CustomLayers0 do
       import Nx.Defn
     
  defn my_layer(input, opts \\ []) do
    opts = keyword!(opts, mode: :train, alpha: 1.0)
     
         input
    |> Nx.sin()
    |> Nx.multiply(opts[:alpha])
  end
end
{:module, CustomLayers0, <<70, 79, 82, 49, 0, 0, 10, ...>>, true}

    Regardless of the options you configure your layer to accept, the defn implementation will always receive a :mode option indicating whether or not the model is running in training or inference mode. You can customize the behavior of your layer depending on the mode.
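For example, here is a minimal sketch of a mode-dependent layer (the module name, option names, and the "noise" itself are made up for illustration):

defmodule ModeAwareLayer do
  import Nx.Defn

  # Nudges the input during training only; acts as identity at inference.
  defn noisy_layer(input, opts \\ []) do
    opts = keyword!(opts, mode: :train, alpha: 1.0e-2)

    case opts[:mode] do
      # training mode: perturb the input (a stand-in for real noise)
      :train -> Nx.add(input, opts[:alpha])
      # inference mode: pass the input through unchanged
      _ -> input
    end
  end
end

When you build a model with Axon.build(model, mode: :train), Axon passes mode: :train down to each layer, so the first branch runs; the default inference build takes the second.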

With an implementation defined, you need only call Axon.layer/3 to apply your custom layer to an Axon input:

    input = Axon.input("data")
     
out = Axon.layer(&CustomLayers0.my_layer/2, [input])
#Axon<
  inputs: %{"data" => nil}
       outputs: "custom_0"
       nodes: 2
>

    Now you can inspect and execute your model as normal:

    template = Nx.template({2, 8}, :f32)
Axon.Display.as_graph(out, template)
    graph TD;
     3[/"data (:input) {2, 8}"/];
     4["custom_0 (:custom) {2, 8}"];
3 --> 4;

Notice that, by default, custom layers render with the operation marked as :custom. This can make it difficult to determine which layer is which during inspection. You can control the rendering by passing :op_name to Axon.layer/3:

    out = Axon.layer(&CustomLayers0.my_layer/2, [input], op_name: :my_layer)
     
Axon.Display.as_graph(out, template)
graph TD;
     3[/"data (:input) {2, 8}"/];
     5["my_layer_0 (:my_layer) {2, 8}"];
     3 --> 5;

    You can also control the name of your layer via the :name option. All other options are forwarded to the layer implementation function:

    out =
  Axon.layer(&CustomLayers0.my_layer/2, [input],
         name: "layer",
         op_name: :my_layer,
         alpha: 2.0
  )
     
Axon.Display.as_graph(out, template)
graph TD;
     3[/"data (:input) {2, 8}"/];
     6["layer (:my_layer) {2, 8}"];
3 --> 6;
    {init_fn, predict_fn} = Axon.build(out)
params = init_fn.(template, %{})
    %{}
    predict_fn.(params, Nx.iota({2, 8}, type: :f32))
    #Nx.Tensor<
  f32[2][8]
  [
    [0.0, 1.6829419136047363, 1.8185948133468628, 0.28224000334739685, -1.513604998588562, -1.9178485870361328, -0.558830976486206, 1.3139731884002686],
    [1.978716492652893, 0.8242369890213013, -1.0880422592163086, -1.9999804496765137, -1.073145866394043, 0.8403340578079224, 1.9812147617340088, 1.3005757331848145]
  ]
>

    Notice that this model does not have any trainable parameters because none of the layers have trainable parameters. You can introduce trainable parameters by passing inputs created with Axon.param/3 to Axon.layer/3. For example, you can modify your original custom layer to take an additional trainable parameter:

    defmodule CustomLayers1 do
       import Nx.Defn
     
  defn my_layer(input, alpha, _opts \\ []) do
         input
    |> Nx.sin()
    |> Nx.multiply(alpha)
  end
end
    {:module, CustomLayers1, <<70, 79, 82, 49, 0, 0, 10, ...>>, true}

    And then construct the layer with a regular Axon input and a trainable parameter:

    alpha = Axon.param("alpha", fn _ -> {} end)
     
out = Axon.layer(&CustomLayers1.my_layer/3, [input, alpha], op_name: :my_layer)
#Axon<
  inputs: %{"data" => nil}
       outputs: "my_layer_0"
       nodes: 2
>
{init_fn, predict_fn} = Axon.build(out)
params = init_fn.(template, %{})
%{
  "my_layer_0" => %{
    "alpha" => #Nx.Tensor<
           f32
           -1.2601861953735352
    >
  }
}

    Notice how your model now initializes with a trainable parameter "alpha" for your custom layer. Each parameter requires a unique (per-layer) string name and a function which determines the parameter's shape from the layer's input shapes.
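For instance, here are two sketches of shape functions (hypothetical names, assuming a single rank-2 {batch, features} input):

# A scalar parameter, exactly like "alpha" above:
scale = Axon.param("scale", fn _input_shape -> {} end)

# A per-feature parameter sized from the input's trailing dimension:
shift = Axon.param("shift", fn {_batch, features} -> {features} end)

At initialization time, Axon calls each shape function with the layer's input shapes and materializes a tensor of the returned shape for that parameter.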

    If you plan on re-using custom layers in many locations, it's recommended that you wrap them in an Elixir function as an interface:

    defmodule CustomLayers2 do
       import Nx.Defn
     
  def my_layer(%Axon{} = input, opts \\ []) do
    opts = Keyword.validate!(opts, [:name])
    alpha = Axon.param("alpha", fn _ -> {} end)
     
    Axon.layer(&my_layer_impl/3, [input, alpha], name: opts[:name], op_name: :my_layer)
  end
     
  defnp my_layer_impl(input, alpha, _opts \\ []) do
         input
    |> Nx.sin()
    |> Nx.multiply(alpha)
  end
end
{:module, CustomLayers2, <<70, 79, 82, 49, 0, 0, 12, ...>>, true}
out =
       input
  |> CustomLayers2.my_layer()
  |> CustomLayers2.my_layer()
  |> Axon.dense(1)
#Axon<
  inputs: %{"data" => nil}
       outputs: "dense_0"
       nodes: 4
>
    Axon.Display.as_graph(out, template)
    graph TD;
     3[/"data (:input) {2, 8}"/];
     8["my_layer_0 (:my_layer) {2, 8}"];
     9["my_layer_1 (:my_layer) {2, 8}"];
    diff --git a/custom_models_loss_optimizers.html b/custom_models_loss_optimizers.html

Mix.install([
  {:axon, github: "elixir-nx/axon"},
  {:nx, "~> 0.3.0", github: "elixir-nx/nx", sparse: "nx", override: true}
])
:ok

    Using custom models in training loops

In the Your first training loop guide, you learned how to declare a supervised training loop using Axon.Loop.trainer/3 with a model, loss function, and optimizer. Your overall model and loop declaration looked something like this:

    model =
  Axon.input("data")
  |> Axon.dense(8)
  |> Axon.relu()
  |> Axon.dense(4)
  |> Axon.relu()
  |> Axon.dense(1)

loop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)

This example uses an %Axon{} struct to represent the model to train, and atoms to represent the loss function and optimizer. Some problems will require a bit more flexibility than this example affords. Fortunately, Axon.Loop.trainer/3 is designed for flexibility.

For example, if your model cannot be cleanly represented as an %Axon{} model, you can opt instead to define custom initialization and forward functions to pass to Axon.Loop.trainer/3. Actually, Axon.Loop.trainer/3 is doing this for you under the hood - the ability to pass an %Axon{} struct directly is just a convenience:

    model =
  Axon.input("data")
  |> Axon.dense(8)
  |> Axon.relu()
  |> Axon.dense(4)
  |> Axon.relu()
  |> Axon.dense(1)

lowered_model = {init_fn, predict_fn} = Axon.build(model)

loop = Axon.Loop.trainer(lowered_model, :mean_squared_error, :sgd)
    #Axon.Loop<
  handlers: %{
    completed: [],
    epoch_completed: [
      {#Function<23.20267452/1 in Axon.Loop.log/5>,
       #Function<5.20267452/1 in Axon.Loop.build_filter_fn/1>}
    ],
    epoch_halted: [],
    epoch_started: [],
    halted: [],
    iteration_completed: [
      {#Function<23.20267452/1 in Axon.Loop.log/5>,
       #Function<3.20267452/1 in Axon.Loop.build_filter_fn/1>}
    ],
    iteration_started: [],
    started: []
  },
  metrics: %{
    "loss" => {#Function<12.6031754/3 in Axon.Metrics.running_average/1>,
     #Function<6.20267452/2 in Axon.Loop.build_loss_fn/1>}
  },
       ...
>

    Notice that Axon.Loop.trainer/3 handles the "lowered" form of an Axon model without issue. When you pass an %Axon{} struct, the trainer factory converts it to a lowered representation for you. With this construct, you can build custom models entirely with Nx defn, or readily mix your Axon models into custom workflows without worrying about compatibility with the Axon.Loop API:

    defmodule CustomModel do
       import Nx.Defn
     

  defn custom_predict_fn(model_predict_fn, params, input) do
    %{prediction: preds} = out = model_predict_fn.(params, input)
    %{out | prediction: Nx.cos(preds)}
  end
end

    {:module, CustomModel, <<70, 79, 82, 49, 0, 0, 9, ...>>, {:custom_predict_fn, 3}}
    train_data =
  Stream.repeatedly(fn ->
    xs = Nx.random_normal({8, 1})
    ys = Nx.sin(xs)
    {xs, ys}
  end)

{init_fn, predict_fn} = Axon.build(model, mode: :train)
custom_predict_fn = &CustomModel.custom_predict_fn(predict_fn, &1, &2)

loop = Axon.Loop.trainer({init_fn, custom_predict_fn}, :mean_squared_error, :sgd)

Axon.Loop.run(loop, train_data, %{}, iterations: 500)
    Epoch: 0, Batch: 500, loss: 0.3053460
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [-0.06573846191167831, 0.37533989548683167, -0.014221129938960075, -0.0056641618721187115, -0.013241665437817574, -0.04930500313639641, 0.03238297998905182, 0.019304191693663597]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.3132522702217102, -0.9284062385559082, 0.5041953921318054, 0.09051526337862015, 0.003381401300430298, -0.22686156630516052, 0.506594181060791, 0.46744370460510254]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.008441010490059853, 0.0, 0.5370790958404541, 0.03584281727671623]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.3442431688308716, -0.33131587505340576, -0.03751888871192932, -0.5497395396232605],
    +        [-0.4568001925945282, -0.5024663805961609, 0.8712142109870911, -0.13484779000282288],
    +        [0.7310590744018555, -0.34318023920059204, 0.3977772295475006, -0.6045383214950562],
    +        [-0.5255699157714844, -0.2829623818397522, -0.45367464423179626, -0.157784566283226],
    +        [-0.47948920726776123, 0.2930692136287689, -0.3784458339214325, -0.69244384765625],
    +        [0.7052943706512451, 0.015830136835575104, -0.02979498915374279, 0.6160839796066284],
    +        [0.3201732933521271, -0.1367085874080658, -0.17100055515766144, 0.7335636019706726],
    +        [-0.2825513482093811, -0.424674928188324, -0.3110836148262024, 0.46001508831977844]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.6889857649803162]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.7191283106803894],
    +        [-0.4222411513328552],
    +        [1.122635006904602],
    +        [-0.7385509014129639]
    +      ]
    +    >
    +  }
    +}

    Using custom loss functions in training loops


Just as Axon.Loop.trainer/3 allows more flexibility with models, it also supports more flexible loss functions. In most cases, you can get away with using one of Axon's built-in loss functions by specifying an atom. Atoms map directly to a loss function defined in Axon.Losses. Under the hood, Axon.Loop.trainer/3 is doing something like:

    loss_fn = &apply(Axon.Losses, loss_atom, [&1, &2])

    Rather than pass an atom, you can pass your own custom arity-2 function to Axon.Loop.trainer/3. This arises most often in cases where you want to control some parameters of the loss function, such as the batch-level reduction:

    loss_fn = &Axon.Losses.mean_squared_error(&1, &2, reduction: :sum)

loop = Axon.Loop.trainer(model, loss_fn, :sgd)
    #Axon.Loop<
  handlers: %{
    completed: [],
    epoch_completed: [
      {#Function<23.20267452/1 in Axon.Loop.log/5>,
       #Function<5.20267452/1 in Axon.Loop.build_filter_fn/1>}
    ],
    epoch_halted: [],
    epoch_started: [],
    halted: [],
    iteration_completed: [
      {#Function<23.20267452/1 in Axon.Loop.log/5>,
       #Function<3.20267452/1 in Axon.Loop.build_filter_fn/1>}
    ],
    iteration_started: [],
    started: []
  },
  metrics: %{
    "loss" => {#Function<12.6031754/3 in Axon.Metrics.running_average/1>,
     #Function<41.3316493/2 in :erl_eval.expr/6>}
  },
       ...
>

    You can also define your own custom loss functions, so long as they match the following spec:

    loss(
  y_true :: tensor[batch, ...] | container(tensor),
  y_preds :: tensor[batch, ...] | container(tensor)
  ) :: scalar

    This is useful for constructing loss functions when dealing with multi-output scenarios. For example, it's very easy to construct a custom loss function which is a weighted average of several loss functions on multiple inputs:

    train_data =
  Stream.repeatedly(fn ->
    xs = Nx.random_normal({8, 1})
    y1 = Nx.sin(xs)
    y2 = Nx.cos(xs)
    {xs, {y1, y2}}
  end)
     
     shared =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
     
    -y1 = Axon.dense(shared, 1)
    -y2 = Axon.dense(shared, 1)
    +y1 = Axon.dense(shared, 1)
    +y2 = Axon.dense(shared, 1)
     
    -model = Axon.container({y1, y2})
model = Axon.container({y1, y2})
    -custom_loss_fn = fn {y_true1, y_true2}, {y_pred1, y_pred2} ->
    -  loss1 = Axon.Losses.mean_squared_error(y_true1, y_pred1, reduction: :mean)
    -  loss2 = Axon.Losses.mean_squared_error(y_true2, y_pred2, reduction: :mean)
    +custom_loss_fn = fn {y_true1, y_true2}, {y_pred1, y_pred2} ->
    +  loss1 = Axon.Losses.mean_squared_error(y_true1, y_pred1, reduction: :mean)
    +  loss2 = Axon.Losses.mean_squared_error(y_true2, y_pred2, reduction: :mean)
     
       loss1
    -  |> Nx.multiply(0.4)
    -  |> Nx.add(Nx.multiply(loss2, 0.6))
    -end
    +  |> Nx.multiply(0.4)
    +  |> Nx.add(Nx.multiply(loss2, 0.6))
    +end
     
     model
    -|> Axon.Loop.trainer(custom_loss_fn, :sgd)
    -|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 1000, loss: 0.1098235
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.07738334685564041, 0.04548311233520508, 0.049238916486501694, 0.38714033365249634, -0.030310271307826042, -0.07575170695781708, 0.02918776497244835, 0.15639683604240417]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.5250527858734131, 0.9252119660377502, -0.7720071077346802, 0.3685735762119293, -0.15688209235668182, -0.41163918375968933, 0.7827479839324951, 0.07295594364404678]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.012770675122737885, 0.6008449792861938, 0.29370757937431335, -0.05354489013552666]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [-0.08783119916915894, 0.4296257495880127, 0.07153885811567307, -0.6921477317810059],
    -        [0.15848888456821442, -0.4663836658000946, 0.7126847505569458, 0.0693722814321518],
    -        [-0.24852830171585083, -0.7588720321655273, -0.5033655166625977, 0.6524038314819336],
    -        [0.2933746874332428, 0.6656989455223083, -0.046741705387830734, 0.44998466968536377],
    -        [0.17215801775455475, -0.3072860836982727, 0.2046997845172882, -0.7001357078552246],
    -        [0.6354788541793823, -0.12706635892391205, -0.18666459619998932, -0.26693975925445557],
    -        [-0.3737913966178894, -0.07344938814640045, 0.22658668458461761, -0.37110695242881775],
    -        [0.01989569514989853, 0.39410898089408875, -0.30496707558631897, -0.4945743680000305]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [-0.5888826251029968]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [1.0239059925079346],
    -        [0.25252565741539],
    -        [0.8877795338630676],
    -        [-0.13882321119308472]
    -      ]
    -    >
    -  },
    -  "dense_3" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.2557465434074402]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [-0.6269392371177673],
    -        [1.1281259059906006],
    -        [-0.503214418888092],
    -        [-0.5435869693756104]
    -      ]
    -    >
    -  }
    -}

|> Axon.Loop.trainer(custom_loss_fn, :sgd)
|> Axon.Loop.run(train_data, %{}, iterations: 1000)

    Epoch: 0, Batch: 1000, loss: 0.1098235
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.07738334685564041, 0.04548311233520508, 0.049238916486501694, 0.38714033365249634, -0.030310271307826042, -0.07575170695781708, 0.02918776497244835, 0.15639683604240417]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.5250527858734131, 0.9252119660377502, -0.7720071077346802, 0.3685735762119293, -0.15688209235668182, -0.41163918375968933, 0.7827479839324951, 0.07295594364404678]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.012770675122737885, 0.6008449792861938, 0.29370757937431335, -0.05354489013552666]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.08783119916915894, 0.4296257495880127, 0.07153885811567307, -0.6921477317810059],
    +        [0.15848888456821442, -0.4663836658000946, 0.7126847505569458, 0.0693722814321518],
    +        [-0.24852830171585083, -0.7588720321655273, -0.5033655166625977, 0.6524038314819336],
    +        [0.2933746874332428, 0.6656989455223083, -0.046741705387830734, 0.44998466968536377],
    +        [0.17215801775455475, -0.3072860836982727, 0.2046997845172882, -0.7001357078552246],
    +        [0.6354788541793823, -0.12706635892391205, -0.18666459619998932, -0.26693975925445557],
    +        [-0.3737913966178894, -0.07344938814640045, 0.22658668458461761, -0.37110695242881775],
    +        [0.01989569514989853, 0.39410898089408875, -0.30496707558631897, -0.4945743680000305]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [-0.5888826251029968]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [1.0239059925079346],
    +        [0.25252565741539],
    +        [0.8877795338630676],
    +        [-0.13882321119308472]
    +      ]
    +    >
    +  },
    +  "dense_3" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.2557465434074402]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.6269392371177673],
    +        [1.1281259059906006],
    +        [-0.503214418888092],
    +        [-0.5435869693756104]
    +      ]
    +    >
    +  }
    +}

    Using custom optimizers in training loops

    As you might expect, it's also possible to customize the optimizer passed to Axon.Loop.trainer/3. If you read the Polaris.Updates documentation, you'll learn that optimizers are actually represented as the tuple {init_fn, update_fn} where init_fn initializes optimizer state from model state and update_fn scales gradients from optimizer state, gradients, and model state.
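To make that contract concrete, here is a sketch showing that plain SGD is just a negative scale of the gradients (assuming Polaris.Updates.scale/1 and Polaris.Updates.apply_updates/2 behave like their Axon.Updates predecessors):

# scale/1 returns exactly such an {init_fn, update_fn} tuple.
{init_fn, update_fn} = Polaris.Updates.scale(-1.0e-2)

# Toy parameters and gradients, shaped like a model state:
params = %{"dense_0" => %{"kernel" => Nx.tensor([[1.0, 2.0]])}}
grads = %{"dense_0" => %{"kernel" => Nx.tensor([[0.5, -0.5]])}}

optimizer_state = init_fn.(params)
{updates, _optimizer_state} = update_fn.(grads, optimizer_state, params)

new_params = Polaris.Updates.apply_updates(params, updates)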

    You likely won't have to implement a custom optimizer; however, you should know how to construct optimizers with different hyperparameters and how to apply different modifiers to different optimizers to customize the optimization process.
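For instance (a hedged sketch of the composition style Polaris inherits from Axon.Updates; check your installed version for the exact function names), modifiers chain by piping, and each stage returns a new {init_fn, update_fn} pair:

# Adam gradient scaling, then global-norm clipping, then the learning rate:
optimizer =
  Polaris.Updates.scale_by_adam()
  |> Polaris.Updates.clip_by_global_norm(max_norm: 1.0)
  |> Polaris.Updates.scale(-1.0e-3)

The composed tuple can then be passed to Axon.Loop.trainer/3 anywhere an atom optimizer would be accepted.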

    When you specify an optimizer as an atom in Axon.Loop.trainer/3, it maps directly to an optimizer declared in Polaris.Optimizers. You can instead opt to declare your optimizer directly. This is most useful for controlling things like the learning rate and various optimizer hyperparameters:

    train_data =
  Stream.repeatedly(fn ->
    xs = Nx.random_normal({8, 1})
    ys = Nx.sin(xs)
    {xs, ys}
  end)
     
     model =
  Axon.input("data")
  |> Axon.dense(8)
  |> Axon.relu()
  |> Axon.dense(4)
  |> Axon.relu()
  |> Axon.dense(1)
     
optimizer = {_init_optimizer_fn, _update_fn} = Polaris.Optimizers.sgd(learning_rate: 1.0e-3)
     
     model
|> Axon.Loop.trainer(:mean_squared_error, optimizer)
|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 1000, loss: 0.0992607
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.06136200204491615, -0.08278193324804306, -0.07280997931957245, 0.08740464597940445, 0.08663233369588852, -0.06915996968746185, 0.03753892332315445, 0.06512840837240219]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [0.622833251953125, 0.24778570234775543, 0.4959430694580078, -0.604946494102478, -0.31578049063682556, 0.09977878630161285, 0.776294469833374, 0.5804685950279236]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [-0.012786266393959522, 0.01057625561952591, 0.10597240924835205, 0.13692162930965424]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.46233609318733215, -0.7435348033905029, -0.10738609731197357, 0.09911829978227615],
    +        [0.5295257568359375, 0.48769527673721313, -0.23950818181037903, -0.26084062457084656],
    +        [-0.5117107033729553, 0.2039143443107605, -0.12630638480186462, -0.41089773178100586],
    +        [-0.6043668985366821, 0.3961969316005707, 0.5120400190353394, -0.6773409247398376],
    +        [0.22123000025749207, 0.7197521924972534, 0.2679356038570404, -0.12402179092168808],
    +        [0.4830038249492645, 0.3629038631916046, 0.49994897842407227, -0.25865232944488525],
    +        [0.29824453592300415, 0.29333528876304626, -0.05371938645839691, 0.5230391621589661],
    +        [0.5483304262161255, 0.08283360302448273, -0.6959219574928284, 0.6471460461616516]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.07759959995746613]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.036170706152915955],
    +        [-0.5362256765365601],
    +        [-0.6853286027908325],
    +        [0.6693617701530457]
    +      ]
    +    >
    +  }
    +}
diff --git a/dist/search_data-7F2B0842.js b/dist/search_data-7F2B0842.js
Here's how you could define a serving for an `Axon`\nmodel:\n\n def build_serving() do\n # Configuration\n batch_size = 4\n defn_options = [compiler: EXLA]\n\n Nx.Serving.new(\n # This function runs on the serving startup\n fn ->\n # Build the Axon model and load params (usually from file)\n model = build_model()\n params = load_params()\n\n # Build the prediction defn function\n {_init_fun, predict_fun} = Axon.build(model)\n\n inputs_template = %{\"pixel_values\" => Nx.template({batch_size, 224, 224, 3}, :f32)}\n template_args = [Nx.to_template(params), inputs_template]\n\n # Compile the prediction function upfront for the configured batch_size\n predict_fun = Nx.Defn.compile(predict_fun, template_args, defn_options)\n\n # The returned function is called for every accumulated batch\n fn inputs ->\n inputs = Nx.Batch.pad(inputs, batch_size - inputs.size)\n predict_fun.(params, inputs)\n end\n end,\n batch_size: batch_size\n )\n end\n\nThen you would start the serving server as part of your application's\nsupervision tree:\n\n children = [\n ...,\n {Nx.Serving, serving: build_serving(), name: MyApp.Serving, batch_timeout: 100}\n ]\n\nWith that in place, you can now ask serving for predictions all across\nyour application (controllers, live views, async jobs, etc.). Having a\ntensor input you would do:\n\n inputs = %{\"pixel_values\" => ...}\n batch = Nx.Batch.concatenate([inputs])\n result = Nx.Serving.batched_run(MyApp.Serving, batch)\n\nUsually you also want to do pre/post-processing of the model input/output.\nYou could make those preparations directly before/after `Nx.Serving.batched_run/2`,\nhowever you can also make use of `Nx.Serving.client_preprocessing/2` and\n`Nx.Serving.client_postprocessing/2` to encapsulate that logic as part of\nthe serving.","ref":"Axon.html#module-using-with-nx-serving","title":"Using with `Nx.Serving` - Axon","type":"module"},{"doc":"Adds an activation layer to the network.\n\nActivation layers are element-wise functions typically called\nafter the output of another layer.","ref":"Axon.html#activation/3","title":"Axon.activation/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#activation/3-options","title":"Options - Axon.activation/3","type":"function"},{"doc":"Adds an Adaptive average pool layer to the network.\n\nSee `Axon.Layers.adaptive_avg_pool/2` for more details.","ref":"Axon.html#adaptive_avg_pool/2","title":"Axon.adaptive_avg_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:output_size` - layer output size.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#adaptive_avg_pool/2-options","title":"Options - Axon.adaptive_avg_pool/2","type":"function"},{"doc":"Adds an Adaptive power average pool layer to the network.\n\nSee `Axon.Layers.adaptive_lp_pool/2` for more details.","ref":"Axon.html#adaptive_lp_pool/2","title":"Axon.adaptive_lp_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:output_size` - layer output size.\n\n * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#adaptive_lp_pool/2-options","title":"Options - Axon.adaptive_lp_pool/2","type":"function"},{"doc":"Adds an Adaptive max pool layer to the network.\n\nSee `Axon.Layers.adaptive_max_pool/2` for more details.","ref":"Axon.html#adaptive_max_pool/2","title":"Axon.adaptive_max_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:output_size` - layer output size.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#adaptive_max_pool/2-options","title":"Options - Axon.adaptive_max_pool/2","type":"function"},{"doc":"Adds a add layer to the network.\n\nThis layer performs an element-wise add operation\non input layers. All input layers must be capable of being\nbroadcast together.\n\nIf one shape has a static batch size, all other shapes must have a\nstatic batch size as well.","ref":"Axon.html#add/3","title":"Axon.add/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#add/3-options","title":"Options - Axon.add/3","type":"function"},{"doc":"Adds an Alpha dropout layer to the network.\n\nSee `Axon.Layers.alpha_dropout/2` for more details.","ref":"Axon.html#alpha_dropout/2","title":"Axon.alpha_dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:rate` - dropout rate. Defaults to `0.5`.\n Needs to be equal or greater than zero and less than one.","ref":"Axon.html#alpha_dropout/2-options","title":"Options - Axon.alpha_dropout/2","type":"function"},{"doc":"Attaches a hook to the given Axon model.\n\nHooks compile down to `Nx.Defn.Kernel.hook/3` and provide the same\nfunctionality for adding side-effecting operations to a compiled\nmodel. For example, you can use hooks to inspect intermediate activations,\nsend data to an external service, and more.\n\nHooks can be configured to be invoked on the following events:\n\n * `:initialize` - on model initialization.\n * `:pre_forward` - before layer forward pass is invoked.\n * `:forward` - after layer forward pass is invoked.\n * `:backward` - after layer backward pass is invoked.\n\nTo invoke a hook on every single event, you may pass `:all` to `on:`.\n\n Axon.input(\"input\", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :all)\n\nThe default event is `:forward`, assuming you want a hook invoked\non the layers forward pass.\n\nYou may configure hooks to run in one of only training or inference\nmode using the `:mode` option. The default mode is `:both` to be invoked\nduring both train and inference mode.\n\n Axon.input(\"input\", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)\n\nYou can also attach multiple hooks to a single layer. Hooks are invoked in\nthe order in which they are declared. If order is important, you should attach\nhooks in the order you want them to be executed:\n\n Axon.input(\"input\", shape: {nil, 1})\n # I will be executed first\n |> Axon.attach_hook(&IO.inspect/1)\n # I will be executed second\n |> Axon.attach_hook(fn _ -> IO.write(\"HERE\") end)\n\nHooks are executed at their point of attachment. 
You must insert hooks at each point\nyou want a hook to execute during model execution.\n\n Axon.input(\"input\", shape: {nil, 1})\n |> Axon.attach_hook(&IO.inspect/1)\n |> Axon.relu()\n |> Axon.attach_hook(&IO.inspect/1)","ref":"Axon.html#attach_hook/3","title":"Axon.attach_hook/3","type":"function"},{"doc":"Adds an Average pool layer to the network.\n\nSee `Axon.Layers.avg_pool/2` for more details.","ref":"Axon.html#avg_pool/2","title":"Axon.avg_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to size of kernel.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:dilations` - window dilations. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#avg_pool/2-options","title":"Options - Axon.avg_pool/2","type":"function"},{"doc":"Adds a Batch normalization layer to the network.\n\nSee `Axon.Layers.batch_norm/6` for more details.","ref":"Axon.html#batch_norm/2","title":"Axon.batch_norm/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. Defaults to `-1`.\n\n * `:epsilon` - numerical stability term. Defaults to `1.0e-5`.","ref":"Axon.html#batch_norm/2-options","title":"Options - Axon.batch_norm/2","type":"function"},{"doc":"Adds a bias layer to the network.\n\nA bias layer simply adds a trainable bias to an input.","ref":"Axon.html#bias/2","title":"Axon.bias/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`.","ref":"Axon.html#bias/2-options","title":"Options - Axon.bias/2","type":"function"},{"doc":"Applies the given forward function bidirectionally and merges\nthe results with the given merge function.\n\nThis is most commonly used with RNNs to capture the dependencies\nof a sequence in both directions.","ref":"Axon.html#bidirectional/4","title":"Axon.bidirectional/4","type":"function"},{"doc":"* `axis` - Axis to reverse.","ref":"Axon.html#bidirectional/4-options","title":"Options - Axon.bidirectional/4","type":"function"},{"doc":"Adds a bilinear layer to the network.\n\nThe bilinear layer implements:\n\n output = activation(dot(dot(input1, kernel), input2) + bias)\n\nwhere `activation` is given by the `:activation` option and both\n`kernel` and `bias` are layer parameters. `units` specifies the\nnumber of output units.\n\nAll dimensions but the last of `input1` and `input2` must match. The\nbatch sizes of both inputs must also match or at least one must be `nil`.\nInferred output batch size coerces to the strictest input batch size.\n\nCompiles to `Axon.Layers.bilinear/5`.","ref":"Axon.html#bilinear/4","title":"Axon.bilinear/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. 
Defaults\n to `:zeros`.\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#bilinear/4-options","title":"Options - Axon.bilinear/4","type":"function"},{"doc":"Returns a function which represents a self-contained re-usable block\nof operations in a neural network. All parameters in the block are\nshared between every usage of the block.\n\nThis returns an arity-1 function which accepts a list of inputs which\nare forwarded to `fun`. This is most often used in situations where\nyou wish to re-use parameters in a block:\n\n reused_dense = Axon.block(&Axon.dense(&1, 32))\n\nEverytime `reused_dense` is invoked, it re-uses the same parameters:\n\n input = Axon.input(\"features\")\n # unique parameters\n x1 = Axon.dense(input, 32)\n # unique parameters\n x2 = reused_dense.(x1)\n # parameters shared\n x3 = reused_dense.(x2)\n\nSubgraphs in blocks can be arbitrarily complex:\n\n reused_block = Axon.block(fn x ->\n x\n |> Axon.dense(32)\n |> Axon.dense(64)\n |> Axon.dense(32)\n end)\n\nBlocks can also have multiple inputs, you can invoke a block with multiple\ninputs by passing a list of arguments:\n\n reused_block = Axon.block(fn x, y, z ->\n x = Axon.dense(x, 32)\n y = Axon.dense(y, 32)\n z = Axon.dense(z, 32)\n\n Axon.add([x, y, z])\n end)\n\n # invoke with a list\n reused_block.([x, y, z])\n\nBlocks prefix subgraph parameters with their name and a dot. As with other\nAxon layers, if a name is not explicitly provided, one will be dynamically\ngenerated.","ref":"Axon.html#block/2","title":"Axon.block/2","type":"function"},{"doc":"Adds a blur pooling layer to the network.\n\nSee `Axon.Layers.blur_pool/2` for more details.","ref":"Axon.html#blur_pool/2","title":"Axon.blur_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#blur_pool/2-options","title":"Options - Axon.blur_pool/2","type":"function"},{"doc":"Builds the given model to `{init_fn, predict_fn}`.\n\nThe given functions can be either given as arguments to `Nx.Defn`\nfunctions or be invoked directly, to perform just-in-time compilation\nand execution. If you want to compile the model (instead of just-in-time)\nbased on a predefined initialization shape, see `compile/4`.\n\n## `init_fn`\n\nThe `init_fn` receives two arguments, the input template and\nan optional map with initial parameters for layers or namespaces:\n\n {init_fn, predict_fn} = Axon.build(model)\n init_fn.(Nx.template({1, 1}, {:f, 32}), %{\"dense_0\" => dense_params})\n\n## `predict_fn`\n\nThe `predict_fn` receives two arguments, the trained parameters\nand the actual inputs:\n\n {_init_fn, predict_fn} = Axon.build(model, opts)\n predict_fn.(params, input)","ref":"Axon.html#build/2","title":"Axon.build/2","type":"function"},{"doc":"* `:compiler` - the underlying `Nx.Defn` compiler to perform\n JIT compilation when the functions are invoked. If none is\n passed, it uses the default compiler configured in `Nx.Defn`;\n\n * `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`\n\n * `:mode` - one of `:inference` or `:train`. 
Forwarded to layers\n to control differences in compilation at training or inference time.\n Defaults to `:inference`\n\n * `:global_layer_options` - a keyword list of options passed to\n layers that accept said options\n\nAll other options are forwarded to the underlying JIT compiler.","ref":"Axon.html#build/2-options","title":"Options - Axon.build/2","type":"function"},{"doc":"Adds a Continuously-differentiable exponential linear unit activation layer to the network.\n\nSee `Axon.Activations.celu/1` for more details.","ref":"Axon.html#celu/2","title":"Axon.celu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#celu/2-options","title":"Options - Axon.celu/2","type":"function"},{"doc":"Compiles the given model to `{init_fn, predict_fn}`.\n\nThis function will compile a model specialized to the given\ninput shapes and types. This is useful for avoiding the overhead\nof long compilations at program runtime. You must provide template\ninputs which match the expected shapes and types of inputs at\nexecution time.\n\nThis function makes use of the built-in `Nx.Defn.compile/3`. Note\nthat passing inputs which differ in shape or type from the templates\nprovided to this function will result in a crash.","ref":"Axon.html#compile/4","title":"Axon.compile/4","type":"function"},{"doc":"It accepts the same options as `build/2`.","ref":"Axon.html#compile/4-options","title":"Options - Axon.compile/4","type":"function"},{"doc":"Adds a concatenate layer to the network.\n\nThis layer will concatenate inputs along the last\ndimension unless specified otherwise.","ref":"Axon.html#concatenate/3","title":"Axon.concatenate/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:axis` - concatenate axis. Defaults to `-1`.","ref":"Axon.html#concatenate/3-options","title":"Options - Axon.concatenate/3","type":"function"},{"doc":"Adds a conditional layer which conditionally executes\n`true_graph` or `false_graph` based on the condition `cond_fn`\nat runtime.\n\n`cond_fn` is an arity-1 function executed on the output of the\nparent graph. It must return a boolean scalar tensor (e.g. 1 or 0).\n\nThe shapes of `true_graph` and `false_graph` must be equal.","ref":"Axon.html#cond/5","title":"Axon.cond/5","type":"function"},{"doc":"Adds a constant layer to the network.\n\nConstant layers encapsulate Nx tensors in an Axon layer for ease\nof use with other Axon layers. They can be used interchangeably\nwith other Axon layers:\n\n inp = Axon.input(\"input\", shape: {nil, 32})\n my_constant = Axon.constant(Nx.iota({1, 32}))\n model = Axon.add(inp, my_constant)\n\nConstant layers will be cast according to the mixed precision policy.\nIf it's important for your constant to retain it's type during\nthe computation, you will need to set the mixed precision policy to\nignore constant layers.","ref":"Axon.html#constant/2","title":"Axon.constant/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#constant/2-options","title":"Options - Axon.constant/2","type":"function"},{"doc":"Adds a container layer to the network.\n\nIn certain cases you may want your model to have multiple\noutputs. 
In order to make this work, you must \"join\" the\noutputs into an Axon layer using this function for use in\ninitialization and inference later on.\n\nThe given container can be any valid Axon Nx container.","ref":"Axon.html#container/2","title":"Axon.container/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#container/2-options","title":"Options - Axon.container/2","type":"function"},{"doc":"iex> inp1 = Axon.input(\"input_0\", shape: {nil, 1})\n iex> inp2 = Axon.input(\"input_1\", shape: {nil, 2})\n iex> model = Axon.container(%{a: inp1, b: inp2})\n iex> %{a: a, b: b} = Axon.predict(model, %{}, %{\n ...> \"input_0\" => Nx.tensor([[1.0]]),\n ...> \"input_1\" => Nx.tensor([[1.0, 2.0]])\n ...> })\n iex> a\n #Nx.Tensor \n iex> b\n #Nx.Tensor","ref":"Axon.html#container/2-examples","title":"Examples - Axon.container/2","type":"function"},{"doc":"Adds a convolution layer to the network.\n\nThe convolution layer implements a general dimensional\nconvolutional layer - which convolves a kernel over the input\nto produce an output.\n\nCompiles to `Axon.Layers.conv/4`.","ref":"Axon.html#conv/3","title":"Axon.conv/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:input_dilation` - dilation to apply to input. Defaults to `1`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:feature_group_size` - feature group size for convolution. Defaults\n to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#conv/3-options","title":"Options - Axon.conv/3","type":"function"},{"doc":"See `conv_lstm/3`.","ref":"Axon.html#conv_lstm/2","title":"Axon.conv_lstm/2","type":"function"},{"doc":"Adds a convolutional long short-term memory (LSTM) layer to the network\nwith a random initial hidden state.\n\nSee `conv_lstm/4` for more details.","ref":"Axon.html#conv_lstm/3","title":"Axon.conv_lstm/3","type":"function"},{"doc":"* `:recurrent_initializer` - initializer for hidden state. Defaults\n to `:orthogonal`.","ref":"Axon.html#conv_lstm/3-additional-options","title":"Additional options - Axon.conv_lstm/3","type":"function"},{"doc":"Adds a convolutional long short-term memory (LSTM) layer to the network\nwith the given initial hidden state..\n\nConvLSTMs apply `Axon.Layers.conv_lstm_cell/5` over an entire input\nsequence and return:\n\n {{new_cell, new_hidden}, output_sequence}\n\nYou can use the output state as the hidden state of another\nConvLSTM layer.","ref":"Axon.html#conv_lstm/4","title":"Axon.conv_lstm/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:padding` - convolutional padding. Defaults to `:same`.\n\n * `:kernel_size` - convolutional kernel size. Defaults to `1`.\n\n * `:strides` - convolutional strides. Defaults to `1`.\n\n * `:unroll` - `:dynamic` (loop preserving) or `:static` (compiled)\n unrolling of RNN.\n\n * `:kernel_initializer` - initializer for kernel weights. 
Defaults\n to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for bias weights. Defaults to\n `:zeros`.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#conv_lstm/4-options","title":"Options - Axon.conv_lstm/4","type":"function"},{"doc":"Adds a transposed convolution layer to the network.\n\nThe transposed convolution layer is sometimes referred to as a\nfractionally strided convolution or (incorrectly) as a deconvolution.\n\nCompiles to `Axon.Layers.conv_transpose/4`.","ref":"Axon.html#conv_transpose/3","title":"Axon.conv_transpose/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#conv_transpose/3-options","title":"Options - Axon.conv_transpose/3","type":"function"},{"doc":"Adds a dense layer to the network.\n\nThe dense layer implements:\n\n output = activation(dot(input, kernel) + bias)\n\nwhere `activation` is given by the `:activation` option and both\n`kernel` and `bias` are layer parameters. `units` specifies the\nnumber of output units.\n\nCompiles to `Axon.Layers.dense/4`.","ref":"Axon.html#dense/3","title":"Axon.dense/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`.\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#dense/3-options","title":"Options - Axon.dense/3","type":"function"},{"doc":"Adds a depthwise convolution layer to the network.\n\nThe depthwise convolution layer implements a general\ndimensional depthwise convolution - which is a convolution\nwhere the feature group size is equal to the number of\ninput channels.\n\nChannel multiplier grows the input channels by the given\nfactor. An input factor of 1 means the output channels\nare the same as the input channels.\n\nCompiles to `Axon.Layers.depthwise_conv/4`.","ref":"Axon.html#depthwise_conv/3","title":"Axon.depthwise_conv/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:input_dilation` - dilation to apply to input. 
Defaults to `1`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#depthwise_conv/3-options","title":"Options - Axon.depthwise_conv/3","type":"function"},{"doc":"Deserializes serialized model and parameters into a `{model, params}`\ntuple.\n\nIt is the opposite of `Axon.serialize/3`.","ref":"Axon.html#deserialize/2","title":"Axon.deserialize/2","type":"function"},{"doc":"iex> model = Axon.input(\"input\", shape: {nil, 2}) |> Axon.dense(1, kernel_initializer: :zeros, activation: :relu)\n iex> {init_fn, _} = Axon.build(model)\n iex> params = init_fn.(Nx.template({1, 2}, :f32), %{})\n iex> serialized = Axon.serialize(model, params)\n iex> {saved_model, saved_params} = Axon.deserialize(serialized)\n iex> {_, predict_fn} = Axon.build(saved_model)\n iex> predict_fn.(saved_params, Nx.tensor([[1.0, 1.0]]))\n #Nx.Tensor","ref":"Axon.html#deserialize/2-examples","title":"Examples - Axon.deserialize/2","type":"function"},{"doc":"Adds a Dropout layer to the network.\n\nSee `Axon.Layers.dropout/2` for more details.","ref":"Axon.html#dropout/2","title":"Axon.dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:rate` - dropout rate. Defaults to `0.5`.\n Needs to be equal or greater than zero and less than one.","ref":"Axon.html#dropout/2-options","title":"Options - Axon.dropout/2","type":"function"},{"doc":"Adds an Exponential linear unit activation layer to the network.\n\nSee `Axon.Activations.elu/1` for more details.","ref":"Axon.html#elu/2","title":"Axon.elu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#elu/2-options","title":"Options - Axon.elu/2","type":"function"},{"doc":"Adds an embedding layer to the network.\n\nAn embedding layer initializes a kernel of shape `{vocab_size, embedding_size}`\nwhich acts as a lookup table for sequences of discrete tokens (e.g. sentences).\nEmbeddings are typically used to obtain a dense representation of a sparse input\nspace.","ref":"Axon.html#embedding/4","title":"Axon.embedding/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights. Defaults\n to `:uniform`.","ref":"Axon.html#embedding/4-options","title":"Options - Axon.embedding/4","type":"function"},{"doc":"Adds an Exponential activation layer to the network.\n\nSee `Axon.Activations.exp/1` for more details.","ref":"Axon.html#exp/2","title":"Axon.exp/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#exp/2-options","title":"Options - Axon.exp/2","type":"function"},{"doc":"Adds a Feature alpha dropout layer to the network.\n\nSee `Axon.Layers.feature_alpha_dropout/2` for more details.","ref":"Axon.html#feature_alpha_dropout/2","title":"Axon.feature_alpha_dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:rate` - dropout rate. Defaults to `0.5`.\n Needs to be equal or greater than zero and less than one.","ref":"Axon.html#feature_alpha_dropout/2-options","title":"Options - Axon.feature_alpha_dropout/2","type":"function"},{"doc":"Adds a flatten layer to the network.\n\nThis layer will flatten all but the batch dimensions\nof the input into a single layer. 
Typically called to flatten\nthe output of a convolution for use with a dense layer.","ref":"Axon.html#flatten/2","title":"Axon.flatten/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#flatten/2-options","title":"Options - Axon.flatten/2","type":"function"},{"doc":"Freezes parameters returned from the given function or predicate.\n\n`fun` can be a predicate `:all`, `up: n`, or `down: n`. `:all`\nfreezes all parameters in the model, `up: n` freezes the first `n`\nlayers up (starting from output), and `down: n` freezes the first `n`\nlayers down (starting from input).\n\n`fun` may also be a predicate function which takes a parameter and\nreturns `true` if a parameter should be frozen or `false` otherwise.\n\nFreezing parameters is useful when performing transfer learning\nto leverage features learned from another problem in a new problem.\nFor example, it's common to combine the convolutional base from\nlarger models trained on ImageNet with fresh fully-connected classifiers.\nThe combined model is then trained on fresh data, with the convolutional\nbase frozen so as not to lose information. You can see this example\nin code here:\n\n cnn_base = get_pretrained_cnn_base()\n model =\n cnn_base\n |> Axon.freeze()\n |> Axon.flatten()\n |> Axon.dense(1024, activation: :relu)\n |> Axon.dropout()\n |> Axon.dense(1000, activation: :softmax)\n\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.005))\n |> Axon.Loop.run(data, epochs: 10)\n\nWhen compiled, frozen parameters are wrapped in `Nx.Defn.Kernel.stop_grad/1`,\nwhich zeros out the gradient with respect to the frozen parameter. Gradients\nof frozen parameters will return `0.0`, meaning they won't be changed during\nthe update process.","ref":"Axon.html#freeze/2","title":"Axon.freeze/2","type":"function"},{"doc":"Adds a Gaussian error linear unit activation layer to the network.\n\nSee `Axon.Activations.gelu/1` for more details.","ref":"Axon.html#gelu/2","title":"Axon.gelu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#gelu/2-options","title":"Options - Axon.gelu/2","type":"function"},{"doc":"Returns information about a model's inputs.","ref":"Axon.html#get_inputs/1","title":"Axon.get_inputs/1","type":"function"},{"doc":"Returns a map of model op counts for each unique operation\nin a model by their given `:op_name`.","ref":"Axon.html#get_op_counts/1","title":"Axon.get_op_counts/1","type":"function"},{"doc":"iex> model = Axon.input(\"input\", shape: {nil, 1}) |> Axon.dense(2)\n iex> Axon.get_op_counts(model)\n %{input: 1, dense: 1}\n\n iex> model = Axon.input(\"input\", shape: {nil, 1}) |> Axon.tanh() |> Axon.tanh()\n iex> Axon.get_op_counts(model)\n %{input: 1, tanh: 2}","ref":"Axon.html#get_op_counts/1-examples","title":"Examples - Axon.get_op_counts/1","type":"function"},{"doc":"Returns a node's immediate input options.\n\nNote that this does not take into account options of\nparent layers, only the option which belong to the\nimmediate layer.","ref":"Axon.html#get_options/1","title":"Axon.get_options/1","type":"function"},{"doc":"Returns a model's output shape from the given input\ntemplate.","ref":"Axon.html#get_output_shape/3","title":"Axon.get_output_shape/3","type":"function"},{"doc":"Returns a node's immediate parameters.\n\nNote this does not take into account parameters of\nparent layers - only the parameters which belong to\nthe immediate 
layer.","ref":"Axon.html#get_parameters/1","title":"Axon.get_parameters/1","type":"function"},{"doc":"Adds a Global average pool layer to the network.\n\nSee `Axon.Layers.global_avg_pool/2` for more details.\n\nTypically used to connect feature extractors such as those in convolutional\nneural networks to fully-connected models by reducing inputs along spatial\ndimensions to only feature and batch dimensions.","ref":"Axon.html#global_avg_pool/2","title":"Axon.global_avg_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:keep_axes` - option to keep reduced axes. If `true`, keeps reduced axes\n with a dimension size of 1.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#global_avg_pool/2-options","title":"Options - Axon.global_avg_pool/2","type":"function"},{"doc":"Adds a Global LP pool layer to the network.\n\nSee `Axon.Layers.global_lp_pool/2` for more details.\n\nTypically used to connect feature extractors such as those in convolutional\nneural networks to fully-connected models by reducing inputs along spatial\ndimensions to only feature and batch dimensions.","ref":"Axon.html#global_lp_pool/2","title":"Axon.global_lp_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:keep_axes` - option to keep reduced axes. If `true`, keeps reduced axes\n with a dimension size of 1.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#global_lp_pool/2-options","title":"Options - Axon.global_lp_pool/2","type":"function"},{"doc":"Adds a Global max pool layer to the network.\n\nSee `Axon.Layers.global_max_pool/2` for more details.\n\nTypically used to connect feature extractors such as those in convolutional\nneural networks to fully-connected models by reducing inputs along spatial\ndimensions to only feature and batch dimensions.","ref":"Axon.html#global_max_pool/2","title":"Axon.global_max_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:keep_axes` - option to keep reduced axes. If `true`, keeps reduced axes\n with a dimension size of 1.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#global_max_pool/2-options","title":"Options - Axon.global_max_pool/2","type":"function"},{"doc":"Adds a group normalization layer to the network.\n\nSee `Axon.Layers.group_norm/4` for more details.","ref":"Axon.html#group_norm/3","title":"Axon.group_norm/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. 
Defaults to `-1`.\n\n * `:epsilon` - numerical stability term.","ref":"Axon.html#group_norm/3-options","title":"Options - Axon.group_norm/3","type":"function"},{"doc":"See `gru/3`.","ref":"Axon.html#gru/2","title":"Axon.gru/2","type":"function"},{"doc":"Adds a gated recurrent unit (GRU) layer to the network with\na random initial hidden state.\n\nSee `gru/4` for more details.","ref":"Axon.html#gru/3","title":"Axon.gru/3","type":"function"},{"doc":"* `:recurrent_initializer` - initializer for hidden state.\n Defaults to `:orthogonal`.","ref":"Axon.html#gru/3-additional-options","title":"Additional options - Axon.gru/3","type":"function"},{"doc":"Adds a gated recurrent unit (GRU) layer to the network with\nthe given initial hidden state.\n\nGRUs apply `Axon.Layers.gru_cell/7` over an entire input\nsequence and return:\n\n {{new_hidden}, output_sequence}\n\nYou can use the output state as the hidden state of another\nGRU layer.","ref":"Axon.html#gru/4","title":"Axon.gru/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:activation` - recurrent activation. Defaults to `:tanh`.\n\n * `:gate` - recurrent gate function. Defaults to `:sigmoid`.\n\n * `:unroll` - `:dynamic` (loop preserving) or `:static` (compiled)\n unrolling of RNN.\n\n * `:kernel_initializer` - initializer for kernel weights. Defaults\n to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for bias weights. Defaults to\n `:zeros`.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#gru/4-options","title":"Options - Axon.gru/4","type":"function"},{"doc":"Adds a Hard sigmoid activation layer to the network.\n\nSee `Axon.Activations.hard_sigmoid/1` for more details.","ref":"Axon.html#hard_sigmoid/2","title":"Axon.hard_sigmoid/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#hard_sigmoid/2-options","title":"Options - Axon.hard_sigmoid/2","type":"function"},{"doc":"Adds a Hard sigmoid weighted linear unit activation layer to the network.\n\nSee `Axon.Activations.hard_silu/1` for more details.","ref":"Axon.html#hard_silu/2","title":"Axon.hard_silu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#hard_silu/2-options","title":"Options - Axon.hard_silu/2","type":"function"},{"doc":"Adds a Hard hyperbolic tangent activation layer to the network.\n\nSee `Axon.Activations.hard_tanh/1` for more details.","ref":"Axon.html#hard_tanh/2","title":"Axon.hard_tanh/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#hard_tanh/2-options","title":"Options - Axon.hard_tanh/2","type":"function"},{"doc":"Adds an input layer to the network.\n\nInput layers specify a model's inputs. Input layers are\nalways the root layers of the neural network.\n\nYou must specify the input layers name, which will be used\nto uniquely identify it in the case of multiple inputs.","ref":"Axon.html#input/2","title":"Axon.input/2","type":"function"},{"doc":"* `:shape` - the expected input shape, use `nil` for dimensions\n of a dynamic size.\n\n * `:optional` - if `true`, the input may be omitted when using\n the model. This needs to be handled in one of the subsequent\n layers. 
See `optional/2` for more details.","ref":"Axon.html#input/2-options","title":"Options - Axon.input/2","type":"function"},{"doc":"Adds an Instance normalization layer to the network.\n\nSee `Axon.Layers.instance_norm/6` for more details.","ref":"Axon.html#instance_norm/2","title":"Axon.instance_norm/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. Defaults to `-1`.\n\n * `:epsilon` - numerical stability term. Defaults to `1.0e-5`.","ref":"Axon.html#instance_norm/2-options","title":"Options - Axon.instance_norm/2","type":"function"},{"doc":"Custom Axon layer with given inputs.\n\nInputs may be other Axon layers or trainable parameters created\nwith `Axon.param`. At inference time, `op` will be applied with\ninputs in specified order and an additional `opts` parameter which\nspecifies inference options. All options passed to layer are forwarded\nto inference function except:\n\n * `:name` - layer name.\n\n * `:op_name` - layer operation for inspection and building parameter map.\n\n * `:mode` - if the layer should run only on `:inference` or `:train`. Defaults to `:both`\n\n * `:global_options` - a list of global option names that this layer\n supports. Global options passed to `build/2` will be forwarded to\n the layer, as long as they are declared\n\nNote this means your layer should not use these as input options,\nas they will always be dropped during inference compilation.\n\nAxon's compiler will additionally forward the following options to\nevery layer at inference time:\n\n * `:mode` - `:inference` or `:train`. To control layer behavior\n based on inference or train time.\n\n`op` is a function of the form:\n\n fun = fn input, weight, bias, _opts ->\n input * weight + bias\n end","ref":"Axon.html#layer/3","title":"Axon.layer/3","type":"function"},{"doc":"Adds a Layer normalization layer to the network.\n\nSee `Axon.Layers.layer_norm/4` for more details.","ref":"Axon.html#layer_norm/2","title":"Axon.layer_norm/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. 
Defaults to `-1`.\n\n * `:epsilon` - numerical stability term.","ref":"Axon.html#layer_norm/2-options","title":"Options - Axon.layer_norm/2","type":"function"},{"doc":"Adds a Leaky rectified linear unit activation layer to the network.\n\nSee `Axon.Activations.leaky_relu/1` for more details.","ref":"Axon.html#leaky_relu/2","title":"Axon.leaky_relu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#leaky_relu/2-options","title":"Options - Axon.leaky_relu/2","type":"function"},{"doc":"Adds a Linear activation layer to the network.\n\nSee `Axon.Activations.linear/1` for more details.","ref":"Axon.html#linear/2","title":"Axon.linear/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#linear/2-options","title":"Options - Axon.linear/2","type":"function"},{"doc":"Adds a Log-sigmoid activation layer to the network.\n\nSee `Axon.Activations.log_sigmoid/1` for more details.","ref":"Axon.html#log_sigmoid/2","title":"Axon.log_sigmoid/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#log_sigmoid/2-options","title":"Options - Axon.log_sigmoid/2","type":"function"},{"doc":"Adds a Log-softmax activation layer to the network.\n\nSee `Axon.Activations.log_softmax/1` for more details.","ref":"Axon.html#log_softmax/2","title":"Axon.log_softmax/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#log_softmax/2-options","title":"Options - Axon.log_softmax/2","type":"function"},{"doc":"Adds a Log-sumexp activation layer to the network.\n\nSee `Axon.Activations.log_sumexp/1` for more details.","ref":"Axon.html#log_sumexp/2","title":"Axon.log_sumexp/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#log_sumexp/2-options","title":"Options - Axon.log_sumexp/2","type":"function"},{"doc":"Adds a Power average pool layer to the network.\n\nSee `Axon.Layers.lp_pool/2` for more details.","ref":"Axon.html#lp_pool/2","title":"Axon.lp_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to size of kernel.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:dilations` - window dilations. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#lp_pool/2-options","title":"Options - Axon.lp_pool/2","type":"function"},{"doc":"See `lstm/3`.","ref":"Axon.html#lstm/2","title":"Axon.lstm/2","type":"function"},{"doc":"Adds a long short-term memory (LSTM) layer to the network\nwith a random initial hidden state.\n\nSee `lstm/4` for more details.","ref":"Axon.html#lstm/3","title":"Axon.lstm/3","type":"function"},{"doc":"* `:recurrent_initializer` - initializer for hidden state.\n Defaults to `:orthogonal`.","ref":"Axon.html#lstm/3-additional-options","title":"Additional options - Axon.lstm/3","type":"function"},{"doc":"Adds a long short-term memory (LSTM) layer to the network\nwith the given initial hidden state.\n\nLSTMs apply `Axon.Layers.lstm_cell/7` over an entire input\nsequence and return:\n\n {output_sequence, {new_cell, new_hidden}}\n\nYou can use the output state as the hidden state of another\nLSTM layer.","ref":"Axon.html#lstm/4","title":"Axon.lstm/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:activation` - recurrent activation. Defaults to `:tanh`.\n\n * `:gate` - recurrent gate function. 
Defaults to `:sigmoid`.\n\n * `:unroll` - `:dynamic` (loop preserving) or `:static` (compiled)\n unrolling of RNN.\n\n * `:kernel_initializer` - initializer for kernel weights. Defaults\n to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for bias weights. Defaults to\n `:zeros`.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#lstm/4-options","title":"Options - Axon.lstm/4","type":"function"},{"doc":"Traverses graph nodes in order, applying `fun` to each\nnode exactly once to return a transformed node in its\nplace(s) in the graph.\n\nThis function maintains an internal cache which ensures\neach node is only visited and transformed exactly once.\n\n`fun` must accept an Axon node and return an Axon node.\n\nPlease note that modifying node lineage (e.g. altering\na node's parent) will result in disconnected graphs.","ref":"Axon.html#map_nodes/2","title":"Axon.map_nodes/2","type":"function"},{"doc":"One common use of this function is to implement common\ninstrumentation between layers without needing to build\na new explicitly instrumented version of a model. For example,\nyou can use this function to visualize intermediate activations\nof all convolutional layers in a model:\n\n instrumented_model = Axon.map_nodes(model, fn\n %Axon.Node{op: :conv} = axon_node ->\n Axon.attach_hook(axon_node, &visualize_activations/1)\n\n axon_node ->\n axon_node\n end)\n\nAnother use case is to replace entire classes of layers\nwith another. For example, you may want to replace all\nrelu layers with tanh layers:\n\n new_model = Axon.map_nodes(model, fn\n %Axon{op: :relu} = graph ->\n # Get nodes immediate parent\n parent = Axon.get_parent(graph)\n # Replace node with a tanh\n Axon.tanh(parent)\n\n graph ->\n graph\n end)","ref":"Axon.html#map_nodes/2-examples","title":"Examples - Axon.map_nodes/2","type":"function"},{"doc":"Computes a sequence mask according to the given EOS token.\n\nMasks can be propagated to recurrent layers or custom layers to\nindicate that a given token should be ignored in processing. This\nis useful when you have sequences of variable length.\n\nMost commonly, `eos_token` is `0`.","ref":"Axon.html#mask/3","title":"Axon.mask/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#mask/3-options","title":"Options - Axon.mask/3","type":"function"},{"doc":"Adds a Max pool layer to the network.\n\nSee `Axon.Layers.max_pool/2` for more details.","ref":"Axon.html#max_pool/2","title":"Axon.max_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to size of kernel.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:dilations` - window dilations. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#max_pool/2-options","title":"Options - Axon.max_pool/2","type":"function"},{"doc":"Adds a Mish activation layer to the network.\n\nSee `Axon.Activations.mish/1` for more details.","ref":"Axon.html#mish/2","title":"Axon.mish/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#mish/2-options","title":"Options - Axon.mish/2","type":"function"},{"doc":"Adds a multiply layer to the network.\n\nThis layer performs an element-wise multiply operation\non input layers. 
All input layers must be capable of being\nbroadcast together.\n\nIf one shape has a static batch size, all other shapes must have a\nstatic batch size as well.","ref":"Axon.html#multiply/3","title":"Axon.multiply/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#multiply/3-options","title":"Options - Axon.multiply/3","type":"function"},{"doc":"Wraps an Axon model into a namespace.\n\nA namespace is a part of an Axon model which is meant to\nbe a self-contained collection of Axon layers. Namespaces\nare guaranteed to always generate with the same internal\nlayer names and can be re-used universally across models.\n\nNamespaces are most useful for containing large collections\nof layers and offering a straightforward means for accessing\nthe parameters of individual model components. A common application\nof namespaces is to use them in with a pre-trained model for\nfine-tuning:\n\n {base, resnet_params} = resnet()\n base = base |> Axon.namespace(\"resnet\")\n\n model = base |> Axon.dense(1)\n {init_fn, predict_fn} = Axon.build(model)\n\n init_fn.(Nx.template({1, 3, 224, 224}, {:f, 32}), %{\"resnset\" => resnet_params})\n\nNotice you can use `init_fn` in conjunction with namespaces\nto specify which portion of a model you'd like to initialize\nfrom a fixed starting point.\n\nNamespaces have fixed names, which means it's easy to run into namespace\ncollisions. Re-using namespaces, re-using inner parts of a namespace,\nand attempting to share layers between namespaces are still sharp\nedges in namespace usage.","ref":"Axon.html#namespace/2","title":"Axon.namespace/2","type":"function"},{"doc":"Applies the given `Nx` expression to the input.\n\nNx layers are meant for quick applications of functions without\ntrainable parameters. For example, they are useful for applying\nfunctions which apply accessors to containers:\n\n model = Axon.container({foo, bar})\n Axon.nx(model, &elem(&1, 0))","ref":"Axon.html#nx/3","title":"Axon.nx/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#nx/3-options","title":"Options - Axon.nx/3","type":"function"},{"doc":"Wraps an Axon model in an optional node.\n\nBy default, when an optional input is missing, all subsequent layers\nare nullified. For example, consider this model:\n\n values = Axon.input(\"values\")\n mask = Axon.input(\"mask\", optional: true)\n\n model =\n values\n |> Axon.dense(10)\n |> Axon.multiply(mask)\n |> Axon.dense(1)\n |> Axon.sigmoid()\n\nIn case the mask is not provided, the input node will resolve to\n`%Axon.None{}` and so will all the layers that depend on it. 
By\nusing `optional/2` a layer may opt-in to receive `%Axon.None{}`.\nTo fix our example, we could define a custom layer to apply the\nmask only when present\n\n def apply_optional_mask(%Axon{} = x, %Axon{} = mask) do\n Axon.layer(\n fn x, mask, _opts ->\n case mask do\n %Axon.None{} -> x\n mask -> Nx.multiply(x, mask)\n end\n end,\n [x, Axon.optional(mask)]\n )\n end\n\n # ...\n\n model =\n values\n |> Axon.dense(10)\n |> apply_optional_mask(mask)\n |> Axon.dense(1)\n |> Axon.sigmoid()","ref":"Axon.html#optional/2","title":"Axon.optional/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#optional/2-options","title":"Options - Axon.optional/2","type":"function"},{"doc":"Adds a pad layer to the network.\n\nThis layer will pad the spatial dimensions of the input.\nPadding configuration is a list of tuples for each spatial\ndimension.","ref":"Axon.html#pad/4","title":"Axon.pad/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:channels` - channel configuration. One of `:first` or\n `:last`. Defaults to `:last`.","ref":"Axon.html#pad/4-options","title":"Options - Axon.pad/4","type":"function"},{"doc":"Trainable Axon parameter used to create custom layers.\n\nParameters are specified in usages of `Axon.layer` and will\nbe automatically initialized and used in subsequent applications\nof Axon models.\n\nYou may specify the parameter shape as either a static shape or\nas function of the inputs to the given layer. If you specify the\nparameter shape as a function, it will be given the","ref":"Axon.html#param/3","title":"Axon.param/3","type":"function"},{"doc":"* `:initializer` - parameter initializer. Defaults to `:glorot_uniform`.","ref":"Axon.html#param/3-options","title":"Options - Axon.param/3","type":"function"},{"doc":"Pops the top node off of the graph.\n\nThis returns the popped node and the updated graph:\n\n {_node, model} = Axon.pop_node(model)","ref":"Axon.html#pop_node/1","title":"Axon.pop_node/1","type":"function"},{"doc":"Builds and runs the given Axon `model` with `params` and `input`.\n\nThis is equivalent to calling `build/2` and then invoking the\npredict function.","ref":"Axon.html#predict/4","title":"Axon.predict/4","type":"function"},{"doc":"* `:mode` - one of `:inference` or `:train`. Forwarded to layers\n to control differences in compilation at training or inference time.\n Defaults to `:inference`\n\n * `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`\n\nAll other options are forwarded to the default JIT compiler\nor backend.","ref":"Axon.html#predict/4-options","title":"Options - Axon.predict/4","type":"function"},{"doc":"Traverses graph nodes in order, applying `fun` to each\nnode exactly once to return a transformed node in its\nplace(s) in the graph.\n\nThis function maintains an internal cache which ensures\neach node is only visited and transformed exactly once.\n\n`fun` must accept an Axon node and accumulator and return\nan updated accumulator.","ref":"Axon.html#reduce_nodes/3","title":"Axon.reduce_nodes/3","type":"function"},{"doc":"Internally this function is used in several places to accumulate\ngraph metadata. 
For example, you can use it to count the number\nof a certain type of operation in the graph:\n\n    Axon.reduce_nodes(model, 0, fn\n      %Axon.Node{op: :relu}, acc -> acc + 1\n      _, acc -> acc\n    end)","ref":"Axon.html#reduce_nodes/3-examples","title":"Examples - Axon.reduce_nodes/3","type":"function"},{"doc":"Adds a Rectified linear unit 6 activation layer to the network.\n\nSee `Axon.Activations.relu6/1` for more details.","ref":"Axon.html#relu6/2","title":"Axon.relu6/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#relu6/2-options","title":"Options - Axon.relu6/2","type":"function"},{"doc":"Adds a Rectified linear unit activation layer to the network.\n\nSee `Axon.Activations.relu/1` for more details.","ref":"Axon.html#relu/2","title":"Axon.relu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#relu/2-options","title":"Options - Axon.relu/2","type":"function"},{"doc":"Adds a reshape layer to the network.\n\nThis layer implements a special case of `Nx.reshape` which accounts\nfor possible batch dimensions in the input tensor. You may pass the\nmagic dimension `:batch` as a placeholder for dynamic batch sizes.\nYou can use `:batch` seamlessly with `:auto` dimension sizes.\n\nIf the input is an Axon constant, the reshape behavior matches that of\n`Nx.reshape/2`.","ref":"Axon.html#reshape/3","title":"Axon.reshape/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#reshape/3-options","title":"Options - Axon.reshape/3","type":"function"},{"doc":"Adds a resize layer to the network.\n\nResizing can be used for interpolation or upsampling input\nvalues in a neural network. For example, you can use this\nlayer as an upsampling layer within a GAN.\n\nResize shape must be a tuple representing the resized spatial\ndimensions of the input tensor.\n\nCompiles to `Axon.Layers.resize/2`.","ref":"Axon.html#resize/3","title":"Axon.resize/3","type":"function"},{"doc":"* `:name` - layer name.\n\n  * `:method` - resize method. Defaults to `:nearest`.\n\n  * `:antialias` - whether an anti-aliasing filter should be used\n    when downsampling. Defaults to `true`.\n\n  * `:channels` - channel configuration. One of `:first` or\n    `:last`. Defaults to `:last`.","ref":"Axon.html#resize/3-options","title":"Options - Axon.resize/3","type":"function"},{"doc":"Adds a Scaled exponential linear unit activation layer to the network.\n\nSee `Axon.Activations.selu/1` for more details.","ref":"Axon.html#selu/2","title":"Axon.selu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#selu/2-options","title":"Options - Axon.selu/2","type":"function"},{"doc":"Adds a depthwise separable 2-dimensional convolution to the\nnetwork.\n\nDepthwise separable convolutions break the kernel into kernels\nfor each dimension of the input and perform a depthwise conv\nover the input with each kernel.\n\nCompiles to `Axon.Layers.separable_conv2d/6`.","ref":"Axon.html#separable_conv2d/3","title":"Axon.separable_conv2d/3","type":"function"},{"doc":"* `:name` - layer name.\n\n  * `:kernel_initializer` - initializer for `kernel` weights.\n    Defaults to `:glorot_uniform`.\n\n  * `:bias_initializer` - initializer for `bias` weights. Defaults\n    to `:zeros`\n\n  * `:activation` - element-wise activation function.\n\n  * `:use_bias` - whether the layer should add bias to the output.\n    Defaults to `true`\n\n  * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n    to `1`.\n\n  * `:strides` - stride during convolution. Defaults to `1`.\n\n  * `:padding` - padding to the spatial dimensions of the input.\n    Defaults to `:valid`.\n\n  * `:input_dilation` - dilation to apply to input. Defaults to `1`.\n\n  * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n  * `:channels` - channels location. One of `:first` or `:last`.\n    Defaults to `:last`.","ref":"Axon.html#separable_conv2d/3-options","title":"Options - Axon.separable_conv2d/3","type":"function"},
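As a quick illustration of the layer above, a hedged sketch (the input shape is arbitrary, and the second argument is assumed to be the depthwise channel multiplier):

    model =
      Axon.input("images", shape: {nil, 28, 28, 3})
      |> Axon.separable_conv2d(2, kernel_size: 3, activation: :relu)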
{"doc":"Adds a depthwise separable 3-dimensional convolution to the\nnetwork.\n\nDepthwise separable convolutions break the kernel into kernels\nfor each dimension of the input and perform a depthwise conv\nover the input with each kernel.\n\nCompiles to `Axon.Layers.separable_conv3d/8`.","ref":"Axon.html#separable_conv3d/3","title":"Axon.separable_conv3d/3","type":"function"},{"doc":"* `:name` - layer name.\n\n  * `:kernel_initializer` - initializer for `kernel` weights.\n    Defaults to `:glorot_uniform`.\n\n  * `:bias_initializer` - initializer for `bias` weights. Defaults\n    to `:zeros`\n\n  * `:activation` - element-wise activation function.\n\n  * `:use_bias` - whether the layer should add bias to the output.\n    Defaults to `true`\n\n  * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n    to `1`.\n\n  * `:strides` - stride during convolution. Defaults to `1`.\n\n  * `:padding` - padding to the spatial dimensions of the input.\n    Defaults to `:valid`.\n\n  * `:input_dilation` - dilation to apply to input. Defaults to `1`.\n\n  * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n  * `:channels` - channels location. One of `:first` or `:last`.\n    Defaults to `:last`.","ref":"Axon.html#separable_conv3d/3-options","title":"Options - Axon.separable_conv3d/3","type":"function"},{"doc":"Serializes a model and its parameters for persisting\nmodels to disk or elsewhere.\n\nModel and parameters are serialized as a tuple, where the\nmodel is converted to a recursive map to ensure compatibility\nwith future Axon versions and the parameters are serialized\nusing `Nx.serialize/2`. There is some additional metadata included\nsuch as current serialization version for compatibility.\n\nSerialization `opts` are forwarded to `Nx.serialize/2` and\n`:erlang.term_to_binary/2` for controlling compression options.","ref":"Axon.html#serialize/3","title":"Axon.serialize/3","type":"function"},{"doc":"iex> model = Axon.input(\"input\", shape: {nil, 2}) |> Axon.dense(1, kernel_initializer: :zeros, activation: :relu)\n    iex> {init_fn, _} = Axon.build(model)\n    iex> params = init_fn.(Nx.template({1, 2}, :f32), %{})\n    iex> serialized = Axon.serialize(model, params)\n    iex> {saved_model, saved_params} = Axon.deserialize(serialized)\n    iex> {_, predict_fn} = Axon.build(saved_model)\n    iex> predict_fn.(saved_params, Nx.tensor([[1.0, 1.0]]))\n    #Nx.Tensor","ref":"Axon.html#serialize/3-examples","title":"Examples - Axon.serialize/3","type":"function"},{"doc":"Sets a node's immediate options to the given input\noptions.\n\nNote that this does not take into account options of\nparent layers, only the options which belong to the\nimmediate layer.\n\nNew options must be compatible with the given layer\nop. Adding unsupported options to an Axon layer will\nresult in an error at graph execution time.","ref":"Axon.html#set_options/2","title":"Axon.set_options/2","type":"function"},
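A small sketch of the idea (hypothetical layer and option values):

    model = Axon.input("x") |> Axon.dropout(rate: 0.5)
    # replace the immediate (dropout) node's options
    model = Axon.set_options(model, rate: 0.1)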
{"doc":"Sets a node's immediate parameters to the given\nparameters.\n\nNote this does not take into account parameters of\nparent layers - only the parameters which belong to\nthe immediate layer.\n\nThe new parameters must be compatible with the layer's\nold parameters.","ref":"Axon.html#set_parameters/2","title":"Axon.set_parameters/2","type":"function"},{"doc":"Adds a Sigmoid activation layer to the network.\n\nSee `Axon.Activations.sigmoid/1` for more details.","ref":"Axon.html#sigmoid/2","title":"Axon.sigmoid/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#sigmoid/2-options","title":"Options - Axon.sigmoid/2","type":"function"},{"doc":"Adds a Sigmoid weighted linear unit activation layer to the network.\n\nSee `Axon.Activations.silu/1` for more details.","ref":"Axon.html#silu/2","title":"Axon.silu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#silu/2-options","title":"Options - Axon.silu/2","type":"function"},{"doc":"Adds a Softmax activation layer to the network.\n\nSee `Axon.Activations.softmax/1` for more details.","ref":"Axon.html#softmax/2","title":"Axon.softmax/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#softmax/2-options","title":"Options - Axon.softmax/2","type":"function"},{"doc":"Adds a Softplus activation layer to the network.\n\nSee `Axon.Activations.softplus/1` for more details.","ref":"Axon.html#softplus/2","title":"Axon.softplus/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#softplus/2-options","title":"Options - Axon.softplus/2","type":"function"},{"doc":"Adds a Softsign activation layer to the network.\n\nSee `Axon.Activations.softsign/1` for more details.","ref":"Axon.html#softsign/2","title":"Axon.softsign/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#softsign/2-options","title":"Options - Axon.softsign/2","type":"function"},{"doc":"Adds a Spatial dropout layer to the network.\n\nSee `Axon.Layers.spatial_dropout/2` for more details.","ref":"Axon.html#spatial_dropout/2","title":"Axon.spatial_dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n  * `:rate` - dropout rate. Defaults to `0.5`.\n    Needs to be equal to or greater than zero and less than one.","ref":"Axon.html#spatial_dropout/2-options","title":"Options - Axon.spatial_dropout/2","type":"function"},{"doc":"Splits input graph into a container of `n` input graphs\nalong the given axis.","ref":"Axon.html#split/3","title":"Axon.split/3","type":"function"},{"doc":"* `:name` - layer name.\n\n  * `:axis` - axis to split along. Defaults to `-1`.","ref":"Axon.html#split/3-options","title":"Options - Axon.split/3","type":"function"},{"doc":"Adds a stack columns layer to the network.\n\nA stack columns layer is designed to be used with `Nx.LazyContainer`\ndata structures like Explorer DataFrames. 
Given an input which is a\nDataFrame, `stack_columns/2` will stack the columns in each row to\ncreate a single vector.\n\nYou may optionally specify `:ignore` to ignore certain columns in\nthe container.","ref":"Axon.html#stack_columns/2","title":"Axon.stack_columns/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:ignore` - keys to ignore when stacking.","ref":"Axon.html#stack_columns/2-options","title":"Options - Axon.stack_columns/2","type":"function"},{"doc":"Adds a subtract layer to the network.\n\nThis layer performs an element-wise subtract operation\non input layers. All input layers must be capable of being\nbroadcast together.\n\nIf one shape has a static batch size, all other shapes must have a\nstatic batch size as well.","ref":"Axon.html#subtract/3","title":"Axon.subtract/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#subtract/3-options","title":"Options - Axon.subtract/3","type":"function"},{"doc":"Adds a Hyperbolic tangent activation layer to the network.\n\nSee `Axon.Activations.tanh/1` for more details.","ref":"Axon.html#tanh/2","title":"Axon.tanh/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#tanh/2-options","title":"Options - Axon.tanh/2","type":"function"},{"doc":"Compiles and returns the given model's backward function\nexpression with respect to the given loss function.\n\nThe returned expression is an Nx expression which can be\ntraversed and lowered to an IR or inspected for debugging\npurposes.\n\nThe given loss function must be a scalar loss function which\nexpects inputs and targets with the same shapes as the model's\noutput shapes as determined by the model's signature.","ref":"Axon.html#trace_backward/5","title":"Axon.trace_backward/5","type":"function"},{"doc":"* `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`","ref":"Axon.html#trace_backward/5-options","title":"Options - Axon.trace_backward/5","type":"function"},{"doc":"Compiles and returns the given model's forward function\nexpression with the given options.\n\nThe returned expression is an Nx expression which can be\ntraversed and lowered to an IR or inspected for debugging\npurposes.","ref":"Axon.html#trace_forward/4","title":"Axon.trace_forward/4","type":"function"},{"doc":"* `:mode` - one of `:inference` or `:train`. Forwarded to layers\n to control differences in compilation at training or inference time.\n Defaults to `:inference`\n\n * `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`","ref":"Axon.html#trace_forward/4-options","title":"Options - Axon.trace_forward/4","type":"function"},{"doc":"Compiles and returns the given model's init function\nexpression with the given options.\n\nThe returned expression is an Nx expression which can be\ntraversed and lowered to an IR or inspected for debugging\npurposes.\n\nYou may optionally specify initial parameters for some layers or\nnamespaces by passing a partial parameter map:\n\n Axon.trace_init(model, %{\"dense_0\" => dense_params})\n\nThe parameter map will be merged with the initialized model\nparameters.","ref":"Axon.html#trace_init/4","title":"Axon.trace_init/4","type":"function"},{"doc":"* `:debug` - if `true`, will log graph traversal and generation\n metrics. 
Also forwarded to JIT if debug mode is available\n    for your chosen compiler or backend. Defaults to `false`","ref":"Axon.html#trace_init/4-options","title":"Options - Axon.trace_init/4","type":"function"},{"doc":"Adds a transpose layer to the network.","ref":"Axon.html#transpose/3","title":"Axon.transpose/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#transpose/3-options","title":"Options - Axon.transpose/3","type":"function"},{"doc":"Unfreezes parameters returned from the given function or predicate.\n\n`fun` can be a predicate `:all`, `up: n`, or `down: n`. `:all`\nunfreezes all parameters in the model, `up: n` unfreezes the first `n`\nlayers up (starting from output), and `down: n` unfreezes the first `n`\nlayers down (starting from input).\n\n`fun` may also be a predicate function which takes a parameter and\nreturns `true` if a parameter should be unfrozen or `false` otherwise.\n\nUnfreezing parameters is useful when fine-tuning a model which you\nhave previously frozen and performed transfer learning on. You may\nwant to unfreeze some of the later frozen layers in a model and\nfine-tune them specifically for your application:\n\n    cnn_base = get_pretrained_cnn_base()\n    model =\n      cnn_base\n      |> Axon.unfreeze(up: 25)\n\n    model\n    |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.0005))\n    |> Axon.Loop.run(data, epochs: 10)\n\nWhen compiled, frozen parameters are wrapped in `Nx.Defn.Kernel.stop_grad/1`,\nwhich zeros out the gradient with respect to the frozen parameter. Gradients\nof frozen parameters will return `0.0`, meaning they won't be changed during\nthe update process.","ref":"Axon.html#unfreeze/2","title":"Axon.unfreeze/2","type":"function"},{"doc":"","ref":"Axon.html#t:t/0","title":"Axon.t/0","type":"type"},{"doc":"Parameter initializers.\n\nParameter initializers are used to initialize the weights\nand biases of a neural network. Because most deep learning\noptimization algorithms are iterative, they require an initial\npoint to iterate from.\n\nSometimes the initialization of a model can determine whether\nor not a model converges. In some cases, the initial point is\nunstable, and therefore the model has no chance of converging\nusing common first-order optimization methods. In cases where\nthe model will converge, initialization can have a significant\nimpact on how quickly the model converges.\n\nMost initialization strategies are built from intuition and\nheuristics rather than theory. It's commonly accepted that\nthe parameters of different layers should be different -\nmotivating the use of random initialization for each layer's\nparameters. Usually, only the weights of a layer are initialized\nusing a random distribution - while the biases are initialized\nto a uniform constant (like 0).\n\nMost initializers use Gaussian (normal) or uniform distributions\nwith variations on scale. The output scale of an initializer\nshould generally be large enough to avoid information loss but\nsmall enough to avoid exploding values. 
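For instance, a larger `:scale` widens the distribution an initializer draws from (a quick sketch; the shape, type, and key are arbitrary):

    init_fn = Axon.Initializers.variance_scaling(scale: 2.0, mode: :fan_in)
    t = init_fn.({128, 64}, {:f, 32}, Nx.Random.key(0))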
The initializers in\nthis module have a default scale known to work well with\nthe initialization strategy.\n\nThe functions in this module return initialization functions which\ntake shapes and types and return tensors:\n\n init_fn = Axon.Initializers.zeros()\n init_fn.({1, 2}, {:f, 32})\n\nYou may use these functions from within `defn` or outside.","ref":"Axon.Initializers.html","title":"Axon.Initializers","type":"module"},{"doc":"Initializes parameters to value.","ref":"Axon.Initializers.html#full/1","title":"Axon.Initializers.full/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.full(1.00)\n iex> out = init_fn.({2, 2}, {:f, 32})\n iex> out\n #Nx.Tensor","ref":"Axon.Initializers.html#full/1-examples","title":"Examples - Axon.Initializers.full/1","type":"function"},{"doc":"Initializes parameters with the Glorot normal initializer.\n\nThe Glorot normal initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_avg`\nand `distribution: :truncated_normal`.\n\nThe Glorot normal initializer is also called the Xavier\nnormal initializer.","ref":"Axon.Initializers.html#glorot_normal/1","title":"Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `1.0`","ref":"Axon.Initializers.html#glorot_normal/1-options","title":"Options - Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.glorot_normal()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.glorot_normal(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#glorot_normal/1-examples","title":"Examples - Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)","ref":"Axon.Initializers.html#glorot_normal/1-references","title":"References - Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"Initializes parameters with the Glorot uniform initializer.\n\nThe Glorot uniform initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_avg`\nand `distribution: :uniform`.\n\nThe Glorot uniform initializer is also called the Xavier\nuniform initializer.","ref":"Axon.Initializers.html#glorot_uniform/1","title":"Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `1.0`","ref":"Axon.Initializers.html#glorot_uniform/1-options","title":"Options - Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.glorot_uniform()\n    iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:f, 32}\n\n    iex> init_fn = Axon.Initializers.glorot_uniform(scale: 1.0e-3)\n    iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:bf, 16}","ref":"Axon.Initializers.html#glorot_uniform/1-examples","title":"Examples - Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)","ref":"Axon.Initializers.html#glorot_uniform/1-references","title":"References - Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"Initializes parameters with the He normal initializer.\n\nThe He normal initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_in`\nand `distribution: :truncated_normal`.","ref":"Axon.Initializers.html#he_normal/1","title":"Axon.Initializers.he_normal/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `2.0`","ref":"Axon.Initializers.html#he_normal/1-options","title":"Options - Axon.Initializers.he_normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.he_normal()\n    iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:f, 32}\n\n    iex> init_fn = Axon.Initializers.he_normal(scale: 1.0e-3)\n    iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:bf, 16}","ref":"Axon.Initializers.html#he_normal/1-examples","title":"Examples - Axon.Initializers.he_normal/1","type":"function"},{"doc":"* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)","ref":"Axon.Initializers.html#he_normal/1-references","title":"References - Axon.Initializers.he_normal/1","type":"function"},{"doc":"Initializes parameters with the He uniform initializer.\n\nThe He uniform initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_in`\nand `distribution: :uniform`.","ref":"Axon.Initializers.html#he_uniform/1","title":"Axon.Initializers.he_uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `2.0`","ref":"Axon.Initializers.html#he_uniform/1-options","title":"Options - Axon.Initializers.he_uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.he_uniform()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.he_uniform(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#he_uniform/1-examples","title":"Examples - Axon.Initializers.he_uniform/1","type":"function"},{"doc":"* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)","ref":"Axon.Initializers.html#he_uniform/1-references","title":"References - Axon.Initializers.he_uniform/1","type":"function"},{"doc":"Initializes parameters to an identity matrix.","ref":"Axon.Initializers.html#identity/0","title":"Axon.Initializers.identity/0","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.identity()\n iex> out = init_fn.({2, 2}, {:f, 32})\n iex> out\n #Nx.Tensor","ref":"Axon.Initializers.html#identity/0-examples","title":"Examples - Axon.Initializers.identity/0","type":"function"},{"doc":"Initializes parameters with the Lecun normal initializer.\n\nThe Lecun normal initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_in`\nand `distribution: :truncated_normal`.","ref":"Axon.Initializers.html#lecun_normal/1","title":"Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `1.0`","ref":"Axon.Initializers.html#lecun_normal/1-options","title":"Options - Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.lecun_normal()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.lecun_normal(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#lecun_normal/1-examples","title":"Examples - Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)","ref":"Axon.Initializers.html#lecun_normal/1-references","title":"References - Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"Initializes parameters with the Lecun uniform initializer.\n\nThe Lecun uniform initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_in`\nand `distribution: :uniform`.","ref":"Axon.Initializers.html#lecun_uniform/1","title":"Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `1.0`","ref":"Axon.Initializers.html#lecun_uniform/1-options","title":"Options - Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.lecun_uniform()\n    iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:f, 32}\n\n    iex> init_fn = Axon.Initializers.lecun_uniform(scale: 1.0e-3)\n    iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:bf, 16}","ref":"Axon.Initializers.html#lecun_uniform/1-examples","title":"Examples - Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)","ref":"Axon.Initializers.html#lecun_uniform/1-references","title":"References - Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"Initializes parameters with a random normal distribution.","ref":"Axon.Initializers.html#normal/1","title":"Axon.Initializers.normal/1","type":"function"},{"doc":"* `:mean` - mean of the output distribution. Defaults to `0.0`\n  * `:scale` - scale of the output distribution. Defaults to `1.0e-2`","ref":"Axon.Initializers.html#normal/1-options","title":"Options - Axon.Initializers.normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.normal()\n    iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:f, 32}\n\n    iex> init_fn = Axon.Initializers.normal(mean: 1.0, scale: 1.0)\n    iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n    iex> Nx.shape(t)\n    {2, 2}\n    iex> Nx.type(t)\n    {:bf, 16}","ref":"Axon.Initializers.html#normal/1-examples","title":"Examples - Axon.Initializers.normal/1","type":"function"},{"doc":"Initializes parameters to 1.","ref":"Axon.Initializers.html#ones/0","title":"Axon.Initializers.ones/0","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.ones()\n    iex> out = init_fn.({2, 2}, {:f, 32})\n    iex> out\n    #Nx.Tensor","ref":"Axon.Initializers.html#ones/0-examples","title":"Examples - Axon.Initializers.ones/0","type":"function"},{"doc":"Initializes a tensor with an orthogonal distribution.\n\nFor 2-D tensors, the initialization is generated through the QR decomposition of a random distribution.\nFor tensors with more than 2 dimensions, a 2-D tensor with shape `{shape[0] * shape[1] * ... * shape[n-2], shape[n-1]}`\nis initialized and then reshaped accordingly.","ref":"Axon.Initializers.html#orthogonal/1","title":"Axon.Initializers.orthogonal/1","type":"function"},{"doc":"* `:distribution` - output distribution. One of [`:normal`, `:uniform`].\n    Defaults to `:normal`","ref":"Axon.Initializers.html#orthogonal/1-options","title":"Options - Axon.Initializers.orthogonal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.orthogonal()\n    iex> t = init_fn.({3, 3}, {:f, 32}, Nx.Random.key(1))\n    iex> Nx.type(t)\n    {:f, 32}\n    iex> Nx.shape(t)\n    {3, 3}\n\n    iex> init_fn = Axon.Initializers.orthogonal()\n    iex> t = init_fn.({1, 2, 3, 4}, {:f, 64}, Nx.Random.key(1))\n    iex> Nx.type(t)\n    {:f, 64}\n    iex> Nx.shape(t)\n    {1, 2, 3, 4}","ref":"Axon.Initializers.html#orthogonal/1-examples","title":"Examples - Axon.Initializers.orthogonal/1","type":"function"},{"doc":"Initializes parameters with a random uniform distribution.","ref":"Axon.Initializers.html#uniform/1","title":"Axon.Initializers.uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `1.0e-2`","ref":"Axon.Initializers.html#uniform/1-options","title":"Options - Axon.Initializers.uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.uniform()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.uniform(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#uniform/1-examples","title":"Examples - Axon.Initializers.uniform/1","type":"function"},{"doc":"Initializes parameters with variance scaling according to\nthe given distribution and mode.\n\nVariance scaling adapts scale to the weights of the output\ntensor.","ref":"Axon.Initializers.html#variance_scaling/1","title":"Axon.Initializers.variance_scaling/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `1.0e-2`\n * `:mode` - compute fan mode. One of `:fan_in`, `:fan_out`, or `:fan_avg`.\n Defaults to `:fan_in`\n * `:distribution` - output distribution. One of `:normal`, `:truncated_normal`,\n or `:uniform`. Defaults to `:normal`","ref":"Axon.Initializers.html#variance_scaling/1-options","title":"Options - Axon.Initializers.variance_scaling/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.variance_scaling()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :truncated_normal)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}\n\n iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :normal)\n iex> t = init_fn.({64, 3, 32, 32}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {64, 3, 32, 32}\n iex> Nx.type(t)\n {:f, 32}","ref":"Axon.Initializers.html#variance_scaling/1-examples","title":"Examples - Axon.Initializers.variance_scaling/1","type":"function"},{"doc":"Initializes parameters to 0.","ref":"Axon.Initializers.html#zeros/0","title":"Axon.Initializers.zeros/0","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.zeros()\n iex> out = init_fn.({2, 2}, {:f, 32})\n iex> out\n #Nx.Tensor","ref":"Axon.Initializers.html#zeros/0-examples","title":"Examples - Axon.Initializers.zeros/0","type":"function"},{"doc":"Utilities for creating mixed precision policies.\n\nMixed precision is useful for increasing model throughput at the possible\nprice of a small dip in accuracy. When creating a mixed precision policy,\nyou define the policy for `params`, `compute`, and `output`.\n\nThe `params` policy dictates what type parameters should be stored as\nduring training. The `compute` policy dictates what type should be used\nduring intermediate computations in the model's forward pass. 
The `output`\npolicy dictates what type the model should output.\n\nHere's an example of creating a mixed precision policy and applying it\nto a model:\n\n model =\n Axon.input(\"input\", shape: {nil, 784})\n |> Axon.dense(128, activation: :relu)\n |> Axon.batch_norm()\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(64, activation: :relu)\n |> Axon.batch_norm()\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(10, activation: :softmax)\n\n policy = Axon.MixedPrecision.create_policy(\n params: {:f, 32},\n compute: {:f, 16},\n output: {:f, 32}\n )\n\n mp_model =\n model\n |> Axon.MixedPrecision.apply_policy(policy, except: [:batch_norm])\n\nThe example above applies the mixed precision policy to every layer in\nthe model except Batch Normalization layers. The policy will cast parameters\nand inputs to `{:f, 16}` for intermediate computations in the model's forward\npass before casting the output back to `{:f, 32}`.","ref":"Axon.MixedPrecision.html","title":"Axon.MixedPrecision","type":"module"},{"doc":"Casts the given container according to the given policy\nand type.","ref":"Axon.MixedPrecision.html#cast/3","title":"Axon.MixedPrecision.cast/3","type":"function"},{"doc":"iex> policy = Axon.MixedPrecision.create_policy(params: {:f, 16})\n iex> params = %{\"dense\" => %{\"kernel\" => Nx.tensor([1.0, 2.0, 3.0])}}\n iex> params = Axon.MixedPrecision.cast(policy, params, :params)\n iex> Nx.type(params[\"dense\"][\"kernel\"])\n {:f, 16}\n\n iex> policy = Axon.MixedPrecision.create_policy(compute: {:bf, 16})\n iex> value = Nx.tensor([1.0, 2.0, 3.0])\n iex> value = Axon.MixedPrecision.cast(policy, value, :compute)\n iex> Nx.type(value)\n {:bf, 16}\n\n iex> policy = Axon.MixedPrecision.create_policy(output: {:bf, 16})\n iex> value = Nx.tensor([1.0, 2.0, 3.0])\n iex> value = Axon.MixedPrecision.cast(policy, value, :output)\n iex> Nx.type(value)\n {:bf, 16}\n\nNote that integers are never promoted to floats:\n\n iex> policy = Axon.MixedPrecision.create_policy(output: {:f, 16})\n iex> value = Nx.tensor([1, 2, 3], type: :s64)\n iex> value = Axon.MixedPrecision.cast(policy, value, :params)\n iex> Nx.type(value)\n {:s, 64}","ref":"Axon.MixedPrecision.html#cast/3-examples","title":"Examples - Axon.MixedPrecision.cast/3","type":"function"},{"doc":"Creates a mixed precision policy with the given options.","ref":"Axon.MixedPrecision.html#create_policy/1","title":"Axon.MixedPrecision.create_policy/1","type":"function"},{"doc":"* `params` - parameter precision policy. Defaults to `{:f, 32}`\n * `compute` - compute precision policy. Defaults to `{:f, 32}`\n * `output` - output precision policy. Defaults to `{:f, 32}`","ref":"Axon.MixedPrecision.html#create_policy/1-options","title":"Options - Axon.MixedPrecision.create_policy/1","type":"function"},{"doc":"iex> Axon.MixedPrecision.create_policy(params: {:f, 16}, output: {:f, 16})\n #Axon.MixedPrecision.Policy \n\n iex> Axon.MixedPrecision.create_policy(compute: {:bf, 16})\n #Axon.MixedPrecision.Policy","ref":"Axon.MixedPrecision.html#create_policy/1-examples","title":"Examples - Axon.MixedPrecision.create_policy/1","type":"function"},{"doc":"Represents a missing value of an optional node.\n\nSee `Axon.input/2` and `Axon.optional/2` for more details.","ref":"Axon.None.html","title":"Axon.None","type":"module"},{"doc":"Container for returning stateful outputs from Axon layers.\n\nSome layers, such as `Axon.batch_norm/2`, keep a running internal\nstate which is updated continuously at train time and used statically\nat inference time. 
In order for the Axon compiler to differentiate\nordinary layer outputs from internal state, you must mark output\nas stateful.\n\nStateful Outputs consist of two fields:\n\n  * `:output` - Actual layer output to be forwarded to next layer\n\n  * `:state` - Internal layer state to be tracked and updated\n\n`:output` is simply forwarded to the next layer. `:state` is aggregated\nwith other stateful outputs, and then is treated specially by internal\nAxon training functions such that updated state parameters reflect the\nreturned values from stateful outputs.\n\n`:state` must be a map with keys that map directly to layer internal\nstate names. For example, `Axon.Layers.batch_norm` returns StatefulOutput\nwith `:state` keys of `\"mean\"` and `\"var\"`.","ref":"Axon.StatefulOutput.html","title":"Axon.StatefulOutput","type":"module"},{"doc":"Module for rendering various visual representations of Axon models.","ref":"Axon.Display.html","title":"Axon.Display","type":"module"},{"doc":"Traces execution of the given Axon model with the given\ninputs, rendering the execution flow as a mermaid flowchart.\n\nYou must include [kino](https://hex.pm/packages/kino) as\na dependency in your project to make use of this function.","ref":"Axon.Display.html#as_graph/3","title":"Axon.Display.as_graph/3","type":"function"},{"doc":"* `:direction` - defines the direction of the graph visual. The\n    value can either be `:top_down` or `:left_right`. Defaults to `:top_down`.","ref":"Axon.Display.html#as_graph/3-options","title":"Options - Axon.Display.as_graph/3","type":"function"},{"doc":"Given an Axon model:\n\n    model = Axon.input(\"input\") |> Axon.dense(32)\n\nYou can define input templates for each input:\n\n    input = Nx.template({1, 16}, :f32)\n\nAnd then display the execution flow of the model:\n\n    Axon.Display.as_graph(model, input, direction: :top_down)","ref":"Axon.Display.html#as_graph/3-examples","title":"Examples - Axon.Display.as_graph/3","type":"function"},{"doc":"Traces execution of the given Axon model with the given\ninputs, rendering the execution flow as a table.\n\nYou must include [table_rex](https://hex.pm/packages/table_rex) as\na dependency in your project to make use of this function.","ref":"Axon.Display.html#as_table/2","title":"Axon.Display.as_table/2","type":"function"},{"doc":"Given an Axon model:\n\n    model = Axon.input(\"input\") |> Axon.dense(32)\n\nYou can define input templates for each input:\n\n    input = Nx.template({1, 16}, :f32)\n\nAnd then display the execution flow of the model:\n\n    Axon.Display.as_table(model, input)","ref":"Axon.Display.html#as_table/2-examples","title":"Examples - Axon.Display.as_table/2","type":"function"},{"doc":"Activation functions.\n\nActivation functions are element-wise, (typically) non-linear\nfunctions called on the output of another layer, such as\na dense layer:\n\n    x\n    |> dense(weight, bias)\n    |> relu()\n\nActivation functions output the \"activation\" or how active\na given layer's neurons are in learning a representation\nof the data-generating distribution.\n\nSome activations are commonly used as output activations. 
For\nexample `softmax` is often used as the output in multiclass\nclassification problems because it returns a categorical\nprobability distribution:\n\n    iex> Axon.Activations.softmax(Nx.tensor([[1, 2, 3]], type: {:f, 32}))\n    #Nx.Tensor \n\nOther activations such as `tanh` or `sigmoid` are used because\nthey have desirable properties, such as keeping the output\ntensor constrained within a certain range.\n\nGenerally, the choice of activation function is arbitrary,\nalthough some activations work better than others in certain\nproblem domains. For example ReLU (rectified linear unit)\nactivation is a widely-accepted default. You can see\na list of activation functions and implementations\n[here](https://paperswithcode.com/methods/category/activation-functions).\n\nAll of the functions in this module are implemented as\nnumerical functions and can be JIT or AOT compiled with\nany supported `Nx` compiler.","ref":"Axon.Activations.html","title":"Axon.Activations","type":"module"},{"doc":"Continuously-differentiable exponential linear unit activation.\n\n$$f(x_i) = \\max(0, x_i) + \\min(0, \\alpha * e^{\\frac{x_i}{\\alpha}} - 1)$$","ref":"Axon.Activations.html#celu/2","title":"Axon.Activations.celu/2","type":"function"},{"doc":"* `alpha` - $\\alpha$ in CELU formulation. Must be non-zero.\n    Defaults to `1.0`","ref":"Axon.Activations.html#celu/2-options","title":"Options - Axon.Activations.celu/2","type":"function"},{"doc":"iex> Axon.Activations.celu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.celu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))\n    #Nx.Tensor \n\n#","ref":"Axon.Activations.html#celu/2-examples","title":"Examples - Axon.Activations.celu/2","type":"function"},{"doc":"iex> Axon.Activations.celu(Nx.tensor([0.0, 1.0, 2.0], type: {:f, 32}), alpha: 0.0)\n    ** (ArgumentError) :alpha must be non-zero in CELU activation","ref":"Axon.Activations.html#celu/2-error-cases","title":"Error cases - Axon.Activations.celu/2","type":"function"},{"doc":"* [Continuously Differentiable Exponential Linear Units](https://arxiv.org/pdf/1704.07483.pdf)","ref":"Axon.Activations.html#celu/2-references","title":"References - Axon.Activations.celu/2","type":"function"},{"doc":"Exponential linear unit activation.\n\nEquivalent to `celu` for $\\alpha = 1$\n\n$$f(x_i) = \\begin{cases}x_i & x_i > 0 \\newline \\alpha * (e^{x_i} - 1) & x_i \\leq 0 \\\\ \\end{cases}$$","ref":"Axon.Activations.html#elu/2","title":"Axon.Activations.elu/2","type":"function"},{"doc":"* `alpha` - $\\alpha$ in ELU formulation. 
Defaults to `1.0`","ref":"Axon.Activations.html#elu/2-options","title":"Options - Axon.Activations.elu/2","type":"function"},{"doc":"iex> Axon.Activations.elu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.elu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))\n    #Nx.Tensor","ref":"Axon.Activations.html#elu/2-examples","title":"Examples - Axon.Activations.elu/2","type":"function"},{"doc":"* [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)","ref":"Axon.Activations.html#elu/2-references","title":"References - Axon.Activations.elu/2","type":"function"},{"doc":"Exponential activation.\n\n$$f(x_i) = e^{x_i}$$","ref":"Axon.Activations.html#exp/1","title":"Axon.Activations.exp/1","type":"function"},{"doc":"iex> Axon.Activations.exp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.exp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#exp/1-examples","title":"Examples - Axon.Activations.exp/1","type":"function"},{"doc":"Gaussian error linear unit activation.\n\n$$f(x_i) = \\frac{x_i}{2}(1 + {erf}(\\frac{x_i}{\\sqrt{2}}))$$","ref":"Axon.Activations.html#gelu/1","title":"Axon.Activations.gelu/1","type":"function"},{"doc":"iex> Axon.Activations.gelu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.gelu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#gelu/1-examples","title":"Examples - Axon.Activations.gelu/1","type":"function"},{"doc":"* [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)","ref":"Axon.Activations.html#gelu/1-references","title":"References - Axon.Activations.gelu/1","type":"function"},{"doc":"Hard sigmoid activation.","ref":"Axon.Activations.html#hard_sigmoid/2","title":"Axon.Activations.hard_sigmoid/2","type":"function"},{"doc":"iex> Axon.Activations.hard_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.hard_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#hard_sigmoid/2-examples","title":"Examples - Axon.Activations.hard_sigmoid/2","type":"function"},{"doc":"Hard sigmoid weighted linear unit activation.\n\n$$f(x_i) = \\begin{cases} 0 & x_i \\leq -3 \\newline\nx_i & x_i \\geq 3 \\newline\n\\frac{x_i^2}{6} + \\frac{x_i}{2} & otherwise \\end{cases}$$","ref":"Axon.Activations.html#hard_silu/2","title":"Axon.Activations.hard_silu/2","type":"function"},{"doc":"iex> Axon.Activations.hard_silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.hard_silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#hard_silu/2-examples","title":"Examples - Axon.Activations.hard_silu/2","type":"function"},{"doc":"Hard hyperbolic tangent activation.\n\n$$f(x_i) = \\begin{cases} 1 & x > 1 \\newline -1 & x < -1 \\newline x & otherwise \\end{cases}$$","ref":"Axon.Activations.html#hard_tanh/1","title":"Axon.Activations.hard_tanh/1","type":"function"},{"doc":"iex> Axon.Activations.hard_tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> 
Axon.Activations.hard_tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#hard_tanh/1-examples","title":"Examples - Axon.Activations.hard_tanh/1","type":"function"},{"doc":"Leaky rectified linear unit activation.\n\n$$f(x_i) = \\begin{cases} x & x \\geq 0 \\newline \\alpha * x & otherwise \\end{cases}$$","ref":"Axon.Activations.html#leaky_relu/2","title":"Axon.Activations.leaky_relu/2","type":"function"},{"doc":"* `:alpha` - $\\alpha$ in Leaky ReLU formulation. Defaults to `1.0e-2`","ref":"Axon.Activations.html#leaky_relu/2-options","title":"Options - Axon.Activations.leaky_relu/2","type":"function"},{"doc":"iex> Axon.Activations.leaky_relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]), alpha: 0.5)\n    #Nx.Tensor \n\n    iex> Axon.Activations.leaky_relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], names: [:batch, :data]), alpha: 0.5)\n    #Nx.Tensor","ref":"Axon.Activations.html#leaky_relu/2-examples","title":"Examples - Axon.Activations.leaky_relu/2","type":"function"},{"doc":"Linear activation.\n\n$$f(x_i) = x_i$$","ref":"Axon.Activations.html#linear/1","title":"Axon.Activations.linear/1","type":"function"},{"doc":"iex> Axon.Activations.linear(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.linear(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#linear/1-examples","title":"Examples - Axon.Activations.linear/1","type":"function"},{"doc":"Log-sigmoid activation.\n\n$$f(x_i) = \\log(sigmoid(x_i))$$","ref":"Axon.Activations.html#log_sigmoid/1","title":"Axon.Activations.log_sigmoid/1","type":"function"},{"doc":"iex> Axon.Activations.log_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.log_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#log_sigmoid/1-examples","title":"Examples - Axon.Activations.log_sigmoid/1","type":"function"},{"doc":"Log-softmax activation.\n\n$$f(x_i) = x_i - \\log(\\sum_i e^{x_i})$$","ref":"Axon.Activations.html#log_softmax/2","title":"Axon.Activations.log_softmax/2","type":"function"},{"doc":"iex> Axon.Activations.log_softmax(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.log_softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#log_softmax/2-examples","title":"Examples - Axon.Activations.log_softmax/2","type":"function"},{"doc":"Logsumexp activation.\n\n$$f(x) = \\log(\\sum_i e^{x_i})$$","ref":"Axon.Activations.html#log_sumexp/2","title":"Axon.Activations.log_sumexp/2","type":"function"},{"doc":"iex> Axon.Activations.log_sumexp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.log_sumexp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#log_sumexp/2-examples","title":"Examples - Axon.Activations.log_sumexp/2","type":"function"},{"doc":"Mish activation.\n\n$$f(x_i) = x_i * \\tanh(\\log(1 + e^{x_i}))$$","ref":"Axon.Activations.html#mish/1","title":"Axon.Activations.mish/1","type":"function"},{"doc":"iex> Axon.Activations.mish(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 
32}, names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.mish(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#mish/1-examples","title":"Examples - Axon.Activations.mish/1","type":"function"},{"doc":"Rectified linear unit 6 activation.\n\n$$f(x_i) = \\min(\\max(x_i, 0), 6)$$","ref":"Axon.Activations.html#relu6/1","title":"Axon.Activations.relu6/1","type":"function"},{"doc":"iex> Axon.Activations.relu6(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.relu6(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#relu6/1-examples","title":"Examples - Axon.Activations.relu6/1","type":"function"},{"doc":"* [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861v1)","ref":"Axon.Activations.html#relu6/1-references","title":"References - Axon.Activations.relu6/1","type":"function"},{"doc":"Rectified linear unit activation.\n\n$$f(x_i) = \\max(x_i, 0)$$","ref":"Axon.Activations.html#relu/1","title":"Axon.Activations.relu/1","type":"function"},{"doc":"iex> Axon.Activations.relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#relu/1-examples","title":"Examples - Axon.Activations.relu/1","type":"function"},{"doc":"Scaled exponential linear unit activation.\n\n$$f(x_i) = \\begin{cases} \\lambda x & x \\geq 0 \\newline\n\\lambda \\alpha(e^{x} - 1) & x < 0 \\end{cases}$$\n\n$$\\alpha \\approx 1.6733$$\n$$\\lambda \\approx 1.0507$$","ref":"Axon.Activations.html#selu/2","title":"Axon.Activations.selu/2","type":"function"},{"doc":"iex> Axon.Activations.selu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.selu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#selu/2-examples","title":"Examples - Axon.Activations.selu/2","type":"function"},{"doc":"* [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515v5)","ref":"Axon.Activations.html#selu/2-references","title":"References - Axon.Activations.selu/2","type":"function"},{"doc":"Sigmoid activation.\n\n$$f(x_i) = \\frac{1}{1 + e^{-x_i}}$$\n\n**Implementation Note: Sigmoid logits are cached as metadata\nin the expression and can be used in calculations later on.\nFor example, they are used in cross-entropy calculations for\nbetter stability.**","ref":"Axon.Activations.html#sigmoid/1","title":"Axon.Activations.sigmoid/1","type":"function"},{"doc":"iex> Axon.Activations.sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#sigmoid/1-examples","title":"Examples - Axon.Activations.sigmoid/1","type":"function"},{"doc":"Sigmoid weighted linear unit activation.\n\n$$f(x_i) = x_i * sigmoid(x_i)$$","ref":"Axon.Activations.html#silu/1","title":"Axon.Activations.silu/1","type":"function"},{"doc":"iex> Axon.Activations.silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.silu(Nx.tensor([[-1.0, -2.0, -3.0], 
[1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#silu/1-examples","title":"Examples - Axon.Activations.silu/1","type":"function"},{"doc":"* [Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning](https://arxiv.org/abs/1702.03118v3)","ref":"Axon.Activations.html#silu/1-references","title":"References - Axon.Activations.silu/1","type":"function"},{"doc":"Softmax activation along an axis.\n\n$$\\frac{e^{x_i}}{\\sum_i e^{x_i}}$$\n\n**Implementation Note: Softmax logits are cached as metadata\nin the expression and can be used in calculations later on.\nFor example, they are used in cross-entropy calculations for\nbetter stability.**","ref":"Axon.Activations.html#softmax/2","title":"Axon.Activations.softmax/2","type":"function"},{"doc":"* `:axis` - softmax axis along which to calculate distribution.\n    Defaults to 1.","ref":"Axon.Activations.html#softmax/2-options","title":"Options - Axon.Activations.softmax/2","type":"function"},{"doc":"iex> Axon.Activations.softmax(Nx.tensor([[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]], names: [:batch, :data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#softmax/2-examples","title":"Examples - Axon.Activations.softmax/2","type":"function"},{"doc":"Softplus activation.\n\n$$\\log(1 + e^{x_i})$$","ref":"Axon.Activations.html#softplus/1","title":"Axon.Activations.softplus/1","type":"function"},{"doc":"iex> Axon.Activations.softplus(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.softplus(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#softplus/1-examples","title":"Examples - Axon.Activations.softplus/1","type":"function"},{"doc":"Softsign activation.\n\n$$f(x_i) = \\frac{x_i}{|x_i| + 1}$$","ref":"Axon.Activations.html#softsign/1","title":"Axon.Activations.softsign/1","type":"function"},{"doc":"iex> Axon.Activations.softsign(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.softsign(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#softsign/1-examples","title":"Examples - Axon.Activations.softsign/1","type":"function"},{"doc":"Hyperbolic tangent activation.\n\n$$f(x_i) = \\tanh(x_i)$$","ref":"Axon.Activations.html#tanh/1","title":"Axon.Activations.tanh/1","type":"function"},{"doc":"iex> Axon.Activations.tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#tanh/1-examples","title":"Examples - Axon.Activations.tanh/1","type":"function"},{"doc":"Functional implementations of common neural network layer\noperations.\n\nLayers are the building blocks of neural networks. These\nfunctional implementations can be used to express higher-level\nconstructs using fundamental building blocks. Neural network\nlayers are stateful with respect to their parameters.\nThese implementations do not assume the responsibility of\nmanaging state - instead opting to delegate this responsibility\nto the caller.\n\n
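For example (a minimal sketch), the caller creates the parameters and passes them explicitly on every call:

    w = Nx.broadcast(0.1, {2, 4})
    b = Nx.broadcast(0.0, {4})
    x = Nx.iota({1, 2}, type: :f32)

    # parameters live outside the layer; the functions are pure
    x |> Axon.Layers.dense(w, b) |> Axon.Activations.relu()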
Basic neural networks can be seen as a composition of functions:\n\n    input\n    |> dense(w1, b1)\n    |> relu()\n    |> dense(w2, b2)\n    |> softmax()\n\nThese kinds of models are often referred to as deep feedforward networks\nor multilayer perceptrons (MLPs) because information flows forward\nthrough the network with no feedback connections. Mathematically,\na feedforward network can be represented as:\n\n    $$f(x) = f^{(3)}(f^{(2)}(f^{(1)}(x)))$$\n\nYou can see a similar pattern emerge if we condense the call stack\nin the previous example:\n\n    softmax(dense(relu(dense(input, w1, b1)), w2, b2))\n\nThe chain structure shown here is the most common structure used\nin neural networks. You can consider each function $f^{(n)}$ as a\n*layer* in the neural network - for example $f^{(2)}$ is the 2nd\nlayer in the network. The number of function calls in the\nstructure is the *depth* of the network. This is where the term\n*deep learning* comes from.\n\nNeural networks are often written as the mapping:\n\n    $$y = f(x; \\theta)$$\n\nWhere $x$ is the input to the neural network and $\\theta$ are the\nset of learned parameters. In Elixir, you would write this:\n\n    y = model(input, params)\n\nFrom the previous example, `params` would represent the collection:\n\n    {w1, b1, w2, b2}\n\nwhere `w1` and `w2` are layer *kernels*, and `b1` and `b2` are layer\n*biases*.","ref":"Axon.Layers.html","title":"Axon.Layers","type":"module"},{"doc":"Functional implementation of general dimensional adaptive average\npooling.\n\nAdaptive pooling allows you to specify the desired output size\nof the transformed input. This will automatically adapt the\nwindow size and strides to obtain the desired output size. It\nwill then perform average pooling using the calculated window\nsize and strides.\n\nAdaptive pooling can be useful when working on multiple inputs with\ndifferent spatial input shapes. You can guarantee the output of\nan adaptive pooling operation is always the same size regardless\nof input shape.","ref":"Axon.Layers.html#adaptive_avg_pool/2","title":"Axon.Layers.adaptive_avg_pool/2","type":"function"},{"doc":"* `:output_size` - spatial output size. Must be a tuple with\n    size equal to the spatial dimensions in the input tensor.\n    Required.\n\n  * `:channels` - channel configuration. One of `:first` or `:last`.\n    Defaults to `:last`.","ref":"Axon.Layers.html#adaptive_avg_pool/2-options","title":"Options - Axon.Layers.adaptive_avg_pool/2","type":"function"},{"doc":"Functional implementation of general dimensional adaptive power\naverage pooling.\n\nComputes:\n\n    $$f(X) = \\sqrt[p]{\\sum_{x \\in X} x^{p}}$$\n\nAdaptive pooling allows you to specify the desired output size\nof the transformed input. This will automatically adapt the\nwindow size and strides to obtain the desired output size. It\nwill then perform power average pooling using the calculated window\nsize and strides.\n\nAdaptive pooling can be useful when working on multiple inputs with\ndifferent spatial input shapes. You can guarantee the output of\nan adaptive pooling operation is always the same size regardless\nof input shape.","ref":"Axon.Layers.html#adaptive_lp_pool/2","title":"Axon.Layers.adaptive_lp_pool/2","type":"function"},{"doc":"* `:norm` - $p$ from above equation. Defaults to 2.\n\n  * `:output_size` - spatial output size. Must be a tuple with\n    size equal to the spatial dimensions in the input tensor.\n    Required.","ref":"Axon.Layers.html#adaptive_lp_pool/2-options","title":"Options - Axon.Layers.adaptive_lp_pool/2","type":"function"},
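A usage sketch of the adaptive pooling functions above (the shapes are illustrative):

    input = Nx.iota({1, 6, 6, 3}, type: :f32)
    # output spatial size is fixed at {2, 2} no matter the input's spatial size
    Axon.Layers.adaptive_avg_pool(input, output_size: {2, 2}, channels: :last)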
Must be a tuple with\n size equal to the spatial dimensions in the input tensor.\n Required.","ref":"Axon.Layers.html#adaptive_lp_pool/2-options","title":"Options - Axon.Layers.adaptive_lp_pool/2","type":"function"},{"doc":"Functional implementation of general dimensional adaptive max\npooling.\n\nAdaptive pooling allows you to specify the desired output size\nof the transformed input. This will automatically adapt the\nwindow size and strides to obtain the desired output size. It\nwill then perform max pooling using the calculated window\nsize and strides.\n\nAdaptive pooling can be useful when working on multiple inputs with\ndifferent spatial input shapes. You can guarantee the output of\nan adaptive pooling operation is always the same size regardless\nof input shape.","ref":"Axon.Layers.html#adaptive_max_pool/2","title":"Axon.Layers.adaptive_max_pool/2","type":"function"},{"doc":"* `:output_size` - spatial output size. Must be a tuple with\n size equal to the spatial dimensions in the input tensor.\n Required.","ref":"Axon.Layers.html#adaptive_max_pool/2-options","title":"Options - Axon.Layers.adaptive_max_pool/2","type":"function"},{"doc":"Functional implementation of an alpha dropout layer.\n\nAlpha dropout is a type of dropout that forces the input\nto have zero mean and unit standard deviation. Randomly\nmasks some elements and scales to enforce self-normalization.","ref":"Axon.Layers.html#alpha_dropout/3","title":"Axon.Layers.alpha_dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n will be dropped. Required.\n\n * `:noise_shape` - input noise shape. Shape of `mask` which can be useful\n for broadcasting `mask` across feature channels or other dimensions.\n Defaults to shape of input tensor.","ref":"Axon.Layers.html#alpha_dropout/3-options","title":"Options - Axon.Layers.alpha_dropout/3","type":"function"},{"doc":"* [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)","ref":"Axon.Layers.html#alpha_dropout/3-references","title":"References - Axon.Layers.alpha_dropout/3","type":"function"},{"doc":"A general dimensional functional average pooling layer.\n\nPooling is applied to the spatial dimension of the input tensor.\nAverage pooling returns the average of all elements in valid\nwindows in the input tensor. It is often used after convolutional\nlayers to downsample the input even further.","ref":"Axon.Layers.html#avg_pool/2","title":"Axon.Layers.avg_pool/2","type":"function"},{"doc":"* `kernel_size` - window size. Rank must match spatial dimension\n of the input tensor. Required.\n\n * `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:window_dilations` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Can be scalar or list whose length matches the number of\n spatial dimensions in the input tensor. Defaults to `1` or no\n dilation.\n\n * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#avg_pool/2-options","title":"Options - Axon.Layers.avg_pool/2","type":"function"},{"doc":"Functional implementation of batch normalization.\n\nNormalizes the input by calculating mean and variance of the\ninput tensor along every dimension but the given `:channel_index`,\nand then scaling according to:\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. If `training?` is\ntrue, this method will compute a new mean and variance, and return\nthe updated `ra_mean` and `ra_var`. Otherwise, it will just compute\nbatch norm from the given ra_mean and ra_var.","ref":"Axon.Layers.html#batch_norm/6","title":"Axon.Layers.batch_norm/6","type":"function"},{"doc":"* `:epsilon` - numerical stability term. $\\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes for mean and variance calculation.\n\n * `:momentum` - momentum to use for EMA update.\n\n * `:mode` - if `:train`, uses training mode batch norm. Defaults to `:inference`.","ref":"Axon.Layers.html#batch_norm/6-options","title":"Options - Axon.Layers.batch_norm/6","type":"function"},{"doc":"* [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)","ref":"Axon.Layers.html#batch_norm/6-references","title":"References - Axon.Layers.batch_norm/6","type":"function"},{"doc":"Functional implementation of a bilinear layer.\n\nBilinear transformation of the input such that:\n\n$$y = x_1^{T}Ax_2 + b$$","ref":"Axon.Layers.html#bilinear/5","title":"Axon.Layers.bilinear/5","type":"function"},{"doc":"* `input1` - `{batch_size, ..., input1_features}`\n * `input2` - `{batch_size, ..., input2_features}`\n * `kernel` - `{out_features, input1_features, input2_features}`","ref":"Axon.Layers.html#bilinear/5-parameter-shapes","title":"Parameter Shapes - Axon.Layers.bilinear/5","type":"function"},{"doc":"`{batch_size, ..., output_features}`","ref":"Axon.Layers.html#bilinear/5-output-shape","title":"Output Shape - Axon.Layers.bilinear/5","type":"function"},{"doc":"iex> inp1 = Nx.iota({3, 2}, type: {:f, 32})\n iex> inp2 = Nx.iota({3, 4}, type: {:f, 32})\n iex> kernel = Nx.iota({1, 2, 4}, type: {:f, 32})\n iex> bias = Nx.tensor(1.0)\n iex> Axon.Layers.bilinear(inp1, inp2, kernel, bias)\n #Nx.Tensor","ref":"Axon.Layers.html#bilinear/5-examples","title":"Examples - Axon.Layers.bilinear/5","type":"function"},{"doc":"Functional implementation of a 2-dimensional blur pooling layer.\n\nBlur pooling applies a spatial low-pass filter to the input. It is\noften applied before pooling and convolutional layers as a way to\nincrease model accuracy without much additional computation cost.\n\nThe blur pooling implementation follows from [MosaicML](https://github.com/mosaicml/composer/blob/dev/composer/algorithms/blurpool/blurpool_layers.py).","ref":"Axon.Layers.html#blur_pool/2","title":"Axon.Layers.blur_pool/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#celu/2","title":"Axon.Layers.celu/2","type":"function"},{"doc":"Functional implementation of a general dimensional convolutional\nlayer.\n\nConvolutional layers can be described as applying a convolution\nover an input signal composed of several input planes. 
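(As a standard convolution shape rule, noted here for reference: with `:valid` padding, unit strides, and unit dilations, each spatial output size is `input_spatial - kernel_spatial + 1`; the one-dimensional example below maps an input of spatial size 3 and a kernel of spatial size 2 to an output of spatial size 2.) 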
Intuitively,\nthe input kernel slides `output_channels` number of filters over\nthe input tensor to extract features from the input tensor.\n\nConvolutional layers are most commonly used in computer vision,\nbut can also be useful when working with sequences and other input signals.","ref":"Axon.Layers.html#conv/4","title":"Axon.Layers.conv/4","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`\n * `kernel` - `{output_channels, input_channels, kernel_spatial0, ..., kernel_spatialN}`\n * `bias` - `{}` or `{output_channels}`","ref":"Axon.Layers.html#conv/4-parameter-shapes","title":"Parameter Shapes - Axon.Layers.conv/4","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `input_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#conv/4-options","title":"Options - Axon.Layers.conv/4","type":"function"},{"doc":"#","ref":"Axon.Layers.html#conv/4-examples","title":"Examples - Axon.Layers.conv/4","type":"function"},{"doc":"iex> input = Nx.tensor([[[0.1294, -0.6638, 1.0251]], [[ 0.9182, 1.1512, -1.6149]]], type: {:f, 32})\n iex> kernel = Nx.tensor([[[-1.5475, 1.2425]], [[0.1871, 0.5458]], [[-0.4488, 0.8879]]], type: {:f, 32})\n iex> bias = Nx.tensor([0.7791, 0.1676, 1.5971], type: {:f, 32})\n iex> Axon.Layers.conv(input, kernel, bias, channels: :first)\n #Nx.Tensor \n\n#","ref":"Axon.Layers.html#conv/4-one-dimensional-convolution","title":"One-dimensional convolution - Axon.Layers.conv/4","type":"function"},{"doc":"iex> input = Nx.tensor([[[[-1.0476, -0.5041], [-0.9336, 1.5907]]]], type: {:f, 32})\n iex> kernel = Nx.tensor([\n ...> [[[0.7514, 0.7356], [1.3909, 0.6800]]],\n ...> [[[-0.3450, 0.4551], [-0.6275, -0.9875]]],\n ...> [[[1.8587, 0.4722], [0.6058, -1.0301]]]\n ...> ], type: {:f, 32})\n iex> bias = Nx.tensor([1.9564, 0.2822, -0.5385], type: {:f, 32})\n iex> Axon.Layers.conv(input, kernel, bias, channels: :first)\n #Nx.Tensor \n\n#","ref":"Axon.Layers.html#conv/4-two-dimensional-convolution","title":"Two-dimensional convolution - Axon.Layers.conv/4","type":"function"},{"doc":"iex> input = Nx.tensor([[[[[-0.6497], [1.0939]], [[-2.5465], [0.7801]]]]], type: {:f, 32})\n iex> kernel = Nx.tensor([\n ...> [[[[ 0.7390], [-0.0927]], [[-0.8675], [-0.9209]]]],\n ...> [[[[-0.6638], [0.4341]], [[0.6368], [1.1846]]]]\n ...> ], type: {:f, 32})\n iex> bias = Nx.tensor([-0.4101, 0.1776], type: {:f, 32})\n iex> Axon.Layers.conv(input, kernel, bias, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#conv/4-three-dimensional-convolution","title":"Three-dimensional convolution - Axon.Layers.conv/4","type":"function"},{"doc":"","ref":"Axon.Layers.html#conv_lstm/7","title":"Axon.Layers.conv_lstm/7","type":"function"},{"doc":"ConvLSTM Cell.\n\nWhen combined with 
`Axon.Layers.*_unroll`, implements a\nConvLSTM-based RNN. More memory efficient than traditional LSTM.","ref":"Axon.Layers.html#conv_lstm_cell/7","title":"Axon.Layers.conv_lstm_cell/7","type":"function"},{"doc":"* `:strides` - convolution strides. Defaults to `1`.\n\n * `:padding` - convolution padding. Defaults to `:same`.","ref":"Axon.Layers.html#conv_lstm_cell/7-options","title":"Options - Axon.Layers.conv_lstm_cell/7","type":"function"},{"doc":"* [Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](https://arxiv.org/abs/1506.04214)","ref":"Axon.Layers.html#conv_lstm_cell/7-references","title":"References - Axon.Layers.conv_lstm_cell/7","type":"function"},{"doc":"Functional implementation of a general dimensional transposed\nconvolutional layer.\n\n*Note: This layer is currently implemented as a fractionally strided\nconvolution by padding the input tensor. Please open an issue if you'd\nlike this behavior changed.*\n\nTransposed convolutions are sometimes (incorrectly) referred to as\ndeconvolutions because they \"reverse\" the spatial dimensions\nof a normal convolution. Transposed convolutions are a form of upsampling -\nthey produce larger spatial dimensions than the input tensor. They\ncan be thought of as a convolution in reverse - and are sometimes\nimplemented as the backward pass of a normal convolution.","ref":"Axon.Layers.html#conv_transpose/4","title":"Axon.Layers.conv_transpose/4","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `input_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#conv_transpose/4-options","title":"Options - Axon.Layers.conv_transpose/4","type":"function"},{"doc":"iex> input = Nx.iota({1, 3, 3}, type: {:f, 32})\n iex> kernel = Nx.iota({6, 3, 2}, type: {:f, 32})\n iex> bias = Nx.tensor(1.0, type: {:f, 32})\n iex> Axon.Layers.conv_transpose(input, kernel, bias, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#conv_transpose/4-examples","title":"Examples - Axon.Layers.conv_transpose/4","type":"function"},{"doc":"* [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)\n * [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)","ref":"Axon.Layers.html#conv_transpose/4-references","title":"References - Axon.Layers.conv_transpose/4","type":"function"},{"doc":"Functional implementation of a dense layer.\n\nLinear transformation of the input such that:\n\n$$y = xW^T + b$$\n\nA dense layer or fully connected layer transforms\nthe input using the given kernel matrix and bias\nto compute:\n\n Nx.dot(input, kernel) + bias\n\nTypically, both `kernel` and `bias` are learnable\nparameters trained using gradient-based optimization.","ref":"Axon.Layers.html#dense/4","title":"Axon.Layers.dense/4","type":"function"},{"doc":"* `input` - `{batch_size, *, input_features}`\n * `kernel` - `{input_features, output_features}`\n * `bias` - `{}` or `{output_features}`","ref":"Axon.Layers.html#dense/4-parameter-shapes","title":"Parameter Shapes - Axon.Layers.dense/4","type":"function"},{"doc":"`{batch_size, *, output_features}`","ref":"Axon.Layers.html#dense/4-output-shape","title":"Output Shape - Axon.Layers.dense/4","type":"function"},{"doc":"iex> input = Nx.tensor([[1.0, 0.5, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0]], type: {:f, 32})\n iex> kernel = Nx.tensor([[0.2], [0.3], [0.5], [0.8]], type: {:f, 32})\n iex> bias = Nx.tensor([1.0], type: {:f, 32})\n iex> Axon.Layers.dense(input, kernel, bias)\n #Nx.Tensor","ref":"Axon.Layers.html#dense/4-examples","title":"Examples - Axon.Layers.dense/4","type":"function"},{"doc":"Functional implementation of a general dimensional depthwise\nconvolution.\n\nDepthwise convolutions apply a single convolutional filter to\neach input channel. This is done by setting `feature_group_size`\nequal to the number of input channels. This will split the\n`output_channels` into `input_channels` number of groups and\nconvolve the grouped kernel channels over the corresponding input\nchannel.","ref":"Axon.Layers.html#depthwise_conv/4","title":"Axon.Layers.depthwise_conv/4","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`\n * `kernel` - `{output_channels, 1, kernel_spatial0, ..., kernel_spatialN}`\n * `bias` - `{output_channels}` or `{}`\n\n `output_channels` must be a multiple of the input channels.","ref":"Axon.Layers.html#depthwise_conv/4-parameter-shapes","title":"Parameter Shapes - Axon.Layers.depthwise_conv/4","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. 
The amount\n of interior padding applied is given by `input_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#depthwise_conv/4-options","title":"Options - Axon.Layers.depthwise_conv/4","type":"function"},{"doc":"Functional implementation of a dropout layer.\n\nApplies a mask to some elements of the input tensor with probability\n`rate` and scales the input tensor by a factor of $\\frac{1}{1 - rate}$.\n\nDropout is a form of regularization that helps prevent overfitting\nby preventing models from becoming too reliant on certain connections.\nDropout can somewhat be thought of as learning an ensemble of models\nwith random connections masked.","ref":"Axon.Layers.html#dropout/3","title":"Axon.Layers.dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n will be dropped. Required.\n\n * `:noise_shape` - input noise shape. Shape of `mask` which can be useful\n for broadcasting `mask` across feature channels or other dimensions.\n Defaults to shape of input tensor.","ref":"Axon.Layers.html#dropout/3-options","title":"Options - Axon.Layers.dropout/3","type":"function"},{"doc":"* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](https://jmlr.org/papers/v15/srivastava14a.html)","ref":"Axon.Layers.html#dropout/3-references","title":"References - Axon.Layers.dropout/3","type":"function"},{"doc":"Dynamically unrolls an RNN.\n\nUnrolls implement a `scan` operation which applies a\ntransformation on the leading axis of `input_sequence` carrying\nsome state. In this instance `cell_fn` is an RNN cell function\nsuch as `lstm_cell` or `gru_cell`.\n\nThis function makes use of a `defn` while-loop, and thus\nmay be more efficient for long sequences.","ref":"Axon.Layers.html#dynamic_unroll/7","title":"Axon.Layers.dynamic_unroll/7","type":"function"},{"doc":"","ref":"Axon.Layers.html#elu/2","title":"Axon.Layers.elu/2","type":"function"},{"doc":"Computes embedding by treating kernel matrix as a lookup table\nfor discrete tokens.\n\n`input` is a vector of discrete values, typically representing tokens\n(e.g. words, characters, etc.) from a vocabulary. 
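For instance, an input value of `1` is simply replaced by row 1 of the lookup table, as shown in the example below. 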
`kernel` is a kernel\nmatrix of shape `{vocab_size, embedding_size}` from which the dense\nembeddings will be drawn.","ref":"Axon.Layers.html#embedding/3","title":"Axon.Layers.embedding/3","type":"function"},{"doc":"* `input` - `{batch_size, ..., seq_len}`\n * `kernel` - `{vocab_size, embedding_size}`","ref":"Axon.Layers.html#embedding/3-parameter-shapes","title":"Parameter Shapes - Axon.Layers.embedding/3","type":"function"},{"doc":"iex> input = Nx.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])\n iex> kernels = Nx.tensor([\n ...> [0.46299999952316284, 0.5562999844551086, 0.18170000612735748],\n ...> [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],\n ...> [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],\n ...> [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],\n ...> [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],\n ...> [0.1898999959230423, 0.9311000108718872, 0.8356000185012817],\n ...> [0.6383000016212463, 0.8794000148773193, 0.5282999873161316],\n ...> [0.9523000121116638, 0.7597000002861023, 0.08250000327825546],\n ...> [0.6622999906539917, 0.02329999953508377, 0.8205999732017517],\n ...> [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]\n ...> ])\n iex> Axon.Layers.embedding(input, kernels)\n #Nx.Tensor","ref":"Axon.Layers.html#embedding/3-examples","title":"Examples - Axon.Layers.embedding/3","type":"function"},{"doc":"Functional implementation of a feature alpha dropout layer.\n\nFeature alpha dropout applies dropout in the same manner as\nspatial dropout; however, it also enforces self-normalization\nby masking inputs with the SELU activation function and scaling\nunmasked inputs.","ref":"Axon.Layers.html#feature_alpha_dropout/3","title":"Axon.Layers.feature_alpha_dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n will be dropped. Required.\n\n * `:noise_shape` - input noise shape. Shape of `mask` which can be useful\n for broadcasting `mask` across feature channels or other dimensions.\n Defaults to shape of input tensor.","ref":"Axon.Layers.html#feature_alpha_dropout/3-options","title":"Options - Axon.Layers.feature_alpha_dropout/3","type":"function"},{"doc":"Flattens input to shape of `{batch, units}` by folding outer\ndimensions.","ref":"Axon.Layers.html#flatten/2","title":"Axon.Layers.flatten/2","type":"function"},{"doc":"iex> Axon.Layers.flatten(Nx.iota({1, 2, 2}, type: {:f, 32}))\n #Nx.Tensor","ref":"Axon.Layers.html#flatten/2-examples","title":"Examples - Axon.Layers.flatten/2","type":"function"},{"doc":"Functional implementation of global average pooling which averages across\nthe spatial dimensions of the input such that the only remaining dimensions\nare the batch and feature dimensions.\n\nAssumes data is configured in a channels-first like format.","ref":"Axon.Layers.html#global_avg_pool/2","title":"Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"* `input` - {batch_size, features, s1, ..., sN}","ref":"Axon.Layers.html#global_avg_pool/2-parameter-shapes","title":"Parameter Shapes - Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"* `:keep_axes` - option to keep reduced axes with size 1 for each reduced\n dimension. 
Defaults to `false`","ref":"Axon.Layers.html#global_avg_pool/2-options","title":"Options - Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"iex> Axon.Layers.global_avg_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)\n #Nx.Tensor \n\n iex> Axon.Layers.global_avg_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), channels: :first, keep_axes: true)\n #Nx.Tensor","ref":"Axon.Layers.html#global_avg_pool/2-examples","title":"Examples - Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"Functional implementation of global LP pooling which computes the following\nfunction across spatial dimensions of the input:\n\n $$f(X) = \\sqrt[p]{\\sum_{x \\in X} x^{p}}$$\n\nWhere $p$ is given by the keyword argument `:norm`. As $p$ approaches\ninfinity, it becomes equivalent to max pooling.\n\nAssumes data is configured in a channels-first like format.","ref":"Axon.Layers.html#global_lp_pool/2","title":"Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"* `input` - {batch_size, s1, ..., sN, features}","ref":"Axon.Layers.html#global_lp_pool/2-parameter-shapes","title":"Parameter Shapes - Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"* `:keep_axes` - option to keep reduced axes with size 1 for each reduced\n dimension. Defaults to `false`\n * `:norm` - $p$ in above function. Defaults to 2","ref":"Axon.Layers.html#global_lp_pool/2-options","title":"Options - Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"iex> Axon.Layers.global_lp_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), norm: 1, channels: :first)\n #Nx.Tensor \n\n iex> Axon.Layers.global_lp_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 16}), keep_axes: true, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#global_lp_pool/2-examples","title":"Examples - Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"Functional implementation of global max pooling which computes maximums across\nthe spatial dimensions of the input such that the only remaining dimensions are\nthe batch and feature dimensions.\n\nAssumes data is configured in a channels-first like format.","ref":"Axon.Layers.html#global_max_pool/2","title":"Axon.Layers.global_max_pool/2","type":"function"},{"doc":"* `input` - {batch_size, s1, ..., sN, features}","ref":"Axon.Layers.html#global_max_pool/2-parameter-shapes","title":"Parameter Shapes - Axon.Layers.global_max_pool/2","type":"function"},{"doc":"* `:keep_axes` - option to keep reduced axes with size 1 for each reduced\n dimension. Defaults to `false`","ref":"Axon.Layers.html#global_max_pool/2-options","title":"Options - Axon.Layers.global_max_pool/2","type":"function"},{"doc":"iex> Axon.Layers.global_max_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)\n #Nx.Tensor \n\n iex> Axon.Layers.global_max_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), keep_axes: true, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#global_max_pool/2-examples","title":"Examples - Axon.Layers.global_max_pool/2","type":"function"},{"doc":"Functional implementation of group normalization.\n\nNormalizes the input by reshaping input into `:num_groups`\ngroups and then calculating the mean and variance along\nevery dimension but the input batch dimension.\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. This method does\nnot maintain an EMA of mean and variance.","ref":"Axon.Layers.html#group_norm/4","title":"Axon.Layers.group_norm/4","type":"function"},{"doc":"* `:num_groups` - Number of groups.\n\n * `:epsilon` - numerical stability term. 
$\\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes and group shape for mean and variance calculation.","ref":"Axon.Layers.html#group_norm/4-options","title":"Options - Axon.Layers.group_norm/4","type":"function"},{"doc":"* [Group Normalization](https://arxiv.org/abs/1803.08494v3)","ref":"Axon.Layers.html#group_norm/4-references","title":"References - Axon.Layers.group_norm/4","type":"function"},{"doc":"","ref":"Axon.Layers.html#gru/7","title":"Axon.Layers.gru/7","type":"function"},{"doc":"GRU Cell.\n\nWhen combined with `Axon.Layers.*_unroll`, implements a\nGRU-based RNN. More memory efficient than traditional LSTM.","ref":"Axon.Layers.html#gru_cell/8","title":"Axon.Layers.gru_cell/8","type":"function"},{"doc":"* [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](https://arxiv.org/pdf/1412.3555v1.pdf)","ref":"Axon.Layers.html#gru_cell/8-references","title":"References - Axon.Layers.gru_cell/8","type":"function"},{"doc":"","ref":"Axon.Layers.html#hard_sigmoid/2","title":"Axon.Layers.hard_sigmoid/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#hard_silu/2","title":"Axon.Layers.hard_silu/2","type":"function"},{"doc":"Functional implementation of instance normalization.\n\nNormalizes the input by calculating mean and variance of the\ninput tensor along the spatial dimensions of the input.\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. If `training?` is\ntrue, this method will compute a new mean and variance, and return\nthe updated `ra_mean` and `ra_var`. Otherwise, it will just compute\ninstance norm from the given ra_mean and ra_var.","ref":"Axon.Layers.html#instance_norm/6","title":"Axon.Layers.instance_norm/6","type":"function"},{"doc":"* `:epsilon` - numerical stability term. $\\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes for mean and variance calculation.\n\n * `:momentum` - momentum to use for EMA update.\n\n * `:training?` - if true, uses training mode instance norm. Defaults to false.","ref":"Axon.Layers.html#instance_norm/6-options","title":"Options - Axon.Layers.instance_norm/6","type":"function"},{"doc":"* [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022v3)","ref":"Axon.Layers.html#instance_norm/6-references","title":"References - Axon.Layers.instance_norm/6","type":"function"},{"doc":"Functional implementation of layer normalization.\n\nNormalizes the input by calculating mean and variance of the\ninput tensor along the given feature dimension `:channel_index`.\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. This method does\nnot maintain an EMA of mean and variance.","ref":"Axon.Layers.html#layer_norm/4","title":"Axon.Layers.layer_norm/4","type":"function"},{"doc":"* `:epsilon` - numerical stability term. 
$\\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes for mean and variance calculation.","ref":"Axon.Layers.html#layer_norm/4-options","title":"Options - Axon.Layers.layer_norm/4","type":"function"},{"doc":"","ref":"Axon.Layers.html#leaky_relu/2","title":"Axon.Layers.leaky_relu/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#log_softmax/2","title":"Axon.Layers.log_softmax/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#log_sumexp/2","title":"Axon.Layers.log_sumexp/2","type":"function"},{"doc":"Functional implementation of a general dimensional power average\npooling layer.\n\nPooling is applied to the spatial dimension of the input tensor.\nPower average pooling computes the following function on each\nvalid window of the input tensor:\n\n$$f(X) = \\sqrt[p]{\\sum_{x \\in X} x^{p}}$$\n\nWhere $p$ is given by the keyword argument `:norm`. As $p$ approaches\ninfinity, it becomes equivalent to max pooling.","ref":"Axon.Layers.html#lp_pool/2","title":"Axon.Layers.lp_pool/2","type":"function"},{"doc":"* `:norm` - $p$ from above equation. Defaults to 2.\n\n * `:kernel_size` - window size. Rank must match spatial dimension\n of the input tensor. Required.\n\n * `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to size of kernel.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:window_dilations` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Can be scalar or list whose length matches the number of\n spatial dimensions in the input tensor. Defaults to `1` or no\n dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#lp_pool/2-options","title":"Options - Axon.Layers.lp_pool/2","type":"function"},{"doc":"iex> t = Nx.tensor([[[0.9450, 0.4684, 1.8146], [1.2663, 0.4354, -0.0781], [-0.4759, 0.3251, 0.8742]]], type: {:f, 32})\n iex> Axon.Layers.lp_pool(t, kernel_size: 2, norm: 2, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#lp_pool/2-examples","title":"Examples - Axon.Layers.lp_pool/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#lstm/7","title":"Axon.Layers.lstm/7","type":"function"},{"doc":"LSTM Cell.\n\nWhen combined with `Axon.Layers.*_unroll`, implements an\nLSTM-based RNN.","ref":"Axon.Layers.html#lstm_cell/8","title":"Axon.Layers.lstm_cell/8","type":"function"},{"doc":"* [Long Short-Term Memory](http://www.bioinf.jku.at/publications/older/2604.pdf)","ref":"Axon.Layers.html#lstm_cell/8-references","title":"References - Axon.Layers.lstm_cell/8","type":"function"},{"doc":"Functional implementation of a general dimensional max pooling layer.\n\nPooling is applied to the spatial dimension of the input tensor.\nMax pooling returns the maximum element in each valid window of\nthe input tensor. It is often used after convolutional layers\nto downsample the input even further.","ref":"Axon.Layers.html#max_pool/2","title":"Axon.Layers.max_pool/2","type":"function"},{"doc":"* `kernel_size` - window size. Rank must match spatial dimension\n of the input tensor. Required.\n\n * `:strides` - kernel strides. 
Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to size of kernel.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:window_dilations` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Can be scalar or list whose length matches the number of\n spatial dimensions in the input tensor. Defaults to `1` or no\n dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#max_pool/2-options","title":"Options - Axon.Layers.max_pool/2","type":"function"},{"doc":"iex> t = Nx.tensor([[\n ...> [0.051500000059604645, -0.7042999863624573, -0.32899999618530273],\n ...> [-0.37130001187324524, 1.6191999912261963, -0.11829999834299088],\n ...> [0.7099999785423279, 0.7282999753952026, -0.18639999628067017]]], type: {:f, 32})\n iex> Axon.Layers.max_pool(t, kernel_size: 2, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#max_pool/2-examples","title":"Examples - Axon.Layers.max_pool/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#multiply/2","title":"Axon.Layers.multiply/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#padding_config_transform/2","title":"Axon.Layers.padding_config_transform/2","type":"function"},{"doc":"Resizes a batch of tensors to the given shape using one of a\nnumber of sampling methods.\n\nRequires input option `:size` which should be a tuple specifying\nthe resized spatial dimensions of the input tensor. Input tensor\nmust be at least rank 3, with fixed `batch` and `channel` dimensions.\nResizing will upsample or downsample using the given resize method.","ref":"Axon.Layers.html#resize/2","title":"Axon.Layers.resize/2","type":"function"},{"doc":"* `:size` - a tuple specifying the resized spatial dimensions.\n Required.\n\n * `:method` - the resizing method to use, either of `:nearest`,\n `:bilinear`, `:bicubic`, `:lanczos3`, `:lanczos5`. Defaults to\n `:nearest`.\n\n * `:antialias` - whether an anti-aliasing filter should be used\n when downsampling. This has no effect with upsampling. 
Defaults\n to `true`.\n\n * `:channels` - channels location, either `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#resize/2-options","title":"Options - Axon.Layers.resize/2","type":"function"},{"doc":"iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})\n iex> Axon.Layers.resize(img, size: {4, 4}, channels: :first)\n #Nx.Tensor \n\n#","ref":"Axon.Layers.html#resize/2-examples","title":"Examples - Axon.Layers.resize/2","type":"function"},{"doc":"iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})\n iex> Axon.Layers.resize(img, size: {4, 4}, method: :foo)\n ** (ArgumentError) expected :method to be either of :nearest, :bilinear, :bicubic, :lanczos3, :lanczos5, got: :foo","ref":"Axon.Layers.html#resize/2-error-cases","title":"Error cases - Axon.Layers.resize/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#selu/2","title":"Axon.Layers.selu/2","type":"function"},{"doc":"Functional implementation of a 2-dimensional separable depthwise\nconvolution.\n\nThe 2-d depthwise separable convolution performs 2 depthwise convolutions\neach over 1 spatial dimension of the input.","ref":"Axon.Layers.html#separable_conv2d/6","title":"Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`\n * `k1` - `{output_channels, 1, kernel_spatial0, 1}`\n * `b1` - `{output_channels}` or `{}`\n * `k2` - `{output_channels, 1, 1, kernel_spatial1}`\n * `b2` - `{output_channels}` or `{}`\n\n `output_channels` must be a multiple of the input channels.","ref":"Axon.Layers.html#separable_conv2d/6-parameter-shapes","title":"Parameter Shapes - Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `input_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#separable_conv2d/6-options","title":"Options - Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"* [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)","ref":"Axon.Layers.html#separable_conv2d/6-references","title":"References - Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"Functional implementation of a 3-dimensional separable depthwise\nconvolution.\n\nThe 3-d depthwise separable convolution performs 3 depthwise convolutions\neach over 1 spatial dimension of the input.","ref":"Axon.Layers.html#separable_conv3d/8","title":"Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, input_spatial1, input_spatial2}`\n * `k1` - `{output_channels, 1, kernel_spatial0, 1, 1}`\n * `b1` - `{output_channels}` or `{}`\n * `k2` - `{output_channels, 1, 1, kernel_spatial1, 1}`\n * `b2` - `{output_channels}` or `{}`\n * `k3` - `{output_channels, 1, 1, 1, kernel_spatial2}`\n * `b3` - `{output_channels}` or `{}`\n\n `output_channels` must be a multiple of the input channels.","ref":"Axon.Layers.html#separable_conv3d/8-parameter-shapes","title":"Parameter Shapes - Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `input_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#separable_conv3d/8-options","title":"Options - Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"* [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)","ref":"Axon.Layers.html#separable_conv3d/8-references","title":"References - Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"","ref":"Axon.Layers.html#softmax/2","title":"Axon.Layers.softmax/2","type":"function"},{"doc":"Functional implementation of an n-dimensional spatial\ndropout layer.\n\nApplies a mask to entire feature maps instead of individual\nelements. This is done by calculating a mask shape equal to\nthe spatial dimensions of the input tensor with 1 channel,\nand then broadcasting the mask across the feature dimension\nof the input tensor.","ref":"Axon.Layers.html#spatial_dropout/3","title":"Axon.Layers.spatial_dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n will be dropped. Required.\n\n * `:noise_shape` - input noise shape. 
Shape of `mask` which can be useful\n for broadcasting `mask` across feature channels or other dimensions.\n Defaults to shape of input tensor.","ref":"Axon.Layers.html#spatial_dropout/3-options","title":"Options - Axon.Layers.spatial_dropout/3","type":"function"},{"doc":"* [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)","ref":"Axon.Layers.html#spatial_dropout/3-references","title":"References - Axon.Layers.spatial_dropout/3","type":"function"},{"doc":"Statically unrolls an RNN.\n\nUnrolls implement a `scan` operation which applies a\ntransformation on the leading axis of `input_sequence` carrying\nsome state. In this instance `cell_fn` is an RNN cell function\nsuch as `lstm_cell` or `gru_cell`.\n\nThis function inlines the unrolling of the sequence such that\nthe entire operation appears as a part of the compilation graph.\nThis makes it suitable for shorter sequences.","ref":"Axon.Layers.html#static_unroll/7","title":"Axon.Layers.static_unroll/7","type":"function"},{"doc":"","ref":"Axon.Layers.html#subtract/2","title":"Axon.Layers.subtract/2","type":"function"},{"doc":"Implementations of loss-scalers for use in mixed precision\ntraining.\n\nLoss scaling is used to prevent underflow when using mixed\nprecision during the model training process. Each loss-scale\nimplementation here returns a tuple of the functions:\n\n {init_fn, scale_fn, unscale_fn, adjust_fn} = Axon.LossScale.static(Nx.pow(2, 15))\n\nYou can use these to scale/unscale loss and gradients as well\nas adjust the loss scale state.\n\n`Axon.Loop.trainer/3` builds loss-scaling in by default. You\ncan reference the `Axon.Loop.train_step/3` implementation to\nsee how loss-scaling is applied in practice.","ref":"Axon.LossScale.html","title":"Axon.LossScale","type":"module"},{"doc":"Implements dynamic loss-scale.","ref":"Axon.LossScale.html#dynamic/1","title":"Axon.LossScale.dynamic/1","type":"function"},{"doc":"Implements identity loss-scale.","ref":"Axon.LossScale.html#identity/1","title":"Axon.LossScale.identity/1","type":"function"},{"doc":"Implements static loss-scale.","ref":"Axon.LossScale.html#static/1","title":"Axon.LossScale.static/1","type":"function"},{"doc":"Loss functions.\n\nLoss functions evaluate predictions with respect to true\ndata, often to measure the divergence between a model's\nrepresentation of the data-generating distribution and the\ntrue representation of the data-generating distribution.\n\nEach loss function is implemented as an element-wise function\nmeasuring the loss with respect to the input target `y_true`\nand input prediction `y_pred`. 
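For a single pair of values, say a target of `0.0` and a prediction of `1.0`, the squared error is `(1.0 - 0.0)^2 = 1.0`; every function here applies this kind of computation element-wise across its inputs. 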
As an example, the `mean_squared_error/2`\nloss function produces a tensor whose values are the mean squared\nerror between targets and predictions:\n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred)\n #Nx.Tensor \n\nIt's common to compute the loss across an entire minibatch.\nYou can easily do so by specifying a `:reduction` mode, or\nby composing one of these with an `Nx` reduction method:\n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\nYou can even compose loss functions:\n\n defn my_strange_loss(y_true, y_pred) do\n y_true\n |> Axon.Losses.mean_squared_error(y_pred)\n |> Axon.Losses.binary_cross_entropy(y_pred)\n |> Nx.sum()\n end\n\nOr, more commonly, you can combine loss functions with penalties for\nregularization:\n\n defn regularized_loss(params, y_true, y_pred) do\n loss = Axon.Losses.mean_squared_error(y_true, y_pred)\n penalty = l2_penalty(params)\n Nx.sum(loss) + penalty\n end\n\nAll of the functions in this module are implemented as\nnumerical functions and can be JIT or AOT compiled with\nany supported `Nx` compiler.","ref":"Axon.Losses.html","title":"Axon.Losses","type":"module"},{"doc":"Applies label smoothing to the given labels.\n\nLabel smoothing is a regularization technique which shrinks targets\ntowards a uniform distribution. Label smoothing can improve model\ngeneralization.","ref":"Axon.Losses.html#apply_label_smoothing/3","title":"Axon.Losses.apply_label_smoothing/3","type":"function"},{"doc":"* `:smoothing` - smoothing factor. Defaults to 0.1","ref":"Axon.Losses.html#apply_label_smoothing/3-options","title":"Options - Axon.Losses.apply_label_smoothing/3","type":"function"},{"doc":"* [Rethinking the Inception Architecture for Computer Vision](https://arxiv.org/abs/1512.00567)","ref":"Axon.Losses.html#apply_label_smoothing/3-references","title":"References - Axon.Losses.apply_label_smoothing/3","type":"function"},{"doc":"Binary cross-entropy loss function.\n\n$$l_i = -\\frac{1}{2}(\\hat{y_i} \\cdot \\log(y_i) + (1 - \\hat{y_i}) \\cdot \\log(1 - y_i))$$\n\nBinary cross-entropy loss is most often used in binary classification problems.\nBy default, it expects `y_pred` to encode probabilities from `[0.0, 1.0]`, typically\nas the output of the sigmoid function or another function which squeezes values\nbetween 0 and 1. You may optionally set `from_logits: true` to specify that values\nare being sent as non-normalized values (e.g. weights with possibly infinite range).\nIn this case, input values will be encoded as probabilities by applying the logistic\nsigmoid function before computing loss.","ref":"Axon.Losses.html#binary_cross_entropy/3","title":"Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#binary_cross_entropy/3-argument-shapes","title":"Argument Shapes - Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n\n * `:negative_weight` - class weight for `0` class useful for scaling loss\n by importance of class. Defaults to `1.0`.\n\n * `:positive_weight` - class weight for `1` class useful for scaling loss\n by importance of class. 
Defaults to `1.0`.\n\n * `:from_logits` - whether `y_pred` is a logits tensor. Defaults to `false`.","ref":"Axon.Losses.html#binary_cross_entropy/3-options","title":"Options - Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])\n iex> Axon.Losses.binary_cross_entropy(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])\n iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])\n iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#binary_cross_entropy/3-examples","title":"Examples - Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"Categorical cross-entropy loss function.\n\n$$l_i = -\\sum_i^C \\hat{y_i} \\cdot \\log(y_i)$$\n\nCategorical cross-entropy is typically used for multi-class classification problems.\nBy default, it expects `y_pred` to encode a probability distribution along the last\naxis. You can specify `from_logits: true` to indicate `y_pred` is a logits tensor.\n\n # Batch size of 3 with 3 target classes\n y_true = Nx.tensor([0, 2, 1])\n y_pred = Nx.tensor([[0.2, 0.8, 0.0], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]])","ref":"Axon.Losses.html#categorical_cross_entropy/3","title":"Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#categorical_cross_entropy/3-argument-shapes","title":"Argument Shapes - Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n\n * `:class_weights` - 1-D list corresponding to weight of each\n class useful for scaling loss according to importance of class. Tensor\n size must match number of classes in dataset. Defaults to `1.0` for all\n classes.\n\n * `:from_logits` - whether `y_pred` is a logits tensor. Defaults to `false`.\n\n * `:sparse` - whether `y_true` encodes a \"sparse\" tensor. In this case the\n inputs are integer values corresponding to the target class. 
Defaults to\n `false`.","ref":"Axon.Losses.html#categorical_cross_entropy/3-options","title":"Options - Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([1, 2], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum, sparse: true)\n #Nx.Tensor","ref":"Axon.Losses.html#categorical_cross_entropy/3-examples","title":"Examples - Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"Categorical hinge loss function.","ref":"Axon.Losses.html#categorical_hinge/3","title":"Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#categorical_hinge/3-argument-shapes","title":"Argument Shapes - Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#categorical_hinge/3-options","title":"Options - Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])\n iex> Axon.Losses.categorical_hinge(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])\n iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])\n iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#categorical_hinge/3-examples","title":"Examples - Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"Connectionist Temporal Classification loss.","ref":"Axon.Losses.html#connectionist_temporal_classification/3","title":"Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"* `l_true` - $(B)$\n * `y_true` - $(B, S)$\n * `y_pred` - $(B, T, D)$","ref":"Axon.Losses.html#connectionist_temporal_classification/3-argument-shapes","title":"Argument Shapes - Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:sum` or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#connectionist_temporal_classification/3-options","title":"Options - Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"`l_true` contains lengths of target sequences. Nonzero positive values.\n `y_true` contains target sequences. 
Each value represents a class\n of element in range of available classes 0 <= y < D. Blank element\n class is included in this range, but shouldn't be present among\n y_true values. Maximum target sequence length should be less than or equal\n to `y_pred` sequence length: S <= T.\n `y_pred` - log probabilities of classes D along the\n prediction sequence T.","ref":"Axon.Losses.html#connectionist_temporal_classification/3-description","title":"Description - Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"Cosine Similarity error loss function.\n\n$$l_i = \\frac{\\hat{y_i} \\cdot y_i}{\\max(\\lVert \\hat{y_i} \\rVert_2 \\lVert y_i \\rVert_2, \\epsilon)}$$","ref":"Axon.Losses.html#cosine_similarity/3","title":"Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#cosine_similarity/3-argument-shapes","title":"Argument Shapes - Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n * `:axes` - Defaults to `[1]`.\n * `:eps` - Defaults to `1.0e-6`.","ref":"Axon.Losses.html#cosine_similarity/3-options","title":"Options - Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"iex> y_pred = Nx.tensor([[1.0, 0.0], [1.0, 1.0]])\n iex> y_true = Nx.tensor([[0.0, 1.0], [1.0, 1.0]])\n iex> Axon.Losses.cosine_similarity(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Losses.html#cosine_similarity/3-examples","title":"Examples - Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"Hinge loss function.\n\n$$\\frac{1}{C}\\max_i(1 - \\hat{y_i} * y_i, 0)$$","ref":"Axon.Losses.html#hinge/3","title":"Axon.Losses.hinge/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#hinge/3-options","title":"Options - Axon.Losses.hinge/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#hinge/3-argument-shapes","title":"Argument Shapes - Axon.Losses.hinge/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[ 1, 1, -1], [ 1, 1, -1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])\n iex> Axon.Losses.hinge(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[ 1, 1, -1], [ 1, 1, -1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])\n iex> Axon.Losses.hinge(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[ 1, 1, -1], [ 1, 1, -1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])\n iex> Axon.Losses.hinge(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#hinge/3-examples","title":"Examples - Axon.Losses.hinge/3","type":"function"},{"doc":"Huber loss.","ref":"Axon.Losses.html#huber/3","title":"Axon.Losses.huber/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#huber/3-argument-shapes","title":"Argument Shapes - Axon.Losses.huber/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n\n * `:delta` - the point where the Huber loss function changes from quadratic to linear.\n Defaults to `1.0`.","ref":"Axon.Losses.html#huber/3-options","title":"Options - Axon.Losses.huber/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[1], [1.5], [2.0]])\n iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])\n iex> Axon.Losses.huber(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[1], [1.5], [2.0]])\n iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])\n iex> Axon.Losses.huber(y_true, y_pred, reduction: :mean)\n #Nx.Tensor","ref":"Axon.Losses.html#huber/3-examples","title":"Examples - Axon.Losses.huber/3","type":"function"},{"doc":"Kullback-Leibler divergence loss function.\n\n$$l_i = \\sum_i^C \\hat{y_i} \\cdot \\log(\\frac{\\hat{y_i}}{y_i})$$","ref":"Axon.Losses.html#kl_divergence/3","title":"Axon.Losses.kl_divergence/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#kl_divergence/3-argument-shapes","title":"Argument Shapes - Axon.Losses.kl_divergence/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#kl_divergence/3-options","title":"Options - Axon.Losses.kl_divergence/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})\n iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])\n iex> Axon.Losses.kl_divergence(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})\n iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])\n iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})\n iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])\n iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#kl_divergence/3-examples","title":"Examples - Axon.Losses.kl_divergence/3","type":"function"},{"doc":"Modifies the given loss function to smooth labels prior\nto calculating loss.\n\nSee `apply_label_smoothing/3` for details.","ref":"Axon.Losses.html#label_smoothing/2","title":"Axon.Losses.label_smoothing/2","type":"function"},{"doc":"* `:smoothing` - smoothing factor. Defaults to 0.1","ref":"Axon.Losses.html#label_smoothing/2-options","title":"Options - Axon.Losses.label_smoothing/2","type":"function"},{"doc":"Logarithmic-Hyperbolic Cosine loss function.\n\n$$l_i = \\frac{1}{C} \\sum_i^C (\\hat{y_i} - y_i) + \\log(1 + e^{-2(\\hat{y_i} - y_i)}) - \\log(2)$$","ref":"Axon.Losses.html#log_cosh/3","title":"Axon.Losses.log_cosh/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#log_cosh/3-argument-shapes","title":"Argument Shapes - Axon.Losses.log_cosh/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#log_cosh/3-options","title":"Options - Axon.Losses.log_cosh/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])\n iex> Axon.Losses.log_cosh(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])\n iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])\n iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#log_cosh/3-examples","title":"Examples - Axon.Losses.log_cosh/3","type":"function"},{"doc":"Margin ranking loss function.\n\n$$l_i = \\max(0, -\\hat{y_i} * (y^{(1)}_i - y^{(2)}_i) + \\alpha)$$","ref":"Axon.Losses.html#margin_ranking/3","title":"Axon.Losses.margin_ranking/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#margin_ranking/3-options","title":"Options - Axon.Losses.margin_ranking/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})\n iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})\n iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})\n iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2})\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})\n iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})\n iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})\n iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})\n iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})\n iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})\n iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#margin_ranking/3-examples","title":"Examples - Axon.Losses.margin_ranking/3","type":"function"},{"doc":"Mean-absolute error loss function.\n\n$$l_i = \\sum_i |\\hat{y_i} - y_i|$$","ref":"Axon.Losses.html#mean_absolute_error/3","title":"Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#mean_absolute_error/3-argument-shapes","title":"Argument Shapes - Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#mean_absolute_error/3-options","title":"Options - Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_absolute_error(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#mean_absolute_error/3-examples","title":"Examples - Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"Mean-squared error loss function.\n\n$$l_i = \\sum_i (\\hat{y_i} - y_i)^2$$","ref":"Axon.Losses.html#mean_squared_error/3","title":"Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#mean_squared_error/3-argument-shapes","title":"Argument Shapes - Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#mean_squared_error/3-options","title":"Options - Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#mean_squared_error/3-examples","title":"Examples - Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"Poisson loss function.\n\n$$l_i = \\frac{1}{C} \\sum_i^C y_i - (\\hat{y_i} \\cdot \\log(y_i))$$","ref":"Axon.Losses.html#poisson/3","title":"Axon.Losses.poisson/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#poisson/3-argument-shapes","title":"Argument Shapes - Axon.Losses.poisson/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#poisson/3-options","title":"Options - Axon.Losses.poisson/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.poisson(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.poisson(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.poisson(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#poisson/3-examples","title":"Examples - Axon.Losses.poisson/3","type":"function"},{"doc":"Soft margin loss function.\n\n$$l_i = \\sum_i \\frac{\\log(1 + e^{-\\hat{y_i} * y_i})}{N}$$","ref":"Axon.Losses.html#soft_margin/3","title":"Axon.Losses.soft_margin/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#soft_margin/3-options","title":"Options - Axon.Losses.soft_margin/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})\n iex> Axon.Losses.soft_margin(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})\n iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})\n iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#soft_margin/3-examples","title":"Examples - Axon.Losses.soft_margin/3","type":"function"},{"doc":"Metric functions.\n\nMetrics are used to measure and compare the performance\nof models in easy-to-understand terms. Oftentimes,\nneural networks use surrogate loss functions such\nas negative log-likelihood to indirectly optimize a certain\nperformance metric. Metrics such as accuracy, also called\nthe 0-1 loss, do not have useful derivatives (i.e., they\nare information sparse), and are often intractable even\nwith low input dimensions.\n\nDespite not being able to train specifically for certain\nmetrics, it's still useful to track these metrics to\nmonitor the performance of a neural network during training.\nMetrics such as accuracy provide useful feedback during\ntraining, whereas loss can sometimes be difficult to interpret.\n\nYou can attach any of these functions as metrics within the\n`Axon.Loop` API using `Axon.Loop.metric/3`.\n\nAll of the functions in this module are implemented as\nnumerical functions and can be JIT or AOT compiled with\nany supported `Nx` compiler.","ref":"Axon.Metrics.html","title":"Axon.Metrics","type":"module"},{"doc":"Computes the accuracy of the given predictions.\n\nIf the size of the last axis is 1, it performs a binary\naccuracy computation with a threshold of 0.5. 
Otherwise,\ncomputes categorical accuracy.","ref":"Axon.Metrics.html#accuracy/3","title":"Axon.Metrics.accuracy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#accuracy/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.accuracy/3","type":"function"},{"doc":"iex> Axon.Metrics.accuracy(Nx.tensor([[1], [0], [0]]), Nx.tensor([[1], [1], [1]]))\n #Nx.Tensor \n\n iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1], [1, 0], [1, 0]]), Nx.tensor([[0, 1], [1, 0], [0, 1]]))\n #Nx.Tensor \n\n iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0, 1, 0], [0, 1, 0]]))\n #Nx.Tensor","ref":"Axon.Metrics.html#accuracy/3-examples","title":"Examples - Axon.Metrics.accuracy/3","type":"function"},{"doc":"","ref":"Axon.Metrics.html#accuracy_transform/4","title":"Axon.Metrics.accuracy_transform/4","type":"function"},{"doc":"Computes the number of false negative predictions with respect\nto given targets.","ref":"Axon.Metrics.html#false_negatives/3","title":"Axon.Metrics.false_negatives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#false_negatives/3-options","title":"Options - Axon.Metrics.false_negatives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.false_negatives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#false_negatives/3-examples","title":"Examples - Axon.Metrics.false_negatives/3","type":"function"},{"doc":"Computes the number of false positive predictions with respect\nto given targets.","ref":"Axon.Metrics.html#false_positives/3","title":"Axon.Metrics.false_positives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#false_positives/3-options","title":"Options - Axon.Metrics.false_positives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.false_positives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#false_positives/3-examples","title":"Examples - Axon.Metrics.false_positives/3","type":"function"},{"doc":"Calculates the mean absolute error of predictions\nwith respect to targets.\n\n$$l_i = \\sum_i |\\hat{y_i} - y_i|$$","ref":"Axon.Metrics.html#mean_absolute_error/2","title":"Axon.Metrics.mean_absolute_error/2","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#mean_absolute_error/2-argument-shapes","title":"Argument Shapes - Axon.Metrics.mean_absolute_error/2","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Metrics.mean_absolute_error(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#mean_absolute_error/2-examples","title":"Examples - Axon.Metrics.mean_absolute_error/2","type":"function"},{"doc":"Computes the precision of the given predictions with\nrespect to the given targets.","ref":"Axon.Metrics.html#precision/3","title":"Axon.Metrics.precision/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#precision/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.precision/3","type":"function"},{"doc":"* 
`:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#precision/3-options","title":"Options - Axon.Metrics.precision/3","type":"function"},{"doc":"iex> Axon.Metrics.precision(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#precision/3-examples","title":"Examples - Axon.Metrics.precision/3","type":"function"},{"doc":"Computes the recall of the given predictions with\nrespect to the given targets.","ref":"Axon.Metrics.html#recall/3","title":"Axon.Metrics.recall/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#recall/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.recall/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#recall/3-options","title":"Options - Axon.Metrics.recall/3","type":"function"},{"doc":"iex> Axon.Metrics.recall(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#recall/3-examples","title":"Examples - Axon.Metrics.recall/3","type":"function"},{"doc":"Returns a function which computes a running average given current average,\nnew observation, and current iteration.","ref":"Axon.Metrics.html#running_average/1","title":"Axon.Metrics.running_average/1","type":"function"},{"doc":"iex> cur_avg = 0.5\n iex> iteration = 1\n iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> avg_acc = Axon.Metrics.running_average(&Axon.Metrics.accuracy/2)\n iex> avg_acc.(cur_avg, [y_true, y_pred], iteration)\n #Nx.Tensor","ref":"Axon.Metrics.html#running_average/1-examples","title":"Examples - Axon.Metrics.running_average/1","type":"function"},{"doc":"Returns a function which computes a running sum given current sum,\nnew observation, and current iteration.","ref":"Axon.Metrics.html#running_sum/1","title":"Axon.Metrics.running_sum/1","type":"function"},{"doc":"iex> cur_sum = 12\n iex> iteration = 2\n iex> y_true = Nx.tensor([0, 1, 0, 1])\n iex> y_pred = Nx.tensor([1, 1, 0, 1])\n iex> fps = Axon.Metrics.running_sum(&Axon.Metrics.false_positives/2)\n iex> fps.(cur_sum, [y_true, y_pred], iteration)\n #Nx.Tensor","ref":"Axon.Metrics.html#running_sum/1-examples","title":"Examples - Axon.Metrics.running_sum/1","type":"function"},{"doc":"Computes the sensitivity of the given predictions\nwith respect to the given targets.","ref":"Axon.Metrics.html#sensitivity/3","title":"Axon.Metrics.sensitivity/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#sensitivity/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.sensitivity/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#sensitivity/3-options","title":"Options - Axon.Metrics.sensitivity/3","type":"function"},{"doc":"iex> Axon.Metrics.sensitivity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#sensitivity/3-examples","title":"Examples - Axon.Metrics.sensitivity/3","type":"function"},{"doc":"Computes the specificity of the given predictions\nwith respect to the given targets.","ref":"Axon.Metrics.html#specificity/3","title":"Axon.Metrics.specificity/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., 
d_n)$","ref":"Axon.Metrics.html#specificity/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.specificity/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#specificity/3-options","title":"Options - Axon.Metrics.specificity/3","type":"function"},{"doc":"iex> Axon.Metrics.specificity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#specificity/3-examples","title":"Examples - Axon.Metrics.specificity/3","type":"function"},{"doc":"Computes the top-k categorical accuracy.","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3","title":"Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"* `k` - The k in \"top-k\". Defaults to 5.\n * `sparse` - If `y_true` is a sparse tensor. Defaults to `false`.","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3-options","title":"Options - Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([0, 1, 0, 0, 0]), Nx.tensor([0.1, 0.4, 0.3, 0.7, 0.1]), k: 2)\n #Nx.Tensor \n\n iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2)\n #Nx.Tensor \n\n iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0], [2]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2, sparse: true)\n #Nx.Tensor","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3-examples","title":"Examples - Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"Computes the number of true negative predictions with respect\nto given targets.","ref":"Axon.Metrics.html#true_negatives/3","title":"Axon.Metrics.true_negatives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#true_negatives/3-options","title":"Options - Axon.Metrics.true_negatives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.true_negatives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#true_negatives/3-examples","title":"Examples - Axon.Metrics.true_negatives/3","type":"function"},{"doc":"Computes the number of true positive predictions with respect\nto given targets.","ref":"Axon.Metrics.html#true_positives/3","title":"Axon.Metrics.true_positives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#true_positives/3-options","title":"Options - Axon.Metrics.true_positives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.true_positives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#true_positives/3-examples","title":"Examples - Axon.Metrics.true_positives/3","type":"function"},{"doc":"Abstraction for modeling a reduction of a dataset with an accumulated\nstate for a number of epochs.\n\nInspired heavily by [PyTorch Ignite](https://pytorch.org/ignite/index.html).\n\nThe main abstraction is the `%Axon.Loop{}` struct, which controls a nested\nreduction of the form:\n\n 
Enum.reduce(1..max_epochs, state, fn epoch, state ->\n Enum.reduce(data, state, &batch_step/2)\n end)\n\n`data` is assumed to be an `Enumerable` or `Stream` of input data which is\nhandled by a processing function, `batch_step`. The purpose of the loop\nabstraction is to take away much of the boilerplate code used in solving machine\nlearning tasks. Tasks such as normalizing a dataset, hyperparameter optimization,\nor training machine learning models boil down to writing one function:\n\n defn batch_step(batch, state) do\n # ...do something with batch...\n updated_state\n end\n\nFor tasks such as training a neural network, `state` will encapsulate things\nsuch as model and optimizer state. For supervised learning tasks, `batch_step`\nmight look something like:\n\n defn batch_step({inputs, targets}, state) do\n %{parameters: params, optimizer_state: optim_state} = state\n\n gradients = grad(params, &objective_fn.(&1, inputs, targets))\n {updates, new_optim_state} = optimizer.(optim_state, params, gradients)\n\n new_params = apply_updates(params, updates)\n\n %{parameters: new_params, optimizer_state: new_optim_state}\n end\n\n`batch_step` takes a batch of `{input, target}` pairs and the current state,\nand updates the model parameters based on the gradients received from some arbitrary\nobjective function. This function will run in a nested loop, iterating over the entire\ndataset for `N` epochs before finally returning the trained model state. By defining\none function, we've created a training loop that works for most machine learning models.\n\nIn actuality, the loop abstraction accumulates a struct, `%Axon.Loop.State{}`, which looks\nlike (assuming `container` is a generic Elixir container of tensors, e.g. map, tuple, etc.):\n\n %Axon.Loop.State{\n epoch: integer(),\n max_epoch: integer(),\n iteration: integer(),\n max_iteration: integer(),\n metrics: map(string(), container()),\n times: map(integer(), integer()),\n step_state: container()\n }\n\n`batch_step` takes in the batch and the step state field and returns a `step_state`,\nwhich is a generic container of state accumulated at each iteration. The rest of the fields\nin the state struct are updated automatically behind the scenes.\n\nThe loop must start from some initial step state, thus most tasks must also provide\nan additional initialization function to provide some starting point for the step\nstate. For machine learning tasks, the initialization function will return things like\ninitial model parameters and optimizer state.\n\nTypically, the final output of the loop is the accumulated final state; however, you\nmay optionally apply an output transform to extract specific values at the end of the\nloop. For example, `Axon.Loop.trainer/4` by default extracts trained model state:\n\n output_transform = fn state ->\n state.step_state[:model_state]\n end","ref":"Axon.Loop.html","title":"Axon.Loop","type":"module"},{"doc":"The core of the Axon loop is the init and step functions. The initialization is an\narity-0 function which provides an initial step state:\n\n init = fn ->\n %{params: Axon.init(model)}\n end\n\nWhile the step function is the `batch_step` function mentioned earlier:\n\n step = fn data, state ->\n new_state = # ...do something...\n new_state\n end\n\nNote that any optimization and training anonymous functions that need to be used in the\n`batch_step` function can be passed as extra arguments. 
For example:\n\n step_with_training_arguments = fn data, state, optimizer_update_fn, state_update_fn ->\n # ...do something...\n end\n\n step = &(step_with_training_arguments.(&1, &2, actual_optimizer_update_fn, actual_state_update_fn))","ref":"Axon.Loop.html#module-initialize-and-step","title":"Initialize and Step - Axon.Loop","type":"module"},{"doc":"Oftentimes you want to compute metrics associated with your training iterations.\nTo accomplish this, you can attach metrics to each `Axon.Loop`. Assuming a `batch_step`\nfunction which looks like:\n\n defn batch_step({inputs, targets}, state) do\n %{parameters: params, optimizer_state: optim_state} = state\n\n gradients = grad(params, &objective_fn.(&1, inputs, targets))\n {updates, new_optim_state} = optimizer.(optim_state, params, gradients)\n\n new_params = apply_updates(params, updates)\n\n # Shown for simplicity, you can optimize this by calculating preds\n # along with the gradient calculation\n preds = model_fn.(params, inputs)\n\n %{\n y_true: targets,\n y_pred: preds,\n parameters: new_params,\n optimizer_state: new_optim_state\n }\n end\n\nYou can attach metrics to this by using `Axon.Loop.metric/5`:\n\n Axon.Loop.loop(&batch_step/2)\n |> Axon.Loop.metric(:accuracy, \"Accuracy\", :running_average, fn %{y_true: y_, y_pred: y} -> [y_, y] end)\n |> Axon.Loop.run(data)\n\nBecause metrics work directly on `step_state`, you typically need to provide an output\ntransform to indicate which values should be passed to your metric function. By default,\nAxon assumes a supervised training task with the fields `:y_true` and `:y_pred` present\nin the step state. See `Axon.Loop.metric/5` for more information.\n\nMetrics will be tracked in the loop state using the user-provided key. Metrics integrate\nseamlessly with the supervised metrics defined in `Axon.Metrics`. You can also use metrics\nto keep running averages of some values in the original dataset.","ref":"Axon.Loop.html#module-metrics","title":"Metrics - Axon.Loop","type":"module"},{"doc":"You can instrument several points in the loop using event handlers. By default, several events\nare fired when running a loop:\n\n events = [\n :started, # After loop state initialization\n :epoch_started, # On epoch start\n :iteration_started, # On iteration start\n :iteration_completed, # On iteration complete\n :epoch_completed, # On epoch complete\n :epoch_halted, # On epoch halt, if early halted\n ]\n\nYou can attach event handlers to events using `Axon.Loop.handle_event/4`:\n\n loop\n |> Axon.Loop.handle_event(:iteration_completed, &log_metrics/1, every: 100)\n |> Axon.Loop.run(data)\n\nThe above will trigger `log_metrics/1` every 100 times the `:iteration_completed` event\nis fired. Event handlers must return a tuple `{status, state}`, where `status` is an\natom with one of the following values:\n\n :continue # Continue epoch, continue looping\n :halt_epoch # Halt the epoch, continue looping\n :halt_loop # Halt looping\n\nAnd `state` is an updated `Axon.Loop.State` struct. Handler functions take as input\nthe current loop state.\n\nIt's important to note that event handlers are triggered in the order they are attached\nto the loop. If you have two handlers on the same event, they will trigger in order:\n\n loop\n |> Axon.Loop.handle_event(:epoch_completed, &normalize_state/1) # Runs first\n |> Axon.Loop.handle_event(:epoch_completed, &log_state/1) # Runs second\n\nYou may provide filters to filter when event handlers trigger. 
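As a quick sketch (the `halt_on_loss` handler, its `0.01` threshold, and the `every: 500` filter are all illustrative, assuming a trainer-style loop which tracks a running \"loss\" metric), a handler attached with a filter might look like:\n\n halt_on_loss = fn %Axon.Loop.State{metrics: metrics} = state ->\n # halt the whole loop once the running loss drops below the threshold\n if Nx.to_number(metrics[\"loss\"]) < 0.01 do\n {:halt_loop, state}\n else\n {:continue, state}\n end\n end\n\n loop\n |> Axon.Loop.handle_event(:iteration_completed, halt_on_loss, every: 500)\n\n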
See `Axon.Loop.handle_event/4`\nfor more details on valid filters.","ref":"Axon.Loop.html#module-events-and-handlers","title":"Events and Handlers - Axon.Loop","type":"module"},{"doc":"Axon loops are typically created from one of the factory functions provided in this\nmodule:\n\n * `Axon.Loop.loop/3` - Creates a loop from a step function and optional initialization\n and output transform functions.\n\n * `Axon.Loop.trainer/3` - Creates a supervised training loop from model, loss, and\n optimizer.\n\n * `Axon.Loop.evaluator/1` - Creates a supervised evaluator loop from model.","ref":"Axon.Loop.html#module-factories","title":"Factories - Axon.Loop","type":"module"},{"doc":"In order to execute a loop, you should use `Axon.Loop.run/3`:\n\n Axon.Loop.run(loop, data, epochs: 10)","ref":"Axon.Loop.html#module-running-loops","title":"Running loops - Axon.Loop","type":"module"},{"doc":"At times you may want to resume a loop from some previous state. You can accomplish this\nwith `Axon.Loop.from_state/2`:\n\n loop\n |> Axon.Loop.from_state(state)\n |> Axon.Loop.run(data)","ref":"Axon.Loop.html#module-resuming-loops","title":"Resuming loops - Axon.Loop","type":"module"},{"doc":"Adds a handler function which saves loop checkpoints on a given\nevent, optionally with metric-based criteria.\n\nBy default, loop checkpoints will be saved at the end of every\nepoch in the current working directory under the `checkpoint/`\npath. Checkpoints are serialized representations of loop state\nobtained from `Axon.Loop.serialize_state/2`. Serialization\noptions will be forwarded to `Axon.Loop.serialize_state/2`.\n\nYou can customize checkpoint events by passing `:event` and `:filter`\noptions:\n\n loop\n |> Axon.Loop.checkpoint(event: :iteration_completed, filter: [every: 50])\n\nCheckpoints are saved under the `checkpoint/` directory with a pattern\nof `checkpoint_{epoch}_{iteration}.ckpt`. You can customize the path and pattern\nwith the `:path` and `:file_pattern` options:\n\n my_file_pattern =\n fn %Axon.Loop.State{epoch: epoch, iteration: iter} ->\n \"checkpoint_#{epoch}_#{iter}\"\n end\n\n loop\n |> Axon.Loop.checkpoint(path: \"my_checkpoints\", file_pattern: my_file_pattern)\n\nIf you'd like to only save checkpoints based on some metric criteria,\nyou can specify the `:criteria` option. `:criteria` must be a valid key\nin metrics:\n\n loop\n |> Axon.Loop.checkpoint(criteria: \"validation_loss\")\n\nThe default criteria mode is `:min`, meaning the min score metric will\nbe considered \"best\" when deciding to save on a given event. Valid modes\nare `:min` and `:max`:\n\n loop\n |> Axon.Loop.checkpoint(criteria: \"validation_accuracy\", mode: :max)","ref":"Axon.Loop.html#checkpoint/2","title":"Axon.Loop.checkpoint/2","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:epoch_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.\n\n * `:patience` - number of given events to wait for improvement. Defaults\n to `3`.\n\n * `:mode` - whether given metric is being minimized or maximized. One of\n `:min`, `:max` or an arity-1 function which returns `true` or `false`.\n Defaults to `:min`.\n\n * `:path` - path to directory to save checkpoints. Defaults to `checkpoint`\n\n * `:file_pattern` - arity-1 function which returns a string file pattern\n based on the current loop state. 
Defaults to saving checkpoints to files\n `checkpoint_#{epoch}_#{iteration}.ckpt`.","ref":"Axon.Loop.html#checkpoint/2-options","title":"Options - Axon.Loop.checkpoint/2","type":"function"},{"doc":"Deserializes loop state from a binary.\n\nIt is the opposite of `Axon.Loop.serialize_state/2`.\n\nBy default, the step state is deserialized using `Nx.deserialize/2`;\nhowever, this behavior can be changed if step state is an application\nspecific container. For example, if you introduce your own data\nstructure into step_state and you customized the serialization logic,\n`Nx.deserialize/2` will not be sufficient for deserialization - you\nmust pass custom logic with `:deserialize_step_state`.","ref":"Axon.Loop.html#deserialize_state/2","title":"Axon.Loop.deserialize_state/2","type":"function"},{"doc":"Adds a handler function which halts a loop if the given\nmetric does not improve between events.\n\nBy default, this will run after each epoch and track the\nimprovement of a given metric.\n\nYou must specify a metric to monitor and the metric must\nbe present in the loop state. Typically, this will be\na validation metric:\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.validate(model, val_data)\n |> Axon.Loop.early_stop(\"validation_accuracy\")\n\nIt's important to remember that handlers are executed in the\norder they are added to the loop. For example, if you'd like\nto checkpoint a loop after every epoch and use early stopping,\nmost likely you want to add the checkpoint handler before\nthe early stopping handler:\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.checkpoint()\n |> Axon.Loop.early_stop(\"accuracy\")\n\nThat will ensure checkpoint is always fired, even if the loop\nexited early.","ref":"Axon.Loop.html#early_stop/3","title":"Axon.Loop.early_stop/3","type":"function"},{"doc":"Creates a supervised evaluation step from a model and model state.\n\nThis function is intended for more fine-grained control over the loop\ncreation process. It returns a tuple of `{init_fn, step_fn}` where\n`init_fn` returns an initial step state and `step_fn` performs a\nsingle evaluation step.","ref":"Axon.Loop.html#eval_step/1","title":"Axon.Loop.eval_step/1","type":"function"},{"doc":"Creates a supervised evaluator from a model.\n\nAn evaluator can be used for things such as testing and validation of models\nafter or during training. It assumes `model` is an Axon struct, container of\nstructs, or a tuple of `init` / `apply` functions. `model_state` must be a\ncontainer usable from within `model`.\n\nThe evaluator returns a step state of the form:\n\n %{\n y_true: labels,\n y_pred: predictions\n }\n\nSuch that you can attach any number of supervised metrics to the evaluation\nloop:\n\n model\n |> Axon.Loop.evaluator()\n |> Axon.Loop.metric(\"Accuracy\", :accuracy)\n\nYou must pass a compatible trained model state to `Axon.Loop.run/4` when using\nsupervised evaluation loops. 
For example, if you've bound the result of a training\nrun to `trained_model_state`, you can run the trained model through an evaluation\nrun like this:\n\n model\n |> Axon.Loop.evaluator()\n |> Axon.Loop.run(data, trained_model_state, compiler: EXLA)\n\nThis function applies an output transform which returns the map of metrics accumulated\nover the given loop.","ref":"Axon.Loop.html#evaluator/1","title":"Axon.Loop.evaluator/1","type":"function"},{"doc":"Attaches `state` to the given loop in order to resume looping\nfrom a previous state.\n\nIt's important to note that a loop's attached state takes precedence\nover defined initialization functions. Given an initialization function:\n\n defn init_state(), do: %{foo: 1, bar: 2}\n\nAnd an attached state:\n\n state = %State{step_state: %{foo: 2, bar: 3}}\n\n`init_state/0` will never execute, and instead the initial step state\nof `%{foo: 2, bar: 3}` will be used.","ref":"Axon.Loop.html#from_state/2","title":"Axon.Loop.from_state/2","type":"function"},{"doc":"Adds a handler function to the loop which will be triggered on `event`\nwith an optional filter.\n\nEvents take place at different points during loop execution. The default\nevents are:\n\n events = [\n :started, # After loop state initialization\n :epoch_started, # On epoch start\n :iteration_started, # On iteration start\n :iteration_completed, # On iteration complete\n :epoch_completed, # On epoch complete\n :epoch_halted, # On epoch halt, if early halted\n ]\n\nGenerally, event handlers are side-effecting operations which provide some\nsort of inspection into the loop's progress. It's important to note that\nif you define multiple handlers to be triggered on the same event, they\nwill execute in order from when they were attached to the training\nloop:\n\n loop\n |> Axon.Loop.handle_event(:epoch_started, &normalize_step_state/1) # executes first\n |> Axon.Loop.handle_event(:epoch_started, &log_step_state/1) # executes second\n\nThus, if you have separate handlers which alter or depend on loop state,\nyou need to ensure they are ordered correctly, or combined into a single\nevent handler for maximum control over execution.\n\n`event` must be an atom representing the event to trigger `handler` or a\nlist of atoms indicating `handler` should be triggered on multiple events.\n`event` may be `:all` which indicates the handler should be triggered on\nevery event during loop processing.\n\n`handler` must be an arity-1 function which takes as input loop state and\nreturns `{status, state}`, where `status` is an atom with one of the following\nvalues:\n\n :continue # Continue epoch, continue looping\n :halt_epoch # Halt the epoch, continue looping\n :halt_loop # Halt looping\n\n`filter` is an atom representing a valid filter predicate, a keyword of\npredicate-value pairs, or a function which takes loop state and returns\n`true`, indicating the handler should run, or `false`, indicating the\nhandler should not run. 
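For instance, a minimal sketch of a function filter (with `&log_state/1` standing in for any handler from the earlier examples) which fires only on even-numbered epochs:\n\n even_epochs = fn %Axon.Loop.State{epoch: epoch} -> rem(epoch, 2) == 0 end\n\n loop\n |> Axon.Loop.handle_event(:epoch_completed, &log_state/1, even_epochs)\n\n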
Valid predicates are:\n\n :always # Always trigger event\n :once # Trigger on first event firing\n\nValid predicate-value pairs are:\n\n every: N # Trigger every `N` events\n only: N # Trigger only on event `N`\n\n**Warning: If you modify the step state in an event handler, it will trigger\npotentially excessive recompilation and result in significant additional overhead\nduring loop execution.**","ref":"Axon.Loop.html#handle_event/4","title":"Axon.Loop.handle_event/4","type":"function"},{"doc":"Adds a handler function which updates a `Kino.VegaLite` plot.\n\nBy default, this will run after every iteration.\n\nYou must specify a plot to push to and a metric to track. The `:x` axis will be the iteration count, labeled `\"step\"`. The metric must match the name given to the `:y` axis in your `VegaLite` plot:\n\n plot =\n Vl.new()\n |> Vl.mark(:line)\n |> Vl.encode_field(:x, \"step\", type: :quantitative)\n |> Vl.encode_field(:y, \"loss\", type: :quantitative)\n |> Kino.VegaLite.new()\n |> Kino.render()\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.kino_vega_lite_plot(plot, \"loss\")","ref":"Axon.Loop.html#kino_vega_lite_plot/4","title":"Axon.Loop.kino_vega_lite_plot/4","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:iteration_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.","ref":"Axon.Loop.html#kino_vega_lite_plot/4-options","title":"Options - Axon.Loop.kino_vega_lite_plot/4","type":"function"},{"doc":"Adds a handler function which logs the given message produced\nby `message_fn` to the given IO device every `event` satisfying\n`filter`.\n\nIn most cases, this is useful for inspecting the contents of\nthe loop state at intermediate stages. For example, the default\n`trainer` loop factory attaches IO logging of epoch, batch, loss\nand metrics.\n\nIt's also possible to log loop state to files by changing the\ngiven IO device. By default, the IO device is `:stdio`.\n\n`message_fn` should take the loop state and return a binary\nrepresenting the message to be written to the IO device.","ref":"Axon.Loop.html#log/3","title":"Axon.Loop.log/3","type":"function"},{"doc":"Creates a loop from `step_fn`, an optional `init_fn`, and an\noptional `output_transform`.\n\n`step_fn` is an arity-2 function which takes a batch and state\nand returns an updated step state:\n\n defn batch_step(batch, step_state) do\n step_state + 1\n end\n\n`init_fn` by default is an identity function which forwards its\ninitial arguments as the initial step state. You should define a custom\ninitialization function if you require a different behavior:\n\n defn init_step_state(state) do\n Map.merge(%{foo: 1}, state)\n end\n\nYou may use `state` in conjunction with initialization functions in\n`init_fn`. For example, `train_step/3` uses initial state as initial\nmodel parameters to allow initializing models from partial parameterizations.\n\n`batch_step/2` and `init_step_state/1` are typically called from\nwithin `Nx.Defn.jit/3`. 
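Putting the two together, a rough sketch (where `data` is a hypothetical `Enumerable` of batches and `batch_step/2` is the counting step above):\n\n loop = Axon.Loop.loop(&batch_step/2)\n\n # the step state starts at 0 and increments once per batch\n Axon.Loop.run(loop, data, 0, epochs: 1)\n\n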
While JIT-compilation will work with anonymous functions,\n`def`, and `defn`, it is recommended that you use the stricter `defn` to define\nboth functions in order to avoid bugs or cryptic errors.\n\n`output_transform/1` applies a transformation on the final accumulated loop state.\nThis is useful for extracting specific fields from a loop and piping them into\nadditional functions.","ref":"Axon.Loop.html#loop/3","title":"Axon.Loop.loop/3","type":"function"},{"doc":"Adds a metric of the given name to the loop.\n\nA metric is a function which tracks or measures some value with respect\nto values in the step state. For example, when training classification\nmodels, it's common to track the model's accuracy during training:\n\n loop\n |> Axon.Loop.metric(:accuracy, \"Accuracy\")\n\nBy default, metrics assume a supervised learning task and extract the fields\n`[:y_true, :y_pred]` from the step state. If you wish to work on a different\nvalue, you can use an output transform. An output transform is a list of keys\nto extract from the output state, or a function which returns a flattened list\nof values to pass to the given metric function. Values received from output\ntransforms are passed to the given metric using:\n\n value = output_transform.(step_state)\n apply(metric, value)\n\nThus, even if you want your metric to work on a container, your output transform\nmust return a list.\n\n`metric` must be an atom which matches the name of a metric in `Axon.Metrics`, or\nan arbitrary function which returns a tensor or container.\n\n`name` must be a string or atom used to store the computed metric in the loop\nstate. If names conflict, the last attached metric will take precedence:\n\n loop\n |> Axon.Loop.metric(:mean_squared_error, \"Error\") # Will be overwritten\n |> Axon.Loop.metric(:mean_absolute_error, \"Error\") # Will be used\n\nBy default, metrics keep a running average of the metric calculation. You can\noverride this behavior by changing `accumulate`:\n\n loop\n |> Axon.Loop.metric(:true_negatives, \"tn\", :running_sum)\n\nThe accumulation function can be one of the accumulation combinators in Axon.Metrics\nor an arity-3 function of the form: `accumulate(acc, obs, i) :: new_acc`.","ref":"Axon.Loop.html#metric/5","title":"Axon.Loop.metric/5","type":"function"},{"doc":"Adds a handler function which monitors the given metric\nand fires some action when the given metric meets some\ncriteria.\n\nThis function is a generalization of handlers such as\n`Axon.Loop.reduce_lr_on_plateau/3` and `Axon.Loop.early_stop/3`.\n\nYou must specify a metric to monitor that is present in\nthe state metrics. This handler will then monitor the value\nof the metric at the specified intervals and fire the specified\nfunction if the criteria are met.\n\nYou must also specify a name for the monitor attached to the\ngiven metric. This will be used to store metadata associated\nwith the monitor.\n\nThe common use of a monitor is to track improvement of metrics\nand take action if metrics haven't improved after a certain number\nof events. However, you can also set a monitor up to trigger if\na metric hits some criteria (such as a threshold) by passing a\ncustom monitoring mode.","ref":"Axon.Loop.html#monitor/5","title":"Axon.Loop.monitor/5","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:epoch_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.\n\n * `:patience` - number of given events to wait for improvement. 
Defaults\n to `3`.\n\n * `:mode` - whether given metric is being minimized or maximized. One of\n `:min`, `:max` or an arity-1 function which returns `true` or `false`.\n Defaults to `:min`.","ref":"Axon.Loop.html#monitor/5-options","title":"Options - Axon.Loop.monitor/5","type":"function"},{"doc":"Adds a handler function which reduces the learning rate by\nthe given factor if the given metric does not improve between\nevents.\n\nBy default, this will run after each epoch and track the\nimprovement of a given metric.\n\nYou must specify a metric to monitor and the metric must\nbe present in the loop state. Typically, this will be\na validation metric:\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.validate(model, val_data)\n |> Axon.Loop.reduce_lr_on_plateau(\"accuracy\", mode: :max)","ref":"Axon.Loop.html#reduce_lr_on_plateau/3","title":"Axon.Loop.reduce_lr_on_plateau/3","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:epoch_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.\n\n * `:patience` - number of given events to wait for improvement. Defaults\n to `3`.\n\n * `:mode` - whether given metric is being minimized or maximized. Defaults\n to `:min`.\n\n * `:factor` - factor to decrease learning rate by. Defaults to `0.1`.","ref":"Axon.Loop.html#reduce_lr_on_plateau/3-options","title":"Options - Axon.Loop.reduce_lr_on_plateau/3","type":"function"},{"doc":"Runs the given loop on data with the given options.\n\n`loop` must be a valid Axon.Loop struct built from one of the\nloop factories provided in this module.\n\n`data` must be an Enumerable or Stream which yields batches of\ndata on each iteration.","ref":"Axon.Loop.html#run/4","title":"Axon.Loop.run/4","type":"function"},{"doc":"* `:epochs` - max epochs to run loop for. Must be non-negative integer.\n Defaults to `1`.\n\n * `:iterations` - max iterations to run each epoch. Must be non-negative\n integer. Defaults to `-1` or no max iterations.\n\n * `:jit_compile?` - whether or not to JIT compile initialization and step\n functions. JIT compilation must be used for gradient computations. Defaults\n to true.\n\n * `:garbage_collect` - whether or not to garbage collect after\n each loop iteration. This may prevent OOMs, but it will slow down training.\n\n * `:strict?` - whether or not to compile step functions strictly. If this flag\n is set, the loop will raise on any cache miss during the training loop. Defaults\n to true.\n\n * `:debug` - run loop in debug mode to trace loop progress. Defaults to\n false.\n\n Additional options are forwarded to `Nx.Defn.jit` as JIT-options. If no JIT\n options are set, the default options set with `Nx.Defn.default_options` are\n used.","ref":"Axon.Loop.html#run/4-options","title":"Options - Axon.Loop.run/4","type":"function"},{"doc":"Serializes loop state to a binary for saving and loading\nloop from previous states.\n\nYou can consider the serialized state to be a checkpoint of\nall state at a given iteration and epoch.\n\nBy default, the step state is serialized using `Nx.serialize/2`;\nhowever, this behavior can be changed if step state is an application\nspecific container. 
For example, if you introduce your own data\nstructure into step_state, `Nx.serialize/2` will not be sufficient\nfor serialization - you must pass custom serialization as an option\nwith `:serialize_step_state`.\n\nAdditional `opts` controls serialization options such as compression.\nIt is forwarded to `:erlang.term_to_binary/2`.","ref":"Axon.Loop.html#serialize_state/2","title":"Axon.Loop.serialize_state/2","type":"function"},{"doc":"Creates a supervised train step from a model, loss function, and\noptimizer.\n\nThis function is intended for more fine-grained control over the loop\ncreation process. It returns a tuple of `{init_fn, step_fn}` where `init_fn`\nis an initialization function which returns an initial step state and\n`step_fn` is a supervised train step constructed from `model`, `loss`,\nand `optimizer`.\n\n`model` must be an Axon struct, a valid defn container\nof Axon structs, or a `{init_fn, apply_fn}`-tuple where `init_fn` is\nan arity-2 function which initializes the model state and `apply_fn` is\nan arity-2 function which applies the forward pass of the model. The forward\npass of the model must return a map with keys `:prediction` and `:state`\nrepresenting the model's prediction and updated state for layers which\naggregate state during training.\n\n`loss` must be an atom which matches a function in `Axon.Losses`, a list\nof `{loss, weight}` tuples representing a basic weighted loss function\nfor multi-output models, or an arity-2 function representing a custom loss\nfunction.\n\n`optimizer` must be an atom matching the name of a valid optimizer in `Polaris.Optimizers`,\nor a `{init_fn, update_fn}` tuple where `init_fn` is an arity-1 function which\ninitializes the optimizer state from the model parameters and `update_fn` is an\narity-3 function that receives `(gradient, optimizer_state, model_parameters)` and\nscales gradient updates with respect to input parameters, optimizer state, and gradients.\nThe `update_fn` returns `{scaled_updates, optimizer_state}`, which can then be applied to\nthe model through `model_parameters = Polaris.Updates.apply_updates(model_parameters, scaled_updates)`.\nSee `Polaris.Updates` for more information on building optimizers.","ref":"Axon.Loop.html#train_step/4","title":"Axon.Loop.train_step/4","type":"function"},{"doc":"* `:seed` - seed to use when constructing models. Seed controls random initialization\n of model parameters. Defaults to no seed which constructs a random seed for you at\n model build time.\n\n * `:loss_scale` - type of loss-scaling to use, if any. Loss-scaling is necessary when\n doing mixed precision training for numerical stability. Defaults to `:identity` or\n no loss-scaling.\n\n * `:gradient_accumulation_steps` - number of gradient accumulation steps to take during\n training. Gradient accumulation decreases the number of updates by accumulating gradients\n between steps, increasing the effective batch size on smaller devices. Defaults to 1.","ref":"Axon.Loop.html#train_step/4-options","title":"Options - Axon.Loop.train_step/4","type":"function"},{"doc":"Creates a supervised training loop from a model, loss function,\nand optimizer.\n\nThis function is useful for training models on most standard supervised\nlearning tasks. It assumes data consists of tuples of input-target pairs,\ne.g. 
`[{x0, y0}, {x1, y1}, ..., {xN, yN}]` where `x0` and `y0` are batched\ntensors or containers of batched tensors.\n\nIt defines an initialization function which first initializes model state\nusing the given model and then initializes optimizer state using the initial\nmodel state. The step function uses a differentiable objective function\ndefined with respect to the model parameters, input data, and target data\nusing the given loss function. It then updates model parameters using the\ngiven optimizer in order to minimize loss with respect to the model parameters.\n\n`model` must be an Axon struct, a valid defn container\nof Axon structs, or a `{init_fn, apply_fn}`-tuple where `init_fn` is\nan arity-2 function which initializes the model state and `apply_fn` is\nan arity-2 function which applies the forward pass of the model.\n\n`loss` must be an atom which matches a function in `Axon.Losses`, a list\nof `{loss, weight}` tuples representing a basic weighted loss function\nfor multi-output models, or an arity-2 function representing a custom loss\nfunction.\n\n`optimizer` must be an atom matching the name of a valid optimizer in `Polaris.Optimizers`,\nor a `{init_fn, update_fn}` tuple where `init_fn` is an arity-1 function which\ninitializes the optimizer state from attached parameters and `update_fn` is an\narity-3 function which scales gradient updates with respect to input parameters,\noptimizer state, and gradients. See `Polaris.Updates` for more information on building\noptimizers.\n\nThis function creates a step function which outputs a map consisting of the following\nfields for `step_state`:\n\n %{\n y_pred: tensor() | container(tensor()), # Model predictions for use in metrics\n y_true: tensor() | container(tensor()), # True labels for use in metrics\n loss: tensor(), # Running average of loss over epoch\n model_state: container(tensor()), # Model parameters and state\n optimizer_state: container(tensor()) # Optimizer state associated with each parameter\n }","ref":"Axon.Loop.html#trainer/4","title":"Axon.Loop.trainer/4","type":"function"},{"doc":"#","ref":"Axon.Loop.html#trainer/4-examples","title":"Examples - Axon.Loop.trainer/4","type":"function"},{"doc":"data = Stream.zip(input, target)\n\n model = Axon.input(\"input\", shape: {nil, 32}) |> Axon.dense(1, activation: :sigmoid)\n\n model\n |> Axon.Loop.trainer(:binary_cross_entropy, :adam)\n |> Axon.Loop.run(data)\n\n#","ref":"Axon.Loop.html#trainer/4-basic-usage","title":"Basic usage - Axon.Loop.trainer/4","type":"function"},{"doc":"model\n |> Axon.Loop.trainer(:binary_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.05))\n |> Axon.Loop.run(data)\n\n#","ref":"Axon.Loop.html#trainer/4-customizing-optimizer","title":"Customizing Optimizer - Axon.Loop.trainer/4","type":"function"},{"doc":"loss_fn = fn y_true, y_pred -> Nx.mean(Nx.abs(Nx.subtract(y_true, y_pred))) end\n\n model\n |> Axon.Loop.trainer(loss_fn, Polaris.Optimizers.rmsprop(learning_rate: 0.01))\n |> Axon.Loop.run(data)\n\n#","ref":"Axon.Loop.html#trainer/4-custom-loss","title":"Custom loss - Axon.Loop.trainer/4","type":"function"},{"doc":"model = {Axon.input(\"input_0\", shape: {nil, 1}), Axon.input(\"input_1\", shape: {nil, 2})}\n loss_weights = [mean_squared_error: 0.5, mean_absolute_error: 0.5]\n\n model\n |> Axon.Loop.trainer(loss_weights, :sgd)\n |> Axon.Loop.run(data)","ref":"Axon.Loop.html#trainer/4-multiple-objectives-with-multi-output-model","title":"Multiple objectives with multi-output model - Axon.Loop.trainer/4","type":"function"},{"doc":"* `:log` - training loss and 
metric log interval. Set to 0 to silence\n training logs. Defaults to `50`.\n\n * `:seed` - seed to use when constructing models. Seed controls random initialization\n of model parameters. Defaults to no seed which constructs a random seed for you at\n model build time.\n\n * `:loss_scale` - type of loss-scaling to use, if any. Loss-scaling is necessary when\n doing mixed precision training for numerical stability. Defaults to `:identity` or\n no loss-scaling.\n\n * `:gradient_accumulation_steps` - number of gradient accumulation steps to take during\n training. Gradient accumulation decreases the number of updates by accumulating gradients\n between steps, increasing the effective batch size on smaller devices. Defaults to 1.","ref":"Axon.Loop.html#trainer/4-options","title":"Options - Axon.Loop.trainer/4","type":"function"},{"doc":"Adds a handler function which tests the performance of `model`\nagainst the given validation set.\n\nThis handler assumes the loop state matches the state initialized\nin a supervised training loop. Typically, you'd call this immediately\nafter creating a supervised training loop:\n\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.validate(model, validation_data)\n\nPlease note that you must pass the same (or an equivalent) model\ninto this function so it can be used during the validation loop. The\nmetrics which are computed are those which are present BEFORE the\nvalidation handler was added to the loop. For the following loop:\n\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(:mean_absolute_error)\n |> Axon.Loop.validate(model, validation_data)\n |> Axon.Loop.metric(:binary_cross_entropy)\n\nonly `:mean_absolute_error` will be computed at validation time.\n\nThe returned loop state is altered to contain validation\nmetrics for use in later handlers such as early stopping and model\ncheckpoints. Since event handlers are executed in\nthe order they are declared in the training loop, you MUST call\nthis function before any other handler which expects or may use\nvalidation metrics.\n\nBy default the validation loop runs after every epoch; however, you\ncan customize it by overriding the default event and event filters:\n\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(:mean_absolute_error)\n |> Axon.Loop.validate(model, validation_data, event: :iteration_completed, filter: [every: 10_000])\n |> Axon.Loop.metric(:binary_cross_entropy)","ref":"Axon.Loop.html#validate/4","title":"Axon.Loop.validate/4","type":"function"},{"doc":"Accumulated state in an Axon.Loop.\n\nLoop state is a struct:\n\n %State{\n epoch: integer(),\n max_epoch: integer(),\n iteration: integer(),\n max_iteration: integer(),\n metrics: map(string(), container()),\n times: map(integer(), integer()),\n step_state: container(),\n handler_metadata: container()\n }\n\n`epoch` is the current epoch, starting at 0, of the nested loop.\nDefaults to 0.\n\n`max_epoch` is the maximum number of epochs the loop should run\nfor. Defaults to 1.\n\n`iteration` is the current iteration of the inner loop. In supervised\nsettings, this will be the current batch. Defaults to 0.\n\n`max_iteration` is the maximum number of iterations the loop should\nrun a given epoch for. Defaults to -1 (no max).\n\n`metrics` is a map of `%{\"metric_name\" => value}` which accumulates metrics\nover the course of loop processing. 
Defaults to an empty map.\n\n`times` is a map of `%{epoch_number => value}` which maps a given epoch\nto the processing time. Defaults to an empty map.\n\n`step_state` is the step state as defined by the loop's processing\ninitialization and update functions. `step_state` is a required field.\n\n`handler_metadata` is a metadata field for storing loop handler metadata.\nFor example, loop checkpoints with specific metric criteria can store\nprevious best metrics in the handler meta for use between iterations.\n\n`event_counts` is a metadata field which stores information about the number\nof times each event has been fired. This is useful when creating custom filters.\n\n`status` refers to the loop state status after the loop has executed. You can\nuse this to determine if the loop ran to completion or if it was halted early.","ref":"Axon.Loop.State.html","title":"Axon.Loop.State","type":"module"},{"doc":"","ref":"Axon.CompileError.html","title":"Axon.CompileError","type":"exception"},{"doc":"","ref":"Axon.CompileError.html#message/1","title":"Axon.CompileError.message/1","type":"function"},{"doc":"# Axon Guides\n\nAxon is a library for creating and training neural networks in Elixir. The Axon guides are a collection of Livebooks designed to introduce Axon's APIs and design decisions from the bottom-up. After working through the guides, you will feel comfortable and confident working with Axon and using Axon for your next deep learning problem.","ref":"guides.html","title":"Axon Guides","type":"extras"},{"doc":"* [Your first Axon model](model_creation/your_first_axon_model.livemd)\n* [Sequential models](model_creation/sequential_models.livemd)\n* [Complex models](model_creation/complex_models.livemd)\n* [Multi-input / multi-output models](model_creation/multi_input_multi_output_models.livemd)\n* [Custom layers](model_creation/custom_layers.livemd)\n* [Model hooks](model_creation/model_hooks.livemd)","ref":"guides.html#model-creation","title":"Model Creation - Axon Guides","type":"extras"},{"doc":"* [Accelerating Axon](model_execution/accelerating_axon.livemd)\n* [Training and inference mode](model_execution/training_and_inference_mode.livemd)","ref":"guides.html#model-execution","title":"Model Execution - Axon Guides","type":"extras"},{"doc":"* [Your first training loop](training_and_evaluation/your_first_training_loop.livemd)\n* [Instrumenting loops with metrics](training_and_evaluation/instrumenting_loops_with_metrics.livemd)\n* [Your first evaluation loop](training_and_evaluation/your_first_evaluation_loop.livemd)\n* [Using loop event handlers](training_and_evaluation/using_loop_event_handlers.livemd)\n* [Custom models, loss functions, and optimizers](training_and_evaluation/custom_models_loss_optimizers.livemd)\n* [Writing custom metrics](training_and_evaluation/writing_custom_metrics.livemd)\n* [Writing custom event handlers](training_and_evaluation/writing_custom_event_handlers.livemd)","ref":"guides.html#training-and-evaluation","title":"Training and Evaluation - Axon Guides","type":"extras"},{"doc":"* [Converting ONNX models to Axon](serialization/onnx_to_axon.livemd)","ref":"guides.html#serialization","title":"Serialization - Axon Guides","type":"extras"},{"doc":"# Your first Axon model\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"your_first_axon_model.html","title":"Your first Axon model","type":"extras"},{"doc":"Axon is a library for creating and training neural networks in Elixir. 
Everything in Axon centers around the `%Axon{}` struct which represents an instance of an Axon model.\n\nModels are just graphs which represent the transformation and flow of input data to a desired output. Really, you can think of models as representing a single computation or function. An Axon model, when executed, takes data as input and returns transformed data as output.\n\nAll Axon models start with a declaration of input nodes. These are the root nodes of your computation graph, and correspond to the actual input data you want to send to Axon:\n\n```elixir\ninput = Axon.input(\"data\")\n```\n\n\n\n```\n#Axon \n```\n\nTechnically speaking, `input` is now a valid Axon model which you can inspect, execute, and initialize. You can visualize how data flows through the graph using `Axon.Display.as_graph/2`:\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(input, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n;\n```\n\nNotice the execution flow is just a single node, because your graph only consists of an input node! You pass data in and the model spits the same data back out, without any intermediate transformations.\n\nYou can see this in action by actually executing your model. You can build the `%Axon{}` struct into its `initialization` and `forward` functions by calling `Axon.build/2`. This pattern of \"lowering\" or transforming the `%Axon{}` data structure into other functions or representations is very common in Axon. By simply traversing the data structure, you can create useful functions, execution visualizations, and more!\n\n```elixir\n{init_fn, predict_fn} = Axon.build(input)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\nNotice that `Axon.build/2` returns a tuple of `{init_fn, predict_fn}`. `init_fn` has the signature:\n\n```\ninit_fn.(template :: map(tensor) | tensor, initial_params :: map) :: map(tensor)\n```\n\nwhile `predict_fn` has the signature:\n\n```\npredict_fn.(params :: map(tensor), input :: map(tensor) | tensor)\n```\n\n`init_fn` returns all of your model's trainable parameters and state. You need to pass a template of the expected inputs because the shape of certain model parameters often depends on the shape of model inputs. You also need to pass any initial parameters you want your model to start with. This is useful for things like transfer learning, which you can read about in another guide.\n\n`predict_fn` returns transformed inputs from your model's trainable parameters and the given inputs.\n\n```elixir\nparams = init_fn.(Nx.template({1, 8}, :f32), %{})\n```\n\n\n\n```\n%{}\n```\n\nIn this example, you use `Nx.template/2` to create a *template tensor*, which is a placeholder that does not actually consume any memory. Templates are useful for initialization because you don't actually need to know anything about your inputs other than their shape and type.\n\nNotice `init_fn` returned an empty map because your model does not have any trainable parameters. 
This should make sense because it's just an input layer.\n\nNow you can pass these trainable parameters to `predict_fn` along with some input to actually execute your model:\n\n```elixir\npredict_fn.(params, Nx.iota({1, 8}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAnd your model just returned the given input, as expected!","ref":"your_first_axon_model.html#your-first-model","title":"Your first model - Your first Axon model","type":"extras"},{"doc":"# Sequential models\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"sequential_models.html","title":"Sequential models","type":"extras"},{"doc":"In the [last guide](your_first_axon_model.livemd), you created a simple identity model which just returned the input. Of course, you would never actually use Axon for such purposes. You want to create real neural networks!\n\nIn equivalent frameworks in the Python ecosystem such as Keras and PyTorch, there is a concept of *sequential models*. Sequential models are named after the sequential nature in which data flows through them. Sequential models transform the input with sequential, successive transformations.\n\nIf you're an experienced Elixir programmer, this paradigm of sequential transformations might sound a lot like what happens when using the pipe (`|>`) operator. In Elixir, it's common to see code blocks like:\n\n\n\n```elixir\nlist\n|> Enum.map(fn x -> x + 1 end)\n|> Enum.filter(&rem(&1, 2) == 0)\n|> Enum.count()\n```\n\nThe snippet above passes `list` through a sequence of transformations. You can apply this same paradigm in Axon to create sequential models. In fact, creating sequential models is so natural with Elixir's pipe operator, that Axon does not need a distinct *sequential* construct. To create a sequential model, you just pass Axon models through successive transformations in the Axon API:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(32)\n |> Axon.activation(:relu)\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(1)\n |> Axon.activation(:softmax)\n```\n\n\n\n```\n#Axon \n```\n\nIf you visualize this model, it's easy to see how data flows sequentially through it:\n\n```elixir\ntemplate = Nx.template({2, 16}, :f32)\nAxon.Display.as_graph(model, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 16}\"/];\n4[\"dense_0 (:dense) {2, 32}\"];\n5[\"relu_0 (:relu) {2, 32}\"];\n6[\"dropout_0 (:dropout) {2, 32}\"];\n7[\"dense_1 (:dense) {2, 1}\"];\n8[\"softmax_0 (:softmax) {2, 1}\"];\n7 --> 8;\n6 --> 7;\n5 --> 6;\n4 --> 5;\n3 --> 4;\n```\n\nYour model is more involved and as a result so is the execution graph! Now, using the same constructs from the last section, you can build and run your model:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\n```elixir\nparams = init_fn.(template, %{})\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nWow! Notice that this model actually has trainable parameters. You can see that the parameter map is just a regular Elixir map. Each top-level entry maps to a layer with a key corresponding to that layer's name and a value corresponding to that layer's trainable parameters. 
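For instance (a quick sketch reusing the `params` map above; the layer and parameter names are the ones shown in the output), you can grab a single parameter tensor with ordinary map access:\n\n```elixir\n# Fetch the kernel tensor of the first dense layer and check its shape\nkernel = params[\"dense_0\"][\"kernel\"]\nNx.shape(kernel)\n```\n\n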
Each layer's individual trainable parameters are given layer-specific names and map directly to Nx tensors.\n\nNow you can use these `params` with your `predict_fn`:\n\n```elixir\npredict_fn.(params, Nx.iota({2, 16}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAnd voila! You've successfully created and used a sequential model in Axon!","ref":"sequential_models.html#creating-a-sequential-model","title":"Creating a sequential model - Sequential models","type":"extras"},{"doc":"# Complex models\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"complex_models.html","title":"Complex models","type":"extras"},{"doc":"Not all models you'd want to create fit cleanly in the *sequential* paradigm. Some models require a more flexible API. Fortunately, because Axon models are just Elixir data structures, you can manipulate them and decompose architectures as you would any other Elixir program:\n\n```elixir\ninput = Axon.input(\"data\")\n\nx1 = input |> Axon.dense(32)\nx2 = input |> Axon.dense(64) |> Axon.relu() |> Axon.dense(32)\n\nout = Axon.add(x1, x2)\n```\n\n\n\n```\n#Axon \n```\n\nIn the snippet above, your model branches `input` into `x1` and `x2`. Each branch performs a different set of transformations; however, at the end the branches are merged with an `Axon.add/3`. You might sometimes see layers like `Axon.add/3` called *combinators*. Really they're just layers that operate on multiple Axon models at once - typically to merge some branches together.\n\n`out` represents your final Axon model.\n\nIf you visualize this model, you can see the full effect of the branching in this model:\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n4[\"dense_0 (:dense) {2, 32}\"];\n5[\"dense_1 (:dense) {2, 64}\"];\n6[\"relu_0 (:relu) {2, 64}\"];\n7[\"dense_2 (:dense) {2, 32}\"];\n8[\"container_0 (:container) {{2, 32}, {2, 32}}\"];\n9[\"add_0 (:add) {2, 32}\"];\n8 --> 9;\n7 --> 8;\n4 --> 8;\n6 --> 7;\n5 --> 6;\n3 --> 5;\n3 --> 4;\n```\n\nAnd you can use `Axon.build/2` on `out` as you would any other Axon model:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\n```elixir\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAs your architectures grow in complexity, you might find yourself reaching for better abstractions to organize your model creation code. For example, PyTorch models are often organized into `nn.Module`. The equivalent of an `nn.Module` in Axon is a regular Elixir function. 
If you're translating models from PyTorch to Axon, it's natural to create one Elixir function per `nn.Module`.\n\nYou should write your models as you would write any other Elixir code - you don't need to worry about any framework-specific constructs:\n\n```elixir\ndefmodule MyModel do\n def model() do\n Axon.input(\"data\")\n |> conv_block()\n |> Axon.flatten()\n |> dense_block()\n |> dense_block()\n |> Axon.dense(1)\n end\n\n defp conv_block(input) do\n residual = input\n\n x = input |> Axon.conv(3, padding: :same) |> Axon.mish()\n\n x\n |> Axon.add(residual)\n |> Axon.max_pool(kernel_size: {2, 2})\n end\n\n defp dense_block(input) do\n input |> Axon.dense(32) |> Axon.relu()\n end\nend\n```\n\n\n\n```\n{:module, MyModel, <<70, 79, 82, 49, 0, 0, 8, ...>>, {:dense_block, 1}}\n```\n\n```elixir\nmodel = MyModel.model()\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\ntemplate = Nx.template({1, 28, 28, 3}, :f32)\nAxon.Display.as_graph(model, template)\n```\n\n\n\n```mermaid\ngraph TD;\n10[/\"data (:input) {1, 28, 28, 3}\"/];\n11[\"conv_0 (:conv) {1, 28, 28, 3}\"];\n12[\"mish_0 (:mish) {1, 28, 28, 3}\"];\n13[\"container_0 (:container) {{1, 28, 28, 3}, {1, 28, 28, 3}}\"];\n14[\"add_0 (:add) {1, 28, 28, 3}\"];\n15[\"max_pool_0 (:max_pool) {1, 14, 14, 3}\"];\n16[\"flatten_0 (:flatten) {1, 588}\"];\n17[\"dense_0 (:dense) {1, 32}\"];\n18[\"relu_0 (:relu) {1, 32}\"];\n19[\"dense_1 (:dense) {1, 32}\"];\n20[\"relu_1 (:relu) {1, 32}\"];\n21[\"dense_2 (:dense) {1, 1}\"];\n20 --> 21;\n19 --> 20;\n18 --> 19;\n17 --> 18;\n16 --> 17;\n15 --> 16;\n14 --> 15;\n13 --> 14;\n10 --> 13;\n12 --> 13;\n11 --> 12;\n10 --> 11;\n```","ref":"complex_models.html#creating-more-complex-models","title":"Creating more complex models - Complex models","type":"extras"},{"doc":"# Multi-input / multi-output models\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"multi_input_multi_output_models.html","title":"Multi-input / multi-output models","type":"extras"},{"doc":"Sometimes your application necessitates the use of multiple inputs. To use multiple inputs in an Axon model, you just need to declare multiple inputs in your graph:\n\n```elixir\ninput_1 = Axon.input(\"input_1\")\ninput_2 = Axon.input(\"input_2\")\n\nout = Axon.add(input_1, input_2)\n```\n\n\n\n```\n#Axon \n```\n\nNotice when you inspect the model, it tells you what your model's inputs are up front. 
You can also get metadata about your model inputs programmatically with `Axon.get_inputs/1`:\n\n```elixir\nAxon.get_inputs(out)\n```\n\n\n\n```\n%{\"input_1\" => nil, \"input_2\" => nil}\n```\n\nEach input is uniquely named, so you can pass inputs by-name into inspection and execution functions with a map:\n\n```elixir\ninputs = %{\n \"input_1\" => Nx.template({2, 8}, :f32),\n \"input_2\" => Nx.template({2, 8}, :f32)\n}\n\nAxon.Display.as_graph(out, inputs)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"input_1 (:input) {2, 8}\"/];\n4[/\"input_2 (:input) {2, 8}\"/];\n5[\"container_0 (:container) {{2, 8}, {2, 8}}\"];\n6[\"add_0 (:add) {2, 8}\"];\n5 --> 6;\n4 --> 5;\n3 --> 5;\n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(inputs, %{})\n```\n\n\n\n```\n%{}\n```\n\n```elixir\ninputs = %{\n \"input_1\" => Nx.iota({2, 8}, type: :f32),\n \"input_2\" => Nx.iota({2, 8}, type: :f32)\n}\n\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nIf you forget a required input, Axon will raise:\n\n```elixir\npredict_fn.(params, %{\"input_1\" => Nx.iota({2, 8}, type: :f32)})\n```","ref":"multi_input_multi_output_models.html#creating-multi-input-models","title":"Creating multi-input models - Multi-input / multi-output models","type":"extras"},{"doc":"Depending on your application, you might also want your model to have multiple outputs. You can achieve this by using `Axon.container/2` to wrap multiple nodes into any supported Nx container:\n\n```elixir\ninp = Axon.input(\"data\")\n\nx1 = inp |> Axon.dense(32) |> Axon.relu()\nx2 = inp |> Axon.dense(64) |> Axon.relu()\n\nout = Axon.container({x1, x2})\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n7[/\"data (:input) {2, 8}\"/];\n8[\"dense_0 (:dense) {2, 32}\"];\n9[\"relu_0 (:relu) {2, 32}\"];\n10[\"dense_1 (:dense) {2, 64}\"];\n11[\"relu_1 (:relu) {2, 64}\"];\n12[\"container_0 (:container) {{2, 32}, {2, 64}}\"];\n11 --> 12;\n9 --> 12;\n10 --> 11;\n7 --> 10;\n8 --> 9;\n7 --> 8;\n```\n\nWhen executed, containers will return a data structure which matches their input structure:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n{#Nx.Tensor ,\n #Nx.Tensor }\n```\n\nYou can output maps as well:\n\n```elixir\nout = Axon.container(%{x1: x1, x2: x2})\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n%{\n x1: #Nx.Tensor ,\n x2: #Nx.Tensor \n}\n```\n\nContainers even support arbitrary nesting:\n\n```elixir\nout = Axon.container({%{x1: {x1, x2}, x2: %{x1: x1, x2: {x2}}}})\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n{%{\n x1: {#Nx.Tensor ,\n #Nx.Tensor },\n x2: %{\n x1: #Nx.Tensor ,\n x2: {#Nx.Tensor }\n }\n }}\n```","ref":"multi_input_multi_output_models.html#creating-multi-output-models","title":"Creating multi-output models - Multi-input / multi-output models","type":"extras"},{"doc":"# Custom layers\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"custom_layers.html","title":"Custom layers","type":"extras"},{"doc":"While Axon has a plethora of built-in layers, more than likely you'll run into 
a case where you need something not provided by the framework. In these instances, you can use *custom layers*.\n\nTo Axon, layers are really just `defn` implementations with special Axon inputs. Every layer in Axon (including the built-in layers) is implemented with the `Axon.layer/3` function. The API of `Axon.layer/3` intentionally mirrors the API of `Kernel.apply/2`. To declare a custom layer you need 2 things:\n\n1. A `defn` implementation\n2. Inputs\n\nThe `defn` implementation looks like any other `defn` you'd write; however, it must always account for additional `opts` as an argument:\n\n```elixir\ndefmodule CustomLayers0 do\n import Nx.Defn\n\n defn my_layer(input, opts \\\\ []) do\n opts = keyword!(opts, mode: :train, alpha: 1.0)\n\n input\n |> Nx.sin()\n |> Nx.multiply(opts[:alpha])\n end\nend\n```\n\n\n\n```\n{:module, CustomLayers0, <<70, 79, 82, 49, 0, 0, 10, ...>>, true}\n```\n\nRegardless of the options you configure your layer to accept, the `defn` implementation will always receive a `:mode` option indicating whether the model is running in training or inference mode. You can customize the behavior of your layer depending on the mode.\n\nWith an implementation defined, you need only call `Axon.layer/3` to apply your custom layer to an Axon input:\n\n```elixir\ninput = Axon.input(\"data\")\n\nout = Axon.layer(&CustomLayers0.my_layer/2, [input])\n```\n\n\n\n```\n#Axon \n```\n\nNow you can inspect and execute your model as normal:\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n4[\"custom_0 (:custom) {2, 8}\"];\n3 --> 4;\n```\n\nNotice that custom layers render with a default operation marked as `:custom`. This can make it difficult to determine which layer is which during inspection. You can control the rendering by passing `:op_name` to `Axon.layer/3`:\n\n```elixir\nout = Axon.layer(&CustomLayers0.my_layer/2, [input], op_name: :my_layer)\n\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n5[\"my_layer_0 (:my_layer) {2, 8}\"];\n3 --> 5;\n```\n\nYou can also control the name of your layer via the `:name` option. All other options are forwarded to the layer implementation function:\n\n```elixir\nout =\n Axon.layer(&CustomLayers0.my_layer/2, [input],\n name: \"layer\",\n op_name: :my_layer,\n alpha: 2.0\n )\n\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n6[\"layer (:my_layer) {2, 8}\"];\n3 --> 6;\n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\n```\n\n\n\n```\n%{}\n```\n\n```elixir\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nNotice that this model does not have any trainable parameters because none of the layers have trainable parameters. You can introduce trainable parameters by passing inputs created with `Axon.param/3` to `Axon.layer/3`. 
For example, you can modify your original custom layer to take an additional trainable parameter:\n\n```elixir\ndefmodule CustomLayers1 do\n import Nx.Defn\n\n defn my_layer(input, alpha, _opts \\\\ []) do\n input\n |> Nx.sin()\n |> Nx.multiply(alpha)\n end\nend\n```\n\n\n\n```\n{:module, CustomLayers1, <<70, 79, 82, 49, 0, 0, 10, ...>>, true}\n```\n\nAnd then construct the layer with a regular Axon input and a trainable parameter:\n\n```elixir\nalpha = Axon.param(\"alpha\", fn _ -> {} end)\n\nout = Axon.layer(&CustomLayers1.my_layer/3, [input, alpha], op_name: :my_layer)\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\n```\n\n\n\n```\n%{\n \"my_layer_0\" => %{\n \"alpha\" => #Nx.Tensor \n }\n}\n```\n\nNotice how your model now initializes with a trainable parameter `\"alpha\"` for your custom layer. Each parameter requires a unique (per-layer) string name and a function which determines the parameter's shape from the layer's input shapes.\n\n\n\nIf you plan on re-using custom layers in many locations, it's recommended that you wrap them in an Elixir function as an interface:\n\n```elixir\ndefmodule CustomLayers2 do\n import Nx.Defn\n\n def my_layer(%Axon{} = input, opts \\\\ []) do\n opts = Keyword.validate!(opts, [:name])\n alpha = Axon.param(\"alpha\", fn _ -> {} end)\n\n Axon.layer(&my_layer_impl/3, [input, alpha], name: opts[:name], op_name: :my_layer)\n end\n\n defnp my_layer_impl(input, alpha, _opts \\\\ []) do\n input\n |> Nx.sin()\n |> Nx.multiply(alpha)\n end\nend\n```\n\n\n\n```\n{:module, CustomLayers2, <<70, 79, 82, 49, 0, 0, 12, ...>>, true}\n```\n\n```elixir\nout =\n input\n |> CustomLayers2.my_layer()\n |> CustomLayers2.my_layer()\n |> Axon.dense(1)\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n8[\"my_layer_0 (:my_layer) {2, 8}\"];\n9[\"my_layer_1 (:my_layer) {2, 8}\"];\n10[\"dense_0 (:dense) {2, 1}\"];\n9 --> 10;\n8 --> 9;\n3 --> 8;\n```","ref":"custom_layers.html#creating-custom-layers","title":"Creating custom layers - Custom layers","type":"extras"},{"doc":"# Model hooks\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"model_hooks.html","title":"Model hooks","type":"extras"},{"doc":"Sometimes it's useful to inspect or visualize the values of intermediate layers in your model during the forward or backward pass. For example, it's common to visualize the gradients of activation functions to ensure your model is learning in a stable manner. Axon supports this functionality via model hooks.\n\nModel hooks are a means of unidirectional communication with an executing model. Hooks are unidirectional in the sense that you can only **receive** information from your model, and not send information back.\n\nHooks are attached per-layer and can execute at 4 different points in model execution: on the pre-forward, forward, or backward pass of the model or during model initialization. You can also configure the same hook to execute on all of these events. 
You can attach hooks to models using `Axon.attach_hook/3`:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_forward) end, on: :forward)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_init) end, on: :initialize)\n |> Axon.relu()\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :relu) end, on: :forward)\n\n{init_fn, predict_fn} = Axon.build(model)\n\ninput = Nx.iota({2, 4}, type: :f32)\nparams = init_fn.(input, %{})\n```\n\n\n\n```\ndense_init: %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n}\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nNotice how during initialization the `:dense_init` hook fired and inspected the layer's parameters. Now when executing, you'll see outputs for `:dense` and `:relu`:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\nrelu: #Nx.Tensor \n```\n\n\n\n```\n#Nx.Tensor \n```\n\nIt's important to note that hooks execute in the order they were attached to a layer. If you attach 2 hooks to the same layer which execute different functions on the same event, they will run in order:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook1) end, on: :forward)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook2) end, on: :forward)\n |> Axon.relu()\n\n{init_fn, predict_fn} = Axon.build(model)\nparams = init_fn.(input, %{})\n\npredict_fn.(params, input)\n```\n\n\n\n```\nhook2: #Nx.Tensor \n```\n\n\n\n```\n#Nx.Tensor \n```\n\nNotice that `:hook1` fires before `:hook2`.\n\nYou can also specify a hook to fire on all events:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(&IO.inspect/1, on: :all)\n |> Axon.relu()\n |> Axon.dense(1)\n\n{init_fn, predict_fn} = Axon.build(model)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\nOn initialization:\n\n```elixir\nparams = init_fn.(input, %{})\n```\n\n\n\n```\n%{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n}\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nOn pre-forward and forward:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\n#Nx.Tensor \n#Nx.Tensor \n#Nx.Tensor \n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAnd on backwards:\n\n```elixir\nNx.Defn.grad(fn params -> predict_fn.(params, input) end).(params)\n```\n\n\n\n```\n#Nx.Tensor \n#Nx.Tensor \n#Nx.Tensor \n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nFinally, you can specify hooks to only run when the model is built in a certain mode such as training and inference mode. 
You can read more about training and inference mode in [Training and inference mode](../model_execution/training_and_inference_mode.livemd):\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)\n |> Axon.relu()\n\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\nparams = init_fn.(input, %{})\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nThe model was built in training mode so the hook will run:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\n\n\n```\n%{\n prediction: #Nx.Tensor ,\n state: %{}\n}\n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model, mode: :inference)\nparams = init_fn.(input, %{})\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nThe model was built in inference mode so the hook will not run:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\n#Nx.Tensor \n```","ref":"model_hooks.html#creating-models-with-hooks","title":"Creating models with hooks - Model hooks","type":"extras"},{"doc":"# Accelerating Axon\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:exla, \">= 0.5.0\"},\n {:torchx, \">= 0.5.0\"},\n {:benchee, \"~> 1.1\"},\n {:kino, \">= 0.9.0\", override: true}\n])\n```\n\n\n\n```\n:ok\n```","ref":"accelerating_axon.html","title":"Accelerating Axon","type":"extras"},{"doc":"Nx provides two mechanisms for accelerating your neural networks: backends and compilers. Before we learn how to effectively use them, first let's create a simple model for benchmarking purposes:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(32)\n |> Axon.relu()\n |> Axon.dense(1)\n |> Axon.softmax()\n```\n\n\n\n```\n#Axon \n```\n\nBackends are where your tensors (your neural network inputs and parameters) are located. By default, Nx and Axon run all computations using the `Nx.BinaryBackend` which is a pure Elixir implementation of various numerical routines. The `Nx.BinaryBackend` is guaranteed to run wherever an Elixir installation runs; however, it is **very** slow. Due to the computational expense of neural networks, you should basically never use the `Nx.BinaryBackend` and instead opt for one of the available accelerated libraries. At the time of writing, Nx officially supports two of them:\n\n1. EXLA - Acceleration via Google's [XLA project](https://www.tensorflow.org/xla)\n2. TorchX - Bindings to [LibTorch](https://pytorch.org/cppdocs/)\n\nAxon will respect the global and process-level Nx backend configuration. Compilers are covered more in-depth in the second half of this example. You can set the default backend using the following APIs:\n\n```elixir\n# Sets the global default backend (for all Elixir processes)\nNx.global_default_backend(Torchx.Backend)\n# OR\nNx.global_default_backend(EXLA.Backend)\n\n# Sets the process-level default backend (current process only)\nNx.default_backend(Torchx.Backend)\n# OR\nNx.default_backend(EXLA.Backend)\n```\n\nNow all tensors and operations on them will run on the configured backend:\n\n```elixir\n{inputs, _next_key} =\n Nx.Random.key(9999)\n |> Nx.Random.uniform(shape: {2, 128})\n\n{init_fn, predict_fn} = Axon.build(model)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n f32[2][1]\n [\n [1.0],\n [1.0]\n ]\n>\n```\n\nAs you swap backends above, you will get tensors allocated on different backends as results. 
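If you ever need to move tensors between backends explicitly, `Nx.backend_transfer/2` and `Nx.backend_copy/2` exist for exactly that. Here is a minimal sketch (assuming EXLA is installed, as in the setup above):\n\n```elixir\n# Allocate a tensor on the EXLA backend, then transfer it back\n# to the pure-Elixir binary backend.\nt = Nx.tensor([1.0, 2.0, 3.0], backend: EXLA.Backend)\nNx.backend_transfer(t, Nx.BinaryBackend)\n```\n\n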
You should be careful using multiple backends in the same project: attempting to mix tensors between backends may result in strange performance bugs or errors, and Nx will require you to explicitly convert between backends.\n\nFor most larger models, using a compiler brings additional performance benefits on top of the backend.","ref":"accelerating_axon.html#using-nx-backends-in-axon","title":"Using Nx Backends in Axon - Accelerating Axon","type":"extras"},{"doc":"Axon is built entirely on top of Nx's numerical definitions `defn`. Functions declared with `defn` tell Nx to use *just-in-time compilation* to compile and execute the given numerical definition with an available Nx compiler. Numerical definitions enable acceleration on CPU/GPU/TPU via pluggable compilers. At the time of this writing, only EXLA supports a compiler in addition to its backend.\n\nWhen you call `Axon.build/2`, Axon can automatically mark your initialization and forward functions as JIT compiled functions. First let's make sure we are using the EXLA backend:\n\n```elixir\nNx.default_backend(EXLA.Backend)\n```\n\nAnd now let's build another model, this time passing the EXLA compiler as an option:\n\n```elixir\n{inputs, _next_key} =\n Nx.Random.key(9999)\n |> Nx.Random.uniform(shape: {2, 128})\n\n{init_fn, predict_fn} = Axon.build(model, compiler: EXLA)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n\n15:39:26.463 [info] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n\n15:39:26.473 [info] XLA service 0x7f3488329030 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n\n15:39:26.473 [info] StreamExecutor device (0): NVIDIA GeForce RTX 3050 Ti Laptop GPU, Compute Capability 8.6\n\n15:39:26.473 [info] Using BFC allocator.\n\n15:39:26.473 [info] XLA backend allocating 3605004288 bytes on device 0 for BFCAllocator.\n\n15:39:28.272 [info] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.\n\n```\n\n\n\n```\n#Nx.Tensor \n [\n [1.0],\n [1.0]\n ]\n>\n```\n\nYou can also JIT compile functions explicitly via `Nx.Defn.jit` or compiler-specific JIT APIs. 
This is useful when running benchmarks against various backends:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model)\n\n# These will both JIT compile with EXLA\nexla_init_fn = Nx.Defn.jit(init_fn, compiler: EXLA)\nexla_predict_fn = EXLA.jit(predict_fn)\n```\n\n\n\n```\n#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>\n```\n\n```elixir\nBenchee.run(\n %{\n \"elixir init\" => fn -> init_fn.(inputs, %{}) end,\n \"exla init\" => fn -> exla_init_fn.(inputs, %{}) end\n },\n time: 10,\n memory_time: 5,\n warmup: 2\n)\n```\n\n\n\n```\nWarning: the benchmark elixir init is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nWarning: the benchmark exla init is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nOperating System: Linux\nCPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz\nNumber of Available Cores: 4\nAvailable memory: 24.95 GB\nElixir 1.13.4\nErlang 25.0.4\n\nBenchmark suite executing with the following configuration:\nwarmup: 2 s\ntime: 10 s\nmemory time: 5 s\nreduction time: 0 ns\nparallel: 1\ninputs: none specified\nEstimated total run time: 34 s\n\nBenchmarking elixir init ...\nBenchmarking exla init ...\n\nName ips average deviation median 99th %\nexla init 3.79 K 0.26 ms ±100.40% 0.24 ms 0.97 ms\nelixir init 0.52 K 1.91 ms ±35.03% 1.72 ms 3.72 ms\n\nComparison:\nexla init 3.79 K\nelixir init 0.52 K - 7.25x slower +1.65 ms\n\nMemory usage statistics:\n\nName Memory usage\nexla init 9.80 KB\nelixir init 644.63 KB - 65.80x memory usage +634.83 KB\n\n**All measurements for memory usage were the same**\n```\n\n```elixir\nBenchee.run(\n %{\n \"elixir predict\" => fn -> predict_fn.(params, inputs) end,\n \"exla predict\" => fn -> exla_predict_fn.(params, inputs) end\n },\n time: 10,\n memory_time: 5,\n warmup: 2\n)\n```\n\n\n\n```\nWarning: the benchmark elixir predict is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nWarning: the benchmark exla predict is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nOperating System: Linux\nCPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz\nNumber of Available Cores: 4\nAvailable memory: 24.95 GB\nElixir 1.13.4\nErlang 25.0.4\n\nBenchmark suite executing with the following configuration:\nwarmup: 2 s\ntime: 10 s\nmemory time: 5 s\nreduction time: 0 ns\nparallel: 1\ninputs: none specified\nEstimated total run time: 34 s\n\nBenchmarking elixir predict ...\nBenchmarking exla predict ...\n\nName ips average deviation median 99th %\nexla predict 2.32 K 0.43 ms ±147.05% 0.34 ms 1.61 ms\nelixir predict 0.28 K 3.53 ms ±42.21% 3.11 ms 7.26 ms\n\nComparison:\nexla predict 2.32 K\nelixir predict 0.28 K - 
8.20x slower +3.10 ms\n\nMemory usage statistics:\n\nName Memory usage\nexla predict 10.95 KB\nelixir predict 91.09 KB - 8.32x memory usage +80.14 KB\n\n**All measurements for memory usage were the same**\n```\n\nNotice how calls to EXLA variants are significantly faster. These speedups become more pronounced with more complex models and workflows.\n\n\n\nIt's important to note that in order to use a given library as an Nx compiler, it must implement the Nx compilation behaviour. For example, you cannot invoke Torchx as an Nx compiler because it does not support JIT compilation at this time.","ref":"accelerating_axon.html#using-nx-compilers-in-axon","title":"Using Nx Compilers in Axon - Accelerating Axon","type":"extras"},{"doc":"While Nx mostly tries to standardize behavior across compilers and backends, some behaviors are backend-specific. For example, the API for choosing an acceleration platform (e.g. CUDA/ROCm/TPU) is backend-specific. You should refer to your chosen compiler or backend's documentation for information on targeting various accelerators. Typically, you only need to change a few configuration options and your code will run as-is on a chosen accelerator.","ref":"accelerating_axon.html#a-note-on-cpus-gpus-tpus","title":"A Note on CPUs/GPUs/TPUs - Accelerating Axon","type":"extras"},{"doc":"# Training and inference mode\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"training_and_inference_mode.html","title":"Training and inference mode","type":"extras"},{"doc":"Some layers have different considerations and behavior when running during model training versus model inference. For example *dropout layers* are intended only to be used during training as a form of model regularization. Certain stateful layers like *batch normalization* keep a running internal state which changes during training mode but remains fixed during inference mode. Axon supports mode-dependent execution behavior via the `:mode` option passed to all building, compilation, and execution methods. By default, all models are built in inference mode. You can see this behavior by adding a dropout layer with a dropout rate of nearly 1. In inference mode this layer will have no effect:\n\n```elixir\ninputs = Nx.iota({2, 8}, type: :f32)\n\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(4)\n |> Axon.sigmoid()\n |> Axon.dropout(rate: 0.99)\n |> Axon.dense(1)\n\n{init_fn, predict_fn} = Axon.build(model)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nYou can also explicitly specify the mode:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model, mode: :inference)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nIt's important that you know which mode your models were compiled for, as a model built in `:inference` mode will behave drastically differently from a model built in `:train` mode.","ref":"training_and_inference_mode.html#executing-models-in-inference-mode","title":"Executing models in inference mode - Training and inference mode","type":"extras"},{"doc":"By specifying `mode: :train`, you tell your models to execute in training mode. 
You can see the effects of this behavior here:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n%{\n prediction: #Nx.Tensor ,\n state: %{\n \"dropout_0\" => %{\n \"key\" => #Nx.Tensor \n }\n }\n}\n```\n\nFirst, notice that your model now returns a map with keys `:prediction` and `:state`. `:prediction` contains the actual model prediction, while `:state` contains the updated state for any stateful layers such as batch norm. When writing custom training loops, you should extract `:state` and use it in conjunction with the updates API to ensure your stateful layers are updated correctly. If your model has stateful layers, `:state` will look similar to your model's parameter map:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(4)\n |> Axon.sigmoid()\n |> Axon.batch_norm()\n |> Axon.dense(1)\n\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n%{\n prediction: #Nx.Tensor ,\n state: %{\n \"batch_norm_0\" => %{\n \"mean\" => #Nx.Tensor ,\n \"var\" => #Nx.Tensor \n }\n }\n}\n```","ref":"training_and_inference_mode.html#executing-models-in-training-mode","title":"Executing models in training mode - Training and inference mode","type":"extras"},{"doc":"# Your first training loop\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"your_first_training_loop.html","title":"Your first training loop","type":"extras"},{"doc":"Axon generalizes the concept of training, evaluation, hyperparameter optimization, and more into the `Axon.Loop` API. Axon loops are instrumented reductions over Elixir Streams - that basically means you can accumulate some state over an Elixir `Stream` and control different points in the loop execution.\n\nWith Axon, you'll most commonly implement and work with supervised training loops. Because supervised training loops are so common in deep learning, Axon has a loop factory function which takes care of most of the boilerplate of creating a supervised training loop for you. In the beginning of your deep learning journey, you'll almost exclusively use Axon's loop factories to create and run loops.\n\nAxon's supervised training loop assumes you have an input stream of data with entries that look like:\n\n`{batch_inputs, batch_labels}`\n\nEach entry is a batch of input data with a corresponding batch of labels. You can simulate some real training data by constructing an Elixir stream:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n```\n\n\n\n```\n#Function<51.6935098/2 in Stream.repeatedly/1>\n```\n\nThe most basic supervised training loop in Axon requires 3 things:\n\n1. An Axon model\n2. A loss function\n3. An optimizer\n\nYou can construct an Axon model using the knowledge you've gained from going through the model creation guides:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n```\n\n\n\n```\n#Axon \n```\n\nAxon comes with built-in loss functions and optimizers which you can use directly when constructing your training loop. 
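As a rough sketch of what those atoms stand for (treat the exact option values here as assumptions - the real defaults live in `Axon.Losses` and `Polaris.Optimizers`), the atom forms correspond to ordinary functions:\n\n```elixir\n# :mean_squared_error and :sgd below are roughly equivalent to:\nloss_fn = &Axon.Losses.mean_squared_error(&1, &2, reduction: :mean)\noptimizer = Polaris.Optimizers.sgd(learning_rate: 1.0e-2)\n```\n\n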
To construct your training loop, you use `Axon.Loop.trainer/3`:\n\n```elixir\nloop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nYou'll notice that `Axon.Loop.trainer/3` returns an `%Axon.Loop{}` data structure. This data structure contains information which Axon uses to control the execution of the loop. In order to run the loop, you need to explicitly pass it to `Axon.Loop.run/4`:\n\n```elixir\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0563023\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\n`Axon.Loop.run/4` expects a loop to execute, some data to loop over, and any initial state you explicitly want your loop to start with. `Axon.Loop.run/4` will then iterate over your data, executing a step function on each batch, and accumulating some generic loop state. In the case of a supervised training loop, this generic loop state actually represents training state including your model's trained parameters.\n\n`Axon.Loop.run/4` also accepts options which control the loop's execution. This includes `:iterations` which controls the number of iterations per epoch a loop should execute for, and `:epochs` which controls the number of epochs a loop should execute for:\n\n```elixir\nAxon.Loop.run(loop, train_data, %{}, epochs: 3, iterations: 500)\n```\n\n\n\n```\nEpoch: 0, Batch: 450, loss: 0.0935063\nEpoch: 1, Batch: 450, loss: 0.0576384\nEpoch: 2, Batch: 450, loss: 0.0428323\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou may have noticed that by default `Axon.Loop.trainer/3` configures your loop to log information about training progress every 50 iterations. 
You can control this when constructing your supervised training loop with the `:log` option:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd, log: 100)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 900, loss: 0.1492715\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"your_first_training_loop.html#creating-an-axon-training-loop","title":"Creating an Axon training loop - Your first training loop","type":"extras"},{"doc":"# Instrumenting loops with metrics\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"instrumenting_loops_with_metrics.html","title":"Instrumenting loops with metrics","type":"extras"},{"doc":"Oftentimes when executing a loop you want to keep track of various metrics such as accuracy or precision. For training loops, Axon by default only tracks loss; however, you can instrument the loop with additional built-in metrics. For example, you might want to track mean-absolute error on top of a mean-squared error loss:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(:mean_absolute_error)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},\n \"mean_absolute_error\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n :mean_absolute_error}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nWhen specifying a metric, you can specify an atom which maps to any of the metrics defined in `Axon.Metrics`. You can also define custom metrics. For more information on custom metrics, see [Writing custom metrics](writing_custom_metrics.livemd).\n\nWhen you run a loop with metrics, Axon will aggregate that metric over the course of the loop execution. For training loops, Axon will also report the aggregate metric in the training logs:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0590630 mean_absolute_error: 0.1463431\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nBy default, the metric will have a name which matches the string form of the given metric. 
You can give metrics semantic meaning by providing an explicit name:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.metric(:mean_absolute_error, \"model error\")\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0607362 model error: 0.1516546\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nAxon's default aggregation behavior is to aggregate metrics with a running average; however, you can customize this behavior by specifying an explicit accumulation function. Built-in accumulation functions are `:running_average` and `:running_sum`:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.metric(:mean_absolute_error, \"total error\", :running_sum)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0688004 total error: 151.4876404\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"instrumenting_loops_with_metrics.html#adding-metrics-to-training-loops","title":"Adding metrics to training loops - Instrumenting loops with metrics","type":"extras"},{"doc":"# Your first evaluation loop\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"your_first_evaluation_loop.html","title":"Your first evaluation loop","type":"extras"},{"doc":"Once you have a trained model, it's necessary to test the trained model on some test data. Axon's loop abstraction is general enough to work for both training and evaluating models. Just as Axon implements a canned `Axon.Loop.trainer/3` factory, it also implements a canned `Axon.Loop.evaluator/1` factory.\n\n`Axon.Loop.evaluator/1` creates an evaluation loop which you can instrument with metrics to measure the performance of a trained model on test data. First, you need a trained model:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\ntrain_loop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)\n\ndata =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\ntrained_model_state = Axon.Loop.run(train_loop, data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.1285532\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nRunning loops with `Axon.Loop.trainer/3` returns a trained model state which you can use to evaluate your model. 
To construct an evaluation loop, you just call `Axon.Loop.evaluator/1` with your pre-trained model:\n\n```elixir\ntest_loop = Axon.Loop.evaluator(model)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nNext, you'll need to instrument your test loop with the metrics you'd like to aggregate:\n\n```elixir\ntest_loop = test_loop |> Axon.Loop.metric(:mean_absolute_error)\n```\n\n\n\n```\n#Axon.Loop ,\n :mean_absolute_error}\n },\n handlers: %{\n completed: [],\n epoch_completed: [],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nFinally, you can run your loop on test data. Because you want to test your trained model, you need to provide your model's initial state to the test loop:\n\n```elixir\nAxon.Loop.run(test_loop, data, trained_model_state, iterations: 1000)\n```\n\n\n\n```\nBatch: 999, mean_absolute_error: 0.0856894\n```\n\n\n\n```\n%{\n 0 => %{\n \"mean_absolute_error\" => #Nx.Tensor \n }\n}\n```","ref":"your_first_evaluation_loop.html#creating-an-axon-evaluation-loop","title":"Creating an Axon evaluation loop - Your first evaluation loop","type":"extras"},{"doc":"# Using loop event handlers\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"using_loop_event_handlers.html","title":"Using loop event handlers","type":"extras"},{"doc":"Oftentimes you want more fine-grained control over things that happen during loop execution. For example, you might want to save loop state to a file every 500 iterations, or log some output to `:stdout` at the end of every epoch. Axon loops allow more fine-grained control via events and event handlers.\n\nAxon fires a number of events during loop execution which allow you to instrument various points in the loop execution cycle. You can attach event handlers to any of these events:\n\n\n\n```elixir\nevents = [\n :started, # After loop state initialization\n :epoch_started, # On epoch start\n :iteration_started, # On iteration start\n :iteration_completed, # On iteration complete\n :epoch_completed, # On epoch complete\n :epoch_halted, # On epoch halt, if early halted\n :halted, # On loop halt, if early halted\n :completed # On loop completion\n]\n```\n\nAxon packages a number of common loop event handlers for you out of the box. These handlers should cover most of the common event handlers you would need to write in practice. Axon also allows for custom event handlers. See [Writing custom event handlers](writing_custom_event_handlers.livemd) for more information.\n\nAn event handler will take the current loop state at the time of the fired event, and alter or use it in some way before returning control back to the main loop execution. You can attach any of Axon's pre-packaged event handlers to a loop by using the function directly. 
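Conceptually, a handler is just a function of the loop state; as a sketch of the shape (the return-tuple convention here is taken from the custom event handlers guide):\n\n```elixir\n# A handler receives the current %Axon.Loop.State{} and returns a\n# control atom along with the (possibly updated) state.\nhandler = fn %Axon.Loop.State{} = state ->\n {:continue, state}\nend\n```\n\n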
For example, if you want to checkpoint loop state at the end of every epoch, you can use `Axon.Loop.checkpoint/2`:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.checkpoint(event: :epoch_completed)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<17.37390314/1 in Axon.Loop.checkpoint/2>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>},\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nNow when you execute your loop, it will save a checkpoint at the end of every epoch:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.5345965\nEpoch: 1, Batch: 50, loss: 0.4578816\nEpoch: 2, Batch: 50, loss: 0.4527244\nEpoch: 3, Batch: 50, loss: 0.4466343\nEpoch: 4, Batch: 50, loss: 0.4401709\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou can also use event handlers for things as simple as implementing custom logging with the pre-packaged `Axon.Loop.log/4` event handler:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.log(fn _state -> \"epoch is over\\n\" end, event: :epoch_completed, device: :stdio)\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.3220241\nepoch is over\nEpoch: 1, Batch: 50, loss: 0.2309804\nepoch is over\nEpoch: 2, Batch: 50, loss: 0.1759415\nepoch is over\nEpoch: 3, Batch: 50, loss: 0.1457551\nepoch is over\nEpoch: 4, Batch: 50, loss: 0.1247821\nepoch is over\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nFor even more fine-grained control over when event handlers fire, you can add filters. 
For example, if you only want to checkpoint loop state every 2 epochs, you can use a filter:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.checkpoint(event: :epoch_completed, filter: [every: 2])\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.3180207\nEpoch: 1, Batch: 50, loss: 0.1975918\nEpoch: 2, Batch: 50, loss: 0.1353940\nEpoch: 3, Batch: 50, loss: 0.1055405\nEpoch: 4, Batch: 50, loss: 0.0890203\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nAxon event handlers support both keyword and function filters. Keyword filters include keywords such as `:every`, `:once`, and `:always`. Function filters are arity-1 functions which accept the current loop state and return a boolean.","ref":"using_loop_event_handlers.html#adding-event-handlers-to-training-loops","title":"Adding event handlers to training loops - Using loop event handlers","type":"extras"},{"doc":"\n\n# Custom models, loss functions, and optimizers\n\n```elixir\nMix.install([\n {:axon, github: \"elixir-nx/axon\"},\n {:nx, \"~> 0.3.0\", github: \"elixir-nx/nx\", sparse: \"nx\", override: true}\n])\n```\n\n\n\n```\n:ok\n```","ref":"custom_models_loss_optimizers.html","title":"Custom models, loss functions, and optimizers","type":"extras"},{"doc":"In the [Your first training loop](your_first_training_loop.livemd) guide, you learned how to declare a supervised training loop using `Axon.Loop.trainer/3` with a model, loss function, and optimizer. Your overall model and loop declaration looked something like this:\n\n\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)\n```\n\nThis example uses an `%Axon{}` struct to represent your `model` to train, and atoms to represent your loss function and optimizer. Some of your problems will require a bit more flexibility than this example affords. Fortunately, `Axon.Loop.trainer/3` is designed for flexibility.\n\nFor example, if your model cannot be cleanly represented as an `%Axon{}` model, you can opt instead to define custom initialization and forward functions to pass to `Axon.Loop.trainer/3`. Actually, `Axon.Loop.trainer/3` is doing this for you under the hood - the ability to pass an `%Axon{}` struct directly is just a convenience:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nlowered_model = {init_fn, predict_fn} = Axon.build(model)\n\nloop = Axon.Loop.trainer(lowered_model, :mean_squared_error, :sgd)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<5.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<23.20267452/1 in Axon.Loop.log/5>,\n #Function<3.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n metrics: %{\n \"loss\" => {#Function<12.6031754/3 in Axon.Metrics.running_average/1>,\n #Function<6.20267452/2 in Axon.Loop.build_loss_fn/1>}\n },\n ...\n>\n```\n\nNotice that `Axon.Loop.trainer/3` handles the \"lowered\" form of an Axon model without issue. 
When you pass an `%Axon{}` struct, the trainer factory converts it to a lowered representation for you. With this construct, you can build custom models entirely with Nx `defn`, or readily mix your Axon models into custom workflows without worrying about compatibility with the `Axon.Loop` API:\n\n```elixir\ndefmodule CustomModel do\n import Nx.Defn\n\n defn custom_predict_fn(model_predict_fn, params, input) do\n %{prediction: preds} = out = model_predict_fn.(params, input)\n %{out | prediction: Nx.cos(preds)}\n end\nend\n```\n\n\n\n```\n{:module, CustomModel, <<70, 79, 82, 49, 0, 0, 9, ...>>, {:custom_predict_fn, 3}}\n```\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n xs = Nx.random_normal({8, 1})\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\ncustom_predict_fn = &CustomModel.custom_predict_fn(predict_fn, &1, &2)\n\nloop = Axon.Loop.trainer({init_fn, custom_predict_fn}, :mean_squared_error, :sgd)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 500)\n```\n\n\n\n```\nEpoch: 0, Batch: 500, loss: 0.3053460\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"custom_models_loss_optimizers.html#using-custom-models-in-training-loops","title":"Using custom models in training loops - Custom models, loss functions, and optimizers","type":"extras"},{"doc":"Just as `Axon.Loop.trainer/3` allows more flexibility with models, it also supports more flexible loss functions. In most cases, you can get away with using one of Axon's built-in loss functions by specifying an atom. Atoms map directly to a loss-function defined in `Axon.Losses`. Under the hood, `Axon.Loop.trainer/3` is doing something like:\n\n\n\n```elixir\nloss_fn = &apply(Axon.Losses, loss_atom, [&1, &2])\n```\n\nRather than pass an atom, you can pass your own custom arity-2 function to `Axon.Loop.trainer/3`. This arises most often in cases where you want to control some parameters of the loss function, such as the batch-level reduction:\n\n```elixir\nloss_fn = &Axon.Losses.mean_squared_error(&1, &2, reduction: :sum)\n\nloop = Axon.Loop.trainer(model, loss_fn, :sgd)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<5.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<23.20267452/1 in Axon.Loop.log/5>,\n #Function<3.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n metrics: %{\n \"loss\" => {#Function<12.6031754/3 in Axon.Metrics.running_average/1>,\n #Function<41.3316493/2 in :erl_eval.expr/6>}\n },\n ...\n>\n```\n\nYou can also define your own custom loss functions, so long as they match the following spec:\n\n\n\n```elixir\nloss(\n y_true :: tensor[batch, ...] | container(tensor),\n y_preds :: tensor[batch, ...] | container(tensor)\n ) :: scalar\n```\n\nThis is useful for constructing loss functions when dealing with multi-output scenarios. 
For example, it's very easy to construct a custom loss function which is a weighted average of several loss functions on multiple inputs:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n xs = Nx.random_normal({8, 1})\n y1 = Nx.sin(xs)\n y2 = Nx.cos(xs)\n {xs, {y1, y2}}\n end)\n\nshared =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n\ny1 = Axon.dense(shared, 1)\ny2 = Axon.dense(shared, 1)\n\nmodel = Axon.container({y1, y2})\n\ncustom_loss_fn = fn {y_true1, y_true2}, {y_pred1, y_pred2} ->\n loss1 = Axon.Losses.mean_squared_error(y_true1, y_pred1, reduction: :mean)\n loss2 = Axon.Losses.mean_squared_error(y_true2, y_pred2, reduction: :mean)\n\n loss1\n |> Nx.multiply(0.4)\n |> Nx.add(Nx.multiply(loss2, 0.6))\nend\n\nmodel\n|> Axon.Loop.trainer(custom_loss_fn, :sgd)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 1000, loss: 0.1098235\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_3\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"custom_models_loss_optimizers.html#using-custom-loss-functions-in-training-loops","title":"Using custom loss functions in training loops - Custom models, loss functions, and optimizers","type":"extras"},{"doc":"As you might expect, it's also possible to customize the optimizer passed to `Axon.Loop.trainer/3`. If you read the `Polaris.Updates` documentation, you'll learn that optimizers are actually represented as the tuple `{init_fn, update_fn}` where `init_fn` initializes optimizer state from model state and `update_fn` scales gradients from optimizer state, gradients, and model state.\n\nYou likely won't have to implement a custom optimizer; however, you should know how to construct optimizers with different hyperparameters and how to apply different modifiers to different optimizers to customize the optimization process.\n\nWhen you specify an optimizer as an atom in `Axon.Loop.trainer/3`, it maps directly to an optimizer declared in `Polaris.Optimizers`. You can instead opt to declare your optimizer directly. 
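Because an optimizer is just that tuple, you can also compose one by hand from `Polaris.Updates` combinators. A minimal sketch, assuming the `scale_by_adam/0` and `scale/2` combinators (the learning rate here is arbitrary):\n\n```elixir\n# Scale gradients by Adam's update rule, then by a negative step size\n# so that applying the updates descends the loss.\noptimizer =\n Polaris.Updates.scale_by_adam()\n |> Polaris.Updates.scale(-1.0e-3)\n```\n\n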
This is most useful for controlling things like the learning rate and various optimizer hyperparameters:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n xs = Nx.random_normal({8, 1})\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\noptimizer = {_init_optimizer_fn, _update_fn} = Polaris.Optimizers.sgd(learning_rate: 1.0e-3)\n\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, optimizer)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 1000, loss: 0.0992607\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"custom_models_loss_optimizers.html#using-custom-optimizers-in-training-loops","title":"Using custom optimizers in training loops - Custom models, loss functions, and optimizers","type":"extras"},{"doc":"# Writing custom metrics\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"writing_custom_metrics.html","title":"Writing custom metrics","type":"extras"},{"doc":"When passing an atom to `Axon.Loop.metric/5`, Axon dispatches the function to a built-in function in `Axon.Metrics`. If you find you'd like to use a metric that does not exist in `Axon.Metrics`, you can define a custom function:\n\n```elixir\ndefmodule CustomMetric do\n import Nx.Defn\n\n defn my_weird_metric(y_true, y_pred) do\n Nx.atan2(y_true, y_pred) |> Nx.sum()\n end\nend\n```\n\n\n\n```\n{:module, CustomMetric, <<70, 79, 82, 49, 0, 0, 8, ...>>, true}\n```\n\nThen you can pass that directly to `Axon.Loop.metric/5`. 
You must provide a name for your custom metric:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(&CustomMetric.my_weird_metric/2, \"my weird metric\")\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},\n \"my weird metric\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n &CustomMetric.my_weird_metric/2}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nThen when running, Axon will invoke your custom metric function and accumulate it with the given aggregator:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0681635 my weird metric: -5.2842808\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nWhile the metric defaults are designed with supervised training loops in mind, they can be used for much more flexible purposes. By default, metrics look for the fields `:y_true` and `:y_pred` in the given loop's step state. They then apply the given metric function on those inputs. You can also define metrics which work on other fields. 
For example you can track the running average of a given parameter with a metric just by defining a custom output transform:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\noutput_transform = fn %{model_state: model_state} ->\n [model_state[\"dense_0\"][\"kernel\"]]\nend\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(&Nx.mean/1, \"dense_0_kernel_mean\", :running_average, output_transform)\n |> Axon.Loop.metric(&Nx.variance/1, \"dense_0_kernel_var\", :running_average, output_transform)\n```\n\n\n\n```\n#Axon.Loop ,\n &Nx.mean/1},\n \"dense_0_kernel_var\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n &Nx.variance/1},\n \"loss\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nAxon will apply your custom output transform to the loop's step state and forward the result to your custom metric function:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, dense_0_kernel_mean: -0.1978206 dense_0_kernel_var: 0.2699870 loss: 0.0605523\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou can also define custom accumulation functions. Axon has definitions for computing running averages and running sums; however, you might find you need something like an exponential moving average:\n\n```elixir\ndefmodule CustomAccumulator do\n import Nx.Defn\n\n defn running_ema(acc, obs, _i, opts \\\\ []) do\n opts = keyword!(opts, alpha: 0.9)\n obs * opts[:alpha] + acc * (1 - opts[:alpha])\n end\nend\n```\n\n\n\n```\n{:module, CustomAccumulator, <<70, 79, 82, 49, 0, 0, 11, ...>>, true}\n```\n\nYour accumulator must be an arity-3 function which accepts the current accumulated value, the current observation, and the current iteration and returns the aggregated metric. 
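For instance, a running maximum fits this shape too. A minimal sketch (the name `running_max` is illustrative, not part of `Axon.Metrics`):\n\n```elixir\n# An arity-3 accumulator: keep the largest observation seen so far,\n# ignoring the iteration counter.\nrunning_max = fn acc, obs, _i -> Nx.max(acc, obs) end\n```\n\n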
You can pass a function directly as an accumulator in your metric:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\noutput_transform = fn %{model_state: model_state} ->\n [model_state[\"dense_0\"][\"kernel\"]]\nend\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(\n &Nx.mean/1,\n \"dense_0_kernel_ema_mean\",\n &CustomAccumulator.running_ema/3,\n output_transform\n )\n```\n\n\n\n```\n#Axon.Loop ,\n &Nx.mean/1},\n \"loss\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nThen when you run the loop, Axon will use your custom accumulator:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, dense_0_kernel_ema_mean: -0.0139760 loss: 0.0682910\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"writing_custom_metrics.html#writing-custom-metrics","title":"Writing custom metrics - Writing custom metrics","type":"extras"},{"doc":"# Writing custom event handlers\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"writing_custom_event_handlers.html","title":"Writing custom event handlers","type":"extras"},{"doc":"If you require functionality not offered by any of Axon's built-in event handlers, then you'll need to write a custom event handler. Custom event handlers are functions which accept loop state, perform some action, and then defer execution back to the main loop. 
For example, you can write custom loop handlers which visualize model outputs, communicate with an external Kino process, or simply halt the loop based on some criteria.\n\nAll event handlers must accept an `%Axon.Loop.State{}` struct and return a tuple of `{control_term, state}` where `control_term` is one of `:continue`, `:halt_epoch`, or `:halt_loop` and `state` is the updated loop state:\n\n```elixir\ndefmodule CustomEventHandler0 do\n alias Axon.Loop.State\n\n def my_weird_handler(%State{} = state) do\n IO.puts(\"My weird handler: fired\")\n {:continue, state}\n end\nend\n```\n\n\n\n```\n{:module, CustomEventHandler0, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:my_weird_handler, 1}}\n```\n\nTo register event handlers, you use `Axon.Loop.handle_event/4`:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler0.my_weird_handler/1)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {&CustomEventHandler0.my_weird_handler/1,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>},\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nAxon will trigger your custom handler to run on the attached event:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.0990703\nMy weird handler: fired\nEpoch: 1, Batch: 50, loss: 0.0567622\nMy weird handler: fired\nEpoch: 2, Batch: 50, loss: 0.0492784\nMy weird handler: fired\nEpoch: 3, Batch: 50, loss: 0.0462587\nMy weird handler: fired\nEpoch: 4, Batch: 50, loss: 0.0452806\nMy weird handler: fired\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou can use event handlers to early-stop a loop or loop epoch by returning a `:halt_*` control term. Halt control terms can be one of `:halt_epoch` or `:halt_loop`. `:halt_epoch` halts the current epoch and continues to the next. 
`:halt_loop` halts the loop altogether.\n\n```elixir\ndefmodule CustomEventHandler1 do\n alias Axon.Loop.State\n\n def always_halts(%State{} = state) do\n IO.puts(\"stopping loop\")\n {:halt_loop, state}\n end\nend\n```\n\n\n\n```\n{:module, CustomEventHandler1, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:always_halts, 1}}\n```\n\nThe loop will immediately stop executing and return the current state at the time it was halted:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler1.always_halts/1)\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.2201974\nstopping loop\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nNote that halting an epoch will fire a different event than completing an epoch. So if you implement a custom handler to halt the loop when an epoch completes, it will never fire if the epoch always halts prematurely:\n\n```elixir\ndefmodule CustomEventHandler2 do\n alias Axon.Loop.State\n\n def always_halts_epoch(%State{} = state) do\n IO.puts(\"\\nstopping epoch\")\n {:halt_epoch, state}\n end\n\n def always_halts_loop(%State{} = state) do\n IO.puts(\"stopping loop\\n\")\n {:halt_loop, state}\n end\nend\n```\n\n\n\n```\n{:module, CustomEventHandler2, <<70, 79, 82, 49, 0, 0, 8, ...>>, {:always_halts_loop, 1}}\n```\n\nIf you run these handlers in conjunction, the loop will not terminate prematurely:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.handle_event(:iteration_completed, &CustomEventHandler2.always_halts_epoch/1)\n|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler2.always_halts_loop/1)\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 0, loss: 0.0000000\nstopping epoch\n\nstopping epoch\n\nstopping epoch\n\nstopping epoch\n\nstopping epoch\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou may access and update any portion of the loop state. Keep in mind that event handlers are **not** JIT-compiled, so you should be certain to manually JIT-compile any long-running or expensive operations.","ref":"writing_custom_event_handlers.html#writing-custom-event-handlers","title":"Writing custom event handlers - Writing custom event handlers","type":"extras"},{"doc":"# Converting ONNX models to Axon\n\n```elixir\nMix.install(\n [\n {:axon, \">= 0.5.0\"},\n {:exla, \">= 0.5.0\"},\n {:axon_onnx, \">= 0.4.0\"},\n {:stb_image, \">= 0.6.0\"},\n {:kino, \">= 0.9.0\"},\n {:req, \">= 0.3.8\"}\n ]\n # for Nvidia GPU change to \"cuda111\" for CUDA 11.1+ or \"cuda118\" for CUDA 11.8\n # CUDA 12.x not supported by XLA\n # or you can put this value in ENV variables in Livebook settings\n # XLA_TARGET=cuda111\n # system_env: %{\"XLA_TARGET\" => xla_target}\n)\n```","ref":"onnx_to_axon.html","title":"Converting ONNX models to Axon","type":"extras"},{"doc":"Axon is a new machine learning capability, specific to Elixir. 
We would like to take\nadvantage of a large number of models that have been written in other languages and\nmachine learning frameworks. Let's take a look at how we could use a model developed\nin another language.\n\nConverting models developed by data scientists into a production-capable implementation is a\nchallenge for all languages and frameworks. [ONNX](https://onnx.ai/) is an interchange\nformat that allows models written in one language or framework to be converted into\nanother language and framework.\n\nThe source model must use constructs mapped into ONNX. Also, the destination framework must\nsupport the model's ONNX constructs. From an Elixir perspective, we are interested in ONNX models\nthat [axon_onnx](https://github.com/elixir-nx/axon_onnx) can convert into Axon models.\n\n\n\n#","ref":"onnx_to_axon.html#converting-an-onnx-model-into-axon","title":"Converting an ONNX model into Axon - Converting ONNX models to Axon","type":"extras"},{"doc":"\n\nElixir can get access to thousands of public models and your organization may have private models\nwritten in other languages and frameworks. Axon will be hard pressed to quickly repeat the\ncountless person-hours spent on developing models in other frameworks like TensorFlow and PyTorch.\nHowever, if the model can be converted into ONNX and then into Axon, we can directly run the model\nin Elixir.\n\n\n\n#","ref":"onnx_to_axon.html#why-is-onnx-important-to-axon","title":"Why is ONNX important to Axon? - Converting ONNX models to Axon","type":"extras"},{"doc":"\n\nAxon runs on top of [Nx (Numerical Elixir)](https://hexdocs.pm/nx). Nx has backends for\nboth Google's XLA (via EXLA) and PyTorch (via Torchx). In this guide, we will use EXLA.\nWe'll also convert from an ONNX model into an Axon model using\n[`axon_onnx`](https://github.com/elixir-nx/axon_onnx).\n\nYou can find all dependencies in the installation cell at the top of the notebook.\nIn there, you will also find the `XLA_TARGET` environment variable which you can set\nto \"cuda111\" or \"rocm\" if you have any of those GPUs available. Let's also configure\nNx to store tensors in EXLA by default:\n\n```elixir\n# Nx.default_backend(EXLA.Backend)\n```\n\nWe'll also need local access to ONNX files. For this notebook, the models/onnx folder\ncontains the ONNX model file. This notebook assumes the output file location will be\nin models/axon. Copy your ONNX model files into the models/onnx folder.\n\nThis opinionated module presents a simple API for loading in an ONNX file and saving\nthe converted Axon model in the provided directory. 
This API will allow us to\nsave multiple models pretty quickly.\n\n```elixir\ndefmodule OnnxToAxon do\n @moduledoc \"\"\"\n Helper module from ONNX to Axon.\n \"\"\"\n\n @doc \"\"\"\n Loads an ONNX model into Axon and saves the model","ref":"onnx_to_axon.html#setting-up-our-environment","title":"Setting up our environment - Converting ONNX models to Axon","type":"extras"},{"doc":"OnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)\n\n \"\"\"\n def onnx_axon(path_to_onnx_file, path_to_axon_dir) do\n axon_name = axon_name_from_onnx_path(path_to_onnx_file)\n path_to_axon = Path.join(path_to_axon_dir, axon_name)\n\n {model, parameters} = AxonOnnx.import(path_to_onnx_file)\n model_bytes = Axon.serialize(model, parameters)\n File.write!(path_to_axon, model_bytes)\n end\n\n defp axon_name_from_onnx_path(onnx_path) do\n model_root = onnx_path |> Path.basename() |> Path.rootname()\n \"#{model_root}.axon\"\n end\nend\n```","ref":"onnx_to_axon.html#examples","title":"Examples - Converting ONNX models to Axon","type":"extras"},{"doc":"For this example, we'll use a couple ONNX models that have been saved in the Huggingface Hub.\n\n\n\nThe ONNX models were trained in Fast.ai (PyTorch) using the following notebooks:\n\n* https://github.com/meanderingstream/fastai_course22/blob/main/saving-a-basic-fastai-model-in-onnx.ipynb\n* https://github.com/meanderingstream/fastai_course22/blob/main/saving-cat-dog-breed-fastai-model-in-onnx.ipynb\n\nTo repeat this notebook, the onnx files for this notebook can be found on huggingface hub. Download the onnx models from:\n\n* https://huggingface.co/ScottMueller/Cats_v_Dogs.ONNX\n* https://huggingface.co/ScottMueller/Cat_Dog_Breeds.ONNX\n\nDownload the files and place them in a directory of your choice. By default, we will assume you downloaded them to the same directory as the notebook:\n\n```elixir\nFile.cd!(__DIR__)\n```\n\nNow let's convert an ONNX model into Axon\n\n```elixir\npath_to_onnx_file = \"cats_v_dogs.onnx\"\npath_to_axon_dir = \".\"\nOnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)\n```\n\n```elixir\npath_to_onnx_file = \"cat_dog_breeds.onnx\"\npath_to_axon_dir = \".\"\nOnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)\n```","ref":"onnx_to_axon.html#onnx-model","title":"ONNX model - Converting ONNX models to Axon","type":"extras"},{"doc":"To run inference on the model, you'll need 10 images focused on cats or dogs. You can download the images used in training the model at:\n\n\"https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet.tgz\"\n\nOr you can find or use your own images. In this notebook, we are going to use the local copies of the Oxford Pets dataset that was used in training the model.\n\n\n\nLet's load the Axon model.\n\n```elixir\ncats_v_dogs = File.read!(\"cats_v_dogs.axon\")\n{cats_v_dogs_model, cats_v_dogs_params} = Axon.deserialize(cats_v_dogs)\n```\n\nWe need a tensor representation of an image. Let's start by looking at samples of\nour data.\n\n```elixir\nFile.read!(\"oxford-iiit-pet/images/havanese_71.jpg\")\n|> Kino.Image.new(:jpeg)\n```\n\nTo manipulate the images, we will use the `StbImage` library:\n\n```elixir\n{:ok, img} = StbImage.read_file(\"oxford-iiit-pet/images/havanese_71.jpg\")\n%StbImage{data: binary, shape: shape, type: type} = StbImage.resize(img, 224, 224)\n```\n\nNow let's work on a batch of images and convert them to tensors. 
Here are the images we will work with:\n\n```elixir\nfile_names = [\n \"havanese_71.jpg\",\n \"yorkshire_terrier_9.jpg\",\n \"Sphynx_206.jpg\",\n \"Siamese_95.jpg\",\n \"Egyptian_Mau_63.jpg\",\n \"keeshond_175.jpg\",\n \"samoyed_88.jpg\",\n \"British_Shorthair_122.jpg\",\n \"Russian_Blue_20.jpg\",\n \"boxer_99.jpg\"\n]\n```\n\nNext we resize the images:\n\n```elixir\nresized_images =\n Enum.map(file_names, fn file_name ->\n (\"oxford-iiit-pet/images/\" <> file_name)\n |> IO.inspect(label: file_name)\n |> StbImage.read_file!()\n |> StbImage.resize(224, 224)\n end)\n```\n\nAnd finally convert them into tensors by using `StbImage.to_nx/1`. The created tensor will have three axes, named `:height`, `:width`, and `:channels`, respectively. Our goal is to stack the tensors, then normalize and transpose their axes to the order expected by the neural network:\n\n```elixir\nimg_tensors =\n resized_images\n |> Enum.map(&StbImage.to_nx/1)\n |> Nx.stack(name: :index)\n |> Nx.divide(255.0)\n |> Nx.transpose(axes: [:index, :channels, :height, :width])\n```\n\nWith our input data, it is finally time to work on predictions. First, let's define a helper module:\n\n```elixir\ndefmodule Predictions do\n @doc \"\"\"\n When provided a Tensor of single label predictions, returns the best vocabulary match for\n each row in the prediction tensor.","ref":"onnx_to_axon.html#inference-on-onnx-derived-models","title":"Inference on ONNX derived models - Converting ONNX models to Axon","type":"extras"},{"doc":"# iex> Predictions.single_label_classification(predictions_batch, vocabulary)\n # [\"dog\", \"cat\", \"dog\"]\n\n \"\"\"\n def single_label_classification(predictions_batch, vocabulary) do\n IO.inspect(Nx.shape(predictions_batch), label: \"predictions batch shape\")\n\n for prediction_tensor <- Nx.to_batched(predictions_batch, 1) do\n {_prediction_value, prediction_label} =\n prediction_tensor\n |> Nx.to_flat_list()\n |> Enum.zip(vocabulary)\n |> Enum.max()\n\n prediction_label\n end\n end\nend\n```\n\nNow we deserialize the model,\n\n```elixir\n{cats_v_dogs_model, cats_v_dogs_params} = Axon.deserialize(cats_v_dogs)\n```\n\nrun a prediction using the `EXLA` compiler for performance,\n\n```elixir\ntensor_of_predictions =\n Axon.predict(cats_v_dogs_model, cats_v_dogs_params, img_tensors, compiler: EXLA)\n```\n\nand finally retrieve the predicted label:\n\n```elixir\ndog_cat_vocabulary = [\n \"dog\",\n \"cat\"\n]\n\nPredictions.single_label_classification(tensor_of_predictions, dog_cat_vocabulary)\n```\n\nLet's repeat the above process for the dog and cat breed model.\n\n```elixir\ncat_dog_vocabulary = [\n \"abyssinian\",\n \"american_bulldog\",\n \"american_pit_bull_terrier\",\n \"basset_hound\",\n \"beagle\",\n \"bengal\",\n \"birman\",\n \"bombay\",\n \"boxer\",\n \"british_shorthair\",\n \"chihuahua\",\n \"egyptian_mau\",\n \"english_cocker_spaniel\",\n \"english_setter\",\n \"german_shorthaired\",\n \"great_pyrenees\",\n \"havanese\",\n \"japanese_chin\",\n \"keeshond\",\n \"leonberger\",\n \"maine_coon\",\n \"miniature_pinscher\",\n \"newfoundland\",\n \"persian\",\n \"pomeranian\",\n \"pug\",\n \"ragdoll\",\n \"russian_blue\",\n \"saint_bernard\",\n \"samoyed\",\n \"scottish_terrier\",\n \"shiba_inu\",\n \"siamese\",\n \"sphynx\",\n \"staffordshire_bull_terrier\",\n \"wheaten_terrier\",\n \"yorkshire_terrier\"\n]\n```\n\n```elixir\ncat_dog_breeds = File.read!(\"cat_dog_breeds.axon\")\n{cat_dog_breeds_model, cat_dog_breeds_params} = 
Axon.deserialize(cat_dog_breeds)\n```\n\n```elixir\nAxon.predict(cat_dog_breeds_model, cat_dog_breeds_params, img_tensors)\n|> Predictions.single_label_classification(cat_dog_vocabulary)\n```\n\nFor cat and dog breeds, the model performed pretty well, but it was not perfect.","ref":"onnx_to_axon.html#examples","title":"Examples - Converting ONNX models to Axon","type":"extras"},{"doc":"# Modeling XOR with a neural network\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:kino_vega_lite, \"~> 0.1.6\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\n\nalias VegaLite, as: Vl\n```","ref":"xor.html","title":"Modeling XOR with a neural network","type":"extras"},{"doc":"In this notebook we create a model and teach it the **logical XOR**.\n\nEven though XOR seems like a trivial operation, it cannot be modeled using a single dense layer ([single-layer perceptron](https://en.wikipedia.org/wiki/Feedforward_neural_network#Single-layer_perceptron)). The underlying reason is that the classes in XOR are not linearly separable. We cannot draw a straight line to separate the points $(0,0)$, $(1,1)$ from the points $(0,1)$, $(1,0)$. To model this properly, we need to turn to deep learning methods. Deep learning is capable of learning non-linear relationships like XOR.","ref":"xor.html#introduction","title":"Introduction - Modeling XOR with a neural network","type":"extras"},{"doc":"Let's start with the model. We need two inputs, since XOR has two operands. We then concatenate them into a single input vector with `Axon.concatenate/3`. Then we have one hidden layer and one output layer, both of them dense.\n\nNote: the model is a sequential neural network. In Axon, we can conveniently create such a model by using the pipe operator (`|>`) to add layers one by one.\n\n```elixir\nx1_input = Axon.input(\"x1\", shape: {nil, 1})\nx2_input = Axon.input(\"x2\", shape: {nil, 1})\n\nmodel =\n x1_input\n |> Axon.concatenate(x2_input)\n |> Axon.dense(8, activation: :tanh)\n |> Axon.dense(1, activation: :sigmoid)\n```","ref":"xor.html#the-model","title":"The model - Modeling XOR with a neural network","type":"extras"},{"doc":"The next step is to prepare training data. Since we are modeling a well-defined operation, we can just generate random operands and compute the expected XOR result for them.\n\nThe training works with batches of examples, so we *repeatedly* generate a whole batch of inputs and the expected result.\n\n```elixir\nbatch_size = 32\n\ndata =\n Stream.repeatedly(fn ->\n x1 = Nx.random_uniform({batch_size, 1}, 0, 2)\n x2 = Nx.random_uniform({batch_size, 1}, 0, 2)\n y = Nx.logical_xor(x1, x2)\n\n {%{\"x1\" => x1, \"x2\" => x2}, y}\n end)\n```\n\nHere's how a sample batch looks:\n\n```elixir\nEnum.at(data, 0)\n```","ref":"xor.html#training-data","title":"Training data - Modeling XOR with a neural network","type":"extras"},{"doc":"It's time to train our model. In this case we use *binary cross entropy* for the loss and *stochastic gradient descent* as the optimizer. We use binary cross entropy because we can treat the task of computing XOR as a binary classification problem. We want our output to have a binary label `0` or `1`, and binary cross entropy is typically used in these cases. 
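For reference, for a true label $y$ and a predicted probability $\\hat{y}$, binary cross-entropy is $-\\big(y \\log(\\hat{y}) + (1 - y) \\log(1 - \\hat{y})\\big)$, averaged over the batch.\n\n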
Having defined our training loop, we run it with `Axon.Loop.run/4`.\n\n```elixir\nepochs = 10\n\nparams =\n model\n |> Axon.Loop.trainer(:binary_cross_entropy, :sgd)\n |> Axon.Loop.run(data, %{}, epochs: epochs, iterations: 1000)\n```","ref":"xor.html#training","title":"Training - Modeling XOR with a neural network","type":"extras"},{"doc":"Finally, we can test our model on sample data.\n\n```elixir\nAxon.predict(model, params, %{\n \"x1\" => Nx.tensor([[0]]),\n \"x2\" => Nx.tensor([[1]])\n})\n```\n\nTry other combinations of $x_1$ and $x_2$ and see what the output is. To improve the model performance, you can increase the number of training epochs.","ref":"xor.html#trying-the-model","title":"Trying the model - Modeling XOR with a neural network","type":"extras"},{"doc":"The original XOR we modeled only works with binary values $0$ and $1$, however our model operates in continuous space. This means that we can give it $x_1 = 0.5$, $x_2 = 0.5$ as input and we expect _some_ output. We can use this to visualize the non-linear relationship between inputs $x_1$, $x_2$ and outputs that our model has learned.\n\n```elixir\n# The number of points per axis, determines the resolution\nn = 50\n\n# We generate coordinates of inputs in the (n x n) grid\nx1 = Nx.iota({n, n}, axis: 0) |> Nx.divide(n) |> Nx.reshape({:auto, 1})\nx2 = Nx.iota({n, n}, axis: 1) |> Nx.divide(n) |> Nx.reshape({:auto, 1})\n\n# The output is also a real number, but we round it into one of the two classes\ny = Axon.predict(model, params, %{\"x1\" => x1, \"x2\" => x2}) |> Nx.round()\n\nVl.new(width: 300, height: 300)\n|> Vl.data_from_values(\n x1: Nx.to_flat_list(x1),\n x2: Nx.to_flat_list(x2),\n y: Nx.to_flat_list(y)\n)\n|> Vl.mark(:circle)\n|> Vl.encode_field(:x, \"x1\", type: :quantitative)\n|> Vl.encode_field(:y, \"x2\", type: :quantitative)\n|> Vl.encode_field(:color, \"y\", type: :nominal)\n```\n\nFrom the plot we can clearly see that during training our model learnt two clean boundaries to separate $(0,0)$, $(1,1)$ from $(0,1)$, $(1,0)$.","ref":"xor.html#visualizing-the-model-predictions","title":"Visualizing the model predictions - Modeling XOR with a neural network","type":"extras"},{"doc":"# Classifying handwritten digits\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:req, \"~> 0.3.1\"}\n])\n```","ref":"mnist.html","title":"Classifying handwritten digits","type":"extras"},{"doc":"This livebook will walk you through training a basic neural network using Axon, accelerated by the EXLA compiler. We'll be working on the [MNIST](https://en.wikipedia.org/wiki/MNIST_database) dataset which is a dataset of handwritten digits with corresponding labels. The goal is to train a model that correctly classifies these handwritten digits with a single label [0-9].","ref":"mnist.html#introduction","title":"Introduction - Classifying handwritten digits","type":"extras"},{"doc":"The MNIST dataset is available for free online. Using `Req` we'll download both training images and training labels. Both `train_images` and `train_labels` are compressed binary data. Fortunately, `Req` takes care of the decompression for us.\n\nYou can read more about the format of the ubyte files [here](http://yann.lecun.com/exdb/mnist/). Each file starts with a magic number and some metadata. We can use binary pattern matching to extract the information we want. 
In this case we extract the raw binary images and labels.\n\n```elixir\nbase_url = \"https://storage.googleapis.com/cvdf-datasets/mnist/\"\n%{body: train_images} = Req.get!(base_url <> \"train-images-idx3-ubyte.gz\")\n%{body: train_labels} = Req.get!(base_url <> \"train-labels-idx1-ubyte.gz\")\n\n<<_::32, n_images::32, n_rows::32, n_cols::32, images::binary>> = train_images\n<<_::32, n_labels::32, labels::binary>> = train_labels\n```\n\nWe can easily read that binary data into a tensor using `Nx.from_binary/2`. `Nx.from_binary/2` expects a raw binary and a data type. In this case, both images and labels are stored as unsigned 8-bit integers. We can start by parsing our images:\n\n```elixir\nimages =\n images\n |> Nx.from_binary({:u, 8})\n |> Nx.reshape({n_images, 1, n_rows, n_cols}, names: [:images, :channels, :height, :width])\n |> Nx.divide(255)\n```\n\n`Nx.from_binary/2` returns a flat tensor. Using `Nx.reshape/3` we can manipulate this flat tensor into meaningful dimensions. Notice we also *normalized* the tensor by dividing the input data by 255. This squeezes the data between 0 and 1, which often leads to better behavior when training models. Now, let's see what these images look like:\n\n```elixir\nimages[[images: 0..4]] |> Nx.to_heatmap()\n```\n\nIn the reshape operation above, we give each dimension of the tensor a name. This makes it much easier to do things like slicing, and helps make your code easier to understand. Here we slice the `images` dimension of the images tensor to obtain the first 5 training images. Then, we convert them to a heatmap for easy visualization.\n\nIt's common to train neural networks in batches (actually correctly called minibatches, but you'll see batch and minibatch used interchangeably). We can \"batch\" our images into batches of 32 like this:\n\n```elixir\nimages = Nx.to_batched(images, 32)\n```\n\nNow, we'll need to get our labels into batches as well, but first we need to *one-hot encode* the labels. One-hot encoding converts input data from labels such as `3`, `5`, `7`, etc. into vectors of 0's and a single 1 at the correct label's index. As an example, a label of `3` gets converted to `[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]`.\n\n```elixir\ntargets =\n labels\n |> Nx.from_binary({:u, 8})\n |> Nx.new_axis(-1)\n |> Nx.equal(Nx.tensor(Enum.to_list(0..9)))\n |> Nx.to_batched(32)\n```","ref":"mnist.html#retrieving-and-exploring-the-dataset","title":"Retrieving and exploring the dataset - Classifying handwritten digits","type":"extras"},{"doc":"Let's start by defining a simple model:\n\n```elixir\nmodel =\n Axon.input(\"input\", shape: {nil, 1, 28, 28})\n |> Axon.flatten()\n |> Axon.dense(128, activation: :relu)\n |> Axon.dense(10, activation: :softmax)\n```\n\nAll `Axon` models start with an input layer to tell subsequent layers what shapes to expect. We then use `Axon.flatten/2` which flattens the previous layer by squeezing all dimensions but the first dimension into a single dimension. Our model consists of 2 fully connected layers with 128 and 10 units, respectively. The first layer uses `:relu` activation which returns `max(0, input)` element-wise. The final layer uses `:softmax` activation to return a probability distribution over the 10 labels [0 - 9].","ref":"mnist.html#defining-the-model","title":"Defining the model - Classifying handwritten digits","type":"extras"},{"doc":"In Axon we express the task of training using a declarative loop API. First, we need to specify a loss function and optimizer; there are many built-in variants to choose from. 
In this example, we'll use *categorical cross-entropy* and the *Adam* optimizer. We will also keep track of the *accuracy* metric. Finally, we run the training loop, passing in our batched images and labels. We'll train for 10 epochs using the `EXLA` compiler.\n\n```elixir\nparams =\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, :adam)\n |> Axon.Loop.metric(:accuracy, \"Accuracy\")\n |> Axon.Loop.run(Stream.zip(images, targets), %{}, epochs: 10, compiler: EXLA)\n```","ref":"mnist.html#training","title":"Training - Classifying handwritten digits","type":"extras"},{"doc":"Now that we have the parameters from the training step, we can use them for predictions.\nFor this, `Axon.predict/3` can be used.\n\n```elixir\nfirst_batch = Enum.at(images, 0)\n\noutput = Axon.predict(model, params, first_batch)\n```\n\nFor each image, the model outputs a probability distribution. This informs us how certain the model is about its prediction. Let's see the most probable digit for each image:\n\n```elixir\nNx.argmax(output, axis: 1)\n```\n\nIf you look at the original images, you will see that the predictions match the data!","ref":"mnist.html#prediction","title":"Prediction - Classifying handwritten digits","type":"extras"},{"doc":"# Classifying horses and humans\n\n```elixir\nMix.install([\n {:axon, \"~> 0.6.0\"},\n {:nx, \"~> 0.6.0\"},\n {:exla, \"~> 0.6.0\"},\n {:stb_image, \"~> 0.6.0\"},\n {:req, \"~> 0.4.5\"},\n {:kino, \"~> 0.11.0\"}\n])\n\nNx.global_default_backend(EXLA.Backend)\nNx.Defn.global_default_options(compiler: EXLA)\n```","ref":"horses_or_humans.html","title":"Classifying horses and humans","type":"extras"},{"doc":"In this notebook, we want to predict whether an image presents a horse or a human. To do this efficiently, we will build a Convolutional Neural Network (CNN) and compare the learning process with and without gradient centralization.","ref":"horses_or_humans.html#introduction","title":"Introduction - Classifying horses and humans","type":"extras"},{"doc":"We will be using the [Horses or Humans Dataset](https://laurencemoroney.com/datasets.html#horses-or-humans-dataset). The dataset is available as a ZIP with image files, which we will download using `req`. Conveniently, `req` will unzip the files for us; we just need to convert the filenames from charlists to strings.\n\n```elixir\n%{body: files} =\n Req.get!(\"https://storage.googleapis.com/learning-datasets/horse-or-human.zip\")\n\nfiles = for {name, binary} <- files, do: {List.to_string(name), binary}\n```\n\n#","ref":"horses_or_humans.html#loading-the-data","title":"Loading the data - Classifying horses and humans","type":"extras"},{"doc":"We need to know how many images to include in a batch. A batch is a group of images to load into the GPU at a time. If the batch size is too big for your GPU, it will run out of memory; in such a case, you can reduce the batch size. It is generally optimal to utilize almost all of the GPU memory during training. It will take more time to train with a lower batch size.\n\n```elixir\nbatch_size = 32\nbatches_per_epoch = div(length(files), batch_size)\n```","ref":"horses_or_humans.html#note-on-batching","title":"Note on batching - Classifying horses and humans","type":"extras"},{"doc":"We'll have a really quick look at our data. Let's see what we are dealing with:\n\n```elixir\n{name, binary} = Enum.random(files)\nKino.Markdown.new(name) |> Kino.render()\nKino.Image.new(binary, :png)\n```\n\nReevaluate the cell a couple of times to view different images. 
Note that the file names are either `horse[N]-[M].png` or `human[N]-[M].png`, so we can derive the expected class from that.\n\n\n\nWhile we are at it, look at this beautiful animation:\n\n```elixir\nnames_to_animate = [\"horse01\", \"horse05\", \"human01\", \"human05\"]\n\nimages_to_animate =\n for {name, binary} <- files, Enum.any?(names_to_animate, &String.contains?(name, &1)) do\n Kino.Image.new(binary, :png)\n end\n\nKino.animate(50, images_to_animate, fn\n _i, [image | images] -> {:cont, image, images}\n _i, [] -> :halt\nend)\n```\n\nHow many images are there?\n\n```elixir\nlength(files)\n```\n\nHow many images will not be used for training? The remainder of the integer division will be ignored.\n\n```elixir\nfiles\n|> length()\n|> rem(batch_size)\n```","ref":"horses_or_humans.html#a-look-at-the-data","title":"A look at the data - Classifying horses and humans","type":"extras"},{"doc":"First, we need to preprocess the data for our CNN. At the beginning of the process, we chunk images into batches. Then, we use the `parse_file/1` function to load images and label them accurately. Finally, we \"augment\" the input, which means that we normalize data and flip the images along one of the axes. The last procedure helps a neural network to make predictions regardless of the orientation of the image.\n\n```elixir\ndefmodule HorsesHumans.DataProcessing do\n import Nx.Defn\n\n def data_stream(files, batch_size) do\n files\n |> Enum.shuffle()\n |> Stream.chunk_every(batch_size, batch_size, :discard)\n |> Task.async_stream(\n fn batch ->\n {images, labels} = batch |> Enum.map(&parse_file/1) |> Enum.unzip()\n {Nx.stack(images), Nx.stack(labels)}\n end,\n timeout: :infinity\n )\n |> Stream.map(fn {:ok, {images, labels}} -> {augment(images), labels} end)\n |> Stream.cycle()\n end\n\n defp parse_file({filename, binary}) do\n label =\n if String.starts_with?(filename, \"horses/\"),\n do: Nx.tensor([1, 0], type: {:u, 8}),\n else: Nx.tensor([0, 1], type: {:u, 8})\n\n image = binary |> StbImage.read_binary!() |> StbImage.to_nx()\n\n {image, label}\n end\n\n defnp augment(images) do\n # Normalize\n images = images / 255.0\n\n # Optional vertical/horizontal flip\n {u, _new_key} = Nx.Random.key(1987) |> Nx.Random.uniform()\n\n cond do\n u < 0.25 -> images\n u < 0.5 -> Nx.reverse(images, axes: [2])\n u < 0.75 -> Nx.reverse(images, axes: [3])\n true -> Nx.reverse(images, axes: [2, 3])\n end\n end\nend\n```","ref":"horses_or_humans.html#data-processing","title":"Data processing - Classifying horses and humans","type":"extras"},{"doc":"The next step is creating our model. In this notebook, we choose the classic Convolutional Neural Network architecture. Let's dive into the core components of a CNN.\n\n\n\n`Axon.conv/3` adds a convolutional layer, which is at the core of a CNN. A convolutional layer applies a filter function throughout the image, sliding a window with shape `:kernel_size`. As opposed to dense layers, a convolutional layer exploits weight sharing to better model data where locality matters. 
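To make the sliding-window arithmetic concrete, here is a hypothetical shape check (assuming Axon's default `:valid` padding and unit strides, a `{3, 3}` kernel trims one pixel from each border):\n\n```elixir\n# A single convolutional layer over the 300x300, 4-channel inputs used below\nsketch =\n Axon.input(\"input\", shape: {nil, 300, 300, 4})\n |> Axon.conv(16, kernel_size: {3, 3})\n\n# The spatial dimensions shrink to 298x298, with one feature map per filter\nAxon.get_output_shape(sketch, Nx.template({1, 300, 300, 4}, :f32))\n#=> {1, 298, 298, 16}\n```\n\nWeight sharing means those 16 kernels are reused at every window position. 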
This feature is a natural fit for images.\n\n\n\n| ![](https://miroslawmamczur.pl/wp-content/uploads/2021/03/06.gif) |\n| :-------------------------------------------------------------------------------------: |\n| Figure 1: A step-by-step visualization of a convolution layer for `kernel_size: {3, 3}` |\n\n\n\n`Axon.max_pool/2` adds a downscaling operation that takes the maximum value from a subtensor according to `:kernel_size`.\n\n\n\n| ![](https://production-media.paperswithcode.com/methods/MaxpoolSample2.png) |\n| :-------------------------------------------------------------------------: |\n| Figure 2: Max pooling operation for `kernel_size: {2, 2}` |\n\n\n\n`Axon.dropout/2` and `Axon.spatial_dropout/2` add dropout layers which prevent a neural network from overfitting. Standard dropout drops a given rate of randomly chosen neurons during the training process. On the other hand, spatial dropout gets rid of whole feature maps. The graphical difference between dropout and spatial dropout is presented in a picture below.\n\n\n\n| ![](https://miro.medium.com/max/1400/1*KkqxjvXTIV_b365B41ltfg.png) |\n| :-------------------------------------------------------------------: |\n| Figure 3: The difference between standard dropout and spatial dropout |\n\n\n\nKnowing the relevant building blocks, let's build our network! It will have a convolutional part, composed of convolutional and pooling layers; this part should capture the spatial features of an image. Then, at the end, we will add a dense layer with 512 neurons fed with all the spatial features, and a final two-neuron layer as our classification output.\n\n```elixir\nmodel =\n Axon.input(\"input\", shape: {nil, 300, 300, 4})\n |> Axon.conv(16, kernel_size: {3, 3}, activation: :relu)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(32, kernel_size: {3, 3}, activation: :relu)\n |> Axon.spatial_dropout(rate: 0.5)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)\n |> Axon.spatial_dropout(rate: 0.5)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.flatten()\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(512, activation: :relu)\n |> Axon.dense(2, activation: :softmax)\n```","ref":"horses_or_humans.html#building-the-model","title":"Building the model - Classifying horses and humans","type":"extras"},{"doc":"It's time to train our model. We specify the loss and optimizer, and choose accuracy as our metric. We also set `log: 1` to frequently update the training progress. We manually specify the number of iterations, such that each epoch goes through all of the batches once.\n\n```elixir\ndata = HorsesHumans.DataProcessing.data_stream(files, batch_size)\n\noptimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-4)\n\nparams =\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, log: 1)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)\n```\n\n","ref":"horses_or_humans.html#training-the-model","title":"Training the model - Classifying horses and humans","type":"extras"},{"doc":"We can improve the training by applying gradient centralization. It is a technique with a similar purpose to batch normalization. For each loss gradient, we subtract a mean value to have a gradient with mean equal to zero. 
This process prevents gradients from exploding.\n\n```elixir\ncentralized_optimizer = Polaris.Updates.compose(Polaris.Updates.centralize(), optimizer)\n\nmodel\n|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, log: 1)\n|> Axon.Loop.metric(:accuracy)\n|> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)\n```","ref":"horses_or_humans.html#extra-gradient-centralization","title":"Extra: gradient centralization - Classifying horses and humans","type":"extras"},{"doc":"We can now use our trained model; let's try a couple of examples.\n\n```elixir\n{name, binary} = Enum.random(files)\nKino.Markdown.new(name) |> Kino.render()\nKino.Image.new(binary, :png) |> Kino.render()\n\ninput =\n binary\n |> StbImage.read_binary!()\n |> StbImage.to_nx()\n |> Nx.new_axis(0)\n |> Nx.divide(255.0)\n\nAxon.predict(model, params, input)\n```\n\n_Note: the model output refers to the probabilities that the image presents a horse or a human, respectively._\n\n\n\nYou can find a validation set [here](https://storage.googleapis.com/learning-datasets/validation-horse-or-human.zip), in case you want to experiment further!","ref":"horses_or_humans.html#inference","title":"Inference - Classifying horses and humans","type":"extras"},{"doc":"# Generating text with LSTM\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:req, \"~> 0.3.1\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\nNx.global_default_backend(EXLA.Backend)\n```","ref":"lstm_generation.html","title":"Generating text with LSTM","type":"extras"},{"doc":"Recurrent Neural Networks (RNNs) can be used as generative models. This means that in addition to being used for predictive models (making predictions) they can learn the sequences of a problem and then generate entirely new plausible sequences for the problem domain.\n\nGenerative models like this are useful not only to study how well a model has learned a problem, but to learn more about the problem domain itself.\n\nIn this example, we will discover how to create a generative model for text, character-by-character, using Long Short-Term Memory (LSTM) recurrent neural networks in Elixir with Axon.","ref":"lstm_generation.html#introduction","title":"Introduction - Generating text with LSTM","type":"extras"},{"doc":"Using [Project Gutenberg](https://www.gutenberg.org/) we can download the text of books that are no longer protected under copyright, so we can experiment with them.\n\nThe one that we will use for this experiment is [Alice's Adventures in Wonderland by Lewis Carroll](https://www.gutenberg.org/ebooks/11). You can choose any other text or book that you like for this experiment.\n\n```elixir\n# Change the URL if you'd like to experiment with other books\ndownload_url = \"https://www.gutenberg.org/files/11/11-0.txt\"\n\nbook_text = Req.get!(download_url).body\n```\n\nFirst of all, we need to normalize the content of the book. We are only interested in the sequence of English characters, periods, and new lines. Also, we currently don't care about capitalization or characters like the apostrophe, so we can remove all other characters and downcase everything. We can use a regular expression for that.\n\nWe can also convert the string into a list of characters so we can handle them more easily. 
You will see exactly why a bit further on.\n\n```elixir\nnormalized_book_text =\n book_text\n |> String.downcase()\n |> String.replace(~r/[^a-z \\.\\n]/, \"\")\n |> String.to_charlist()\n```\n\nWe converted the text to a list of characters, where each character is a number (specifically, a Unicode code point). Lowercase English characters are represented with numbers between `97 = a` and `122 = z`, a space is `32 = [ ]`, a new line is `10 = \\n`, and the period is `46 = .`.\n\nSo we should have 26 + 3 (= 29) characters in total. Let's see if that's true.\n\n```elixir\nnormalized_book_text |> Enum.uniq() |> Enum.count()\n```\n\nSince we want to use these 29 characters as possible values for each input to our neural network, we can re-map them to values between 0 and 28, so that each specific neuron will indicate a specific character.\n\n```elixir\n# Extract all the unique characters we have and sort them for clarity\ncharacters = normalized_book_text |> Enum.uniq() |> Enum.sort()\ncharacters_count = Enum.count(characters)\n\n# Create a mapping for every character\nchar_to_idx = characters |> Enum.with_index() |> Map.new()\n# And a reverse mapping to convert back to characters\nidx_to_char = characters |> Enum.with_index(&{&2, &1}) |> Map.new()\n\nIO.puts(\"Total book characters: #{Enum.count(normalized_book_text)}\")\nIO.puts(\"Total unique characters: #{characters_count}\")\n```\n\nNow we need to create our training and testing data sets. But how?\n\nOur goal is to teach the machine what usually comes after a sequence of characters. For example, given the following sequence **\"Hello, My name i\"**, the computer should be able to guess that the next character is probably **\"s\"**.\n\n\n\n\n\n```mermaid\ngraph LR;\n A[Input: Hello my name i]-->NN[Neural Network]-->B[Output: s];\n```\n\n\n\nLet's choose an arbitrary sequence length and create a data set from the book text. All we need to do is read a fixed number of characters from the book as the input and then read one more as the designated output.\n\nAfter doing all that, we also want to convert every character to its index using the `char_to_idx` mapping that we have created before.\n\nNeural networks work best if you scale your inputs and outputs. In this case we are going to scale everything between 0 and 1 by dividing them by the number of unique characters that we have.\n\nAnd for the final step we will reshape it so we can use the data in our LSTM model.\n\n```elixir\nsequence_length = 100\n\ntrain_data =\n normalized_book_text\n |> Enum.map(&Map.fetch!(char_to_idx, &1))\n |> Enum.chunk_every(sequence_length, 1, :discard)\n # We don't want the last chunk since we don't have a prediction for it.\n |> Enum.drop(-1)\n |> Nx.tensor()\n |> Nx.divide(characters_count)\n |> Nx.reshape({:auto, sequence_length, 1})\n```\n\nFor our train results, we will do the same: drop the first `sequence_length` characters and then convert them to the mapping. Additionally, we will do **one-hot encoding**.\n\nThe reason we want to use one-hot encoding is that in our model we don't want to only return a character as the output. We want it to return the probability of each character for the output. 
This way we can decide whether a given probability is good enough, choose between multiple possible outputs, or even discard everything if the network is not confident enough.\n\nIn Nx, you can achieve this encoding by using this snippet:\n\n```elixir\nNx.tensor([\n  [0],\n  [1],\n  [2]\n])\n|> Nx.equal(Nx.iota({1, 3}))\n```\n\nTo sum it up, here is how we generate the train results:\n\n```elixir\ntrain_results =\n  normalized_book_text\n  |> Enum.drop(sequence_length)\n  |> Enum.map(&Map.fetch!(char_to_idx, &1))\n  |> Nx.tensor()\n  |> Nx.reshape({:auto, 1})\n  |> Nx.equal(Nx.iota({1, characters_count}))\n```","ref":"lstm_generation.html#preparation","title":"Preparation - Generating text with LSTM","type":"extras"},{"doc":"```elixir\n# As the input, we expect sequence_length characters\n\nmodel =\n  Axon.input(\"input_chars\", shape: {nil, sequence_length, 1})\n  # The LSTM layer of our network\n  |> Axon.lstm(256)\n  # Selecting only the output from the LSTM Layer\n  |> then(fn {out, _} -> out end)\n  # Since we only want the last step of the LSTM output, we slice it and\n  # select the last one\n  |> Axon.nx(fn t -> t[[0..-1//1, -1]] end)\n  # 20% dropout so we will not become too dependent on specific neurons\n  |> Axon.dropout(rate: 0.2)\n  # The output layer. One neuron for each character, using softmax\n  # as the activation so every node represents a probability\n  |> Axon.dense(characters_count, activation: :softmax)\n```","ref":"lstm_generation.html#defining-the-model","title":"Defining the Model - Generating text with LSTM","type":"extras"},{"doc":"To train the network, we will use Axon's Loop API. It is pretty straightforward.\n\nFor the loss function we can use _categorical cross-entropy_ since we are dealing with categories (each character) in our output. For the optimizer we can use _Adam_.\n\nWe will train our network for 20 epochs. Note that we are working with a fair amount of data, so it may take a long time unless you run it on a GPU.\n\n```elixir\nbatch_size = 128\ntrain_batches = Nx.to_batched(train_data, batch_size)\nresult_batches = Nx.to_batched(train_results, batch_size)\n\nIO.puts(\"Total batches: #{Enum.count(train_batches)}\")\n\nparams =\n  model\n  |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))\n  |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 20, compiler: EXLA)\n\n:ok\n```","ref":"lstm_generation.html#training-the-network","title":"Training the network - Generating text with LSTM","type":"extras"},{"doc":"Now that we have a trained neural network, we can start generating text with it! We just need to pass the initial sequence as the input to the network and select the most probable output. 
`Axon.predict/3` will give us the output layer; using `Nx.argmax/1` we get the index of the most confident neuron, and then we simply convert that index back to its Unicode representation.\n\n```elixir\ngenerate_fn = fn model, params, init_seq ->\n  # The initial sequence that we want the network to complete for us.\n  init_seq =\n    init_seq\n    |> String.trim()\n    |> String.downcase()\n    |> String.to_charlist()\n    |> Enum.map(&Map.fetch!(char_to_idx, &1))\n\n  Enum.reduce(1..100, init_seq, fn _, seq ->\n    init_seq =\n      seq\n      |> Enum.take(-sequence_length)\n      |> Nx.tensor()\n      |> Nx.divide(characters_count)\n      |> Nx.reshape({1, sequence_length, 1})\n\n    char =\n      Axon.predict(model, params, init_seq)\n      |> Nx.argmax()\n      |> Nx.to_number()\n\n    seq ++ [char]\n  end)\n  |> Enum.map(&Map.fetch!(idx_to_char, &1))\nend\n\n# The initial sequence that we want the network to complete for us.\ninit_seq = \"\"\"\nnot like to drop the jar for fear\nof killing somebody underneath so managed to put it into one of the\ncupboards as she fell past it.\n\"\"\"\n\ngenerate_fn.(model, params, init_seq) |> IO.puts()\n```","ref":"lstm_generation.html#generating-text","title":"Generating text - Generating text with LSTM","type":"extras"},{"doc":"We can improve our network by stacking multiple LSTM layers together. We just need to change our model and re-train our network.\n\n```elixir\nnew_model =\n  Axon.input(\"input_chars\", shape: {nil, sequence_length, 1})\n  |> Axon.lstm(256)\n  |> then(fn {out, _} -> out end)\n  |> Axon.dropout(rate: 0.2)\n  # This time we will pass all of the `out` to the next lstm layer.\n  # We just need to slice the last one.\n  |> Axon.lstm(256)\n  |> then(fn {out, _} -> out end)\n  |> Axon.nx(fn x -> x[[0..-1//1, -1]] end)\n  |> Axon.dropout(rate: 0.2)\n  |> Axon.dense(characters_count, activation: :softmax)\n```\n\nThen we can train the network using the exact same code as before:\n\n```elixir\n# Using a smaller batch size in this case will give the network more opportunity to learn\nbatch_size = 64\ntrain_batches = Nx.to_batched(train_data, batch_size)\nresult_batches = Nx.to_batched(train_results, batch_size)\n\nIO.puts(\"Total batches: #{Enum.count(train_batches)}\")\n\nnew_params =\n  new_model\n  |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))\n  |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 50, compiler: EXLA)\n\n:ok\n```","ref":"lstm_generation.html#multi-lstm-layers","title":"Multi LSTM layers - Generating text with LSTM","type":"extras"},{"doc":"```elixir\ngenerate_fn.(new_model, new_params, init_seq) |> IO.puts()\n```\n\nAs you can see, it improved a lot with this new model and the extensive training. 
This time it has learned rules like adding a space after a period.","ref":"lstm_generation.html#generate-text-with-the-new-network","title":"Generate text with the new network - Generating text with LSTM","type":"extras"},{"doc":"The example above was heavily inspired by [this article](https://machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/) by Jason Brownlee.","ref":"lstm_generation.html#references","title":"References - Generating text with LSTM","type":"extras"},{"doc":"# Classifying fraudulent transactions\n\n```elixir\nMix.install([\n  {:axon, \"~> 0.3.0\"},\n  {:nx, \"~> 0.4.0\", override: true},\n  {:exla, \"~> 0.4.0\"},\n  {:explorer, \"~> 0.3.1\"},\n  {:kino, \"~> 0.7.0\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\nNx.global_default_backend(EXLA.Backend)\n\nalias Explorer.{DataFrame, Series}\n```","ref":"credit_card_fraud.html","title":"Classifying fraudulent transactions","type":"extras"},{"doc":"This time we will examine the Credit Card Fraud Dataset. Due to confidentiality, the original data were preprocessed by principal component analysis (PCA), and then 31 principal components were selected for the final data set. The dataset is highly imbalanced: the positive class (frauds) accounts for 0.172% of all transactions. Eventually, we will create a classifier that has not only great accuracy but, more importantly, high _recall_ and _precision_ - two metrics that are much more indicative of performance on imbalanced classification problems.","ref":"credit_card_fraud.html#introduction","title":"Introduction - Classifying fraudulent transactions","type":"extras"},{"doc":"The first step is to prepare the data for training and evaluation. Please download the dataset in the CSV format from https://www.kaggle.com/mlg-ulb/creditcardfraud (this requires a Kaggle account). Once done, put the file path in the input below.\n\n```elixir\ndata_path_input = Kino.Input.text(\"Data path (CSV)\")\n```\n\nNow, let's read the data into an `Explorer.DataFrame`:\n\n```elixir\ndata_path = Kino.Input.read(data_path_input)\n\ndf = DataFrame.from_csv!(data_path, dtypes: [{\"Time\", :float}])\n```\n\nFor further processing, we will need a couple of helper functions. We will group them in a module for convenience.\n\n```elixir\ndefmodule CredidCard.Data do\n  import Nx.Defn\n\n  def split_train_test(df, portion) do\n    num_examples = DataFrame.n_rows(df)\n    num_train = ceil(portion * num_examples)\n    num_test = num_examples - num_train\n\n    train = DataFrame.slice(df, 0, num_train)\n    test = DataFrame.slice(df, num_train, num_test)\n    {train, test}\n  end\n\n  def split_features_targets(df) do\n    features = DataFrame.select(df, &(&1 == \"Class\"), :drop)\n    targets = DataFrame.select(df, &(&1 == \"Class\"), :keep)\n    {features, targets}\n  end\n\n  def df_to_tensor(df) do\n    df\n    |> DataFrame.names()\n    |> Enum.map(&Series.to_tensor(df[&1]))\n    |> Nx.stack(axis: 1)\n  end\n\n  defn normalize_features(tensor) do\n    max =\n      tensor\n      |> Nx.abs()\n      |> Nx.reduce_max(axes: [0], keep_axes: true)\n\n    tensor / max\n  end\nend\n```\n\nWith that, we can start converting the data into the desired format. First, we split the data into training and test data (in proportion 80% into a training set and 20% into a test set).\n\n```elixir\n{train_df, test_df} = CredidCard.Data.split_train_test(df, 0.8)\n{DataFrame.n_rows(train_df), DataFrame.n_rows(test_df)}\n```\n\nNext, we separate features from labels and convert both to tensors. 
In the case of the features, we additionally normalize each one, dividing it by the maximum absolute value of that feature.\n\n```elixir\n{train_features, train_targets} = CredidCard.Data.split_features_targets(train_df)\n{test_features, test_targets} = CredidCard.Data.split_features_targets(test_df)\n\ntrain_inputs =\n  train_features\n  |> CredidCard.Data.df_to_tensor()\n  |> CredidCard.Data.normalize_features()\n\ntest_inputs =\n  test_features\n  |> CredidCard.Data.df_to_tensor()\n  |> CredidCard.Data.normalize_features()\n\ntrain_targets = CredidCard.Data.df_to_tensor(train_targets)\ntest_targets = CredidCard.Data.df_to_tensor(test_targets)\n\n:ok\n```","ref":"credit_card_fraud.html#data-processing","title":"Data processing - Classifying fraudulent transactions","type":"extras"},{"doc":"Our model for predicting whether a transaction is fraudulent or not is a dense neural network. It consists of two dense layers with 256 neurons each and ReLU activation functions, one dropout layer, and a dense layer with one neuron (since the problem is a binary prediction) followed by a sigmoid activation function.\n\n```elixir\nmodel =\n  Axon.input(\"input\")\n  |> Axon.dense(256)\n  |> Axon.relu()\n  |> Axon.dense(256)\n  |> Axon.relu()\n  |> Axon.dropout(rate: 0.3)\n  |> Axon.dense(1)\n  |> Axon.sigmoid()\n```","ref":"credit_card_fraud.html#building-the-model","title":"Building the model - Classifying fraudulent transactions","type":"extras"},{"doc":"Now that we have both the data and the model architecture prepared, it's time to train!\n\nNote the disproportion in the data samples:\n\n```elixir\nfraud = Nx.sum(train_targets) |> Nx.to_number()\nlegit = Nx.size(train_targets) - fraud\n\nbatched_train_inputs = Nx.to_batched(train_inputs, 2048)\nbatched_train_targets = Nx.to_batched(train_targets, 2048)\nbatched_train = Stream.zip(batched_train_inputs, batched_train_targets)\n\nIO.puts(\"# of legit transactions (train): #{legit}\")\nIO.puts(\"# of fraudulent transactions (train): #{fraud}\")\nIO.puts(\"% fraudulent transactions (train): #{100 * (fraud / (legit + fraud))}%\")\n```\n\nAs always, we define our training loop. We are using _binary cross-entropy_ as our loss function and Adam as the optimizer with a learning rate of 0.01. Then we immediately start training, passing in the training portion of the dataset.\n\n```elixir\nloss =\n  &Axon.Losses.binary_cross_entropy(\n    &1,\n    &2,\n    negative_weight: 1 / legit,\n    positive_weight: 1 / fraud,\n    reduction: :mean\n  )\n\noptimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-2)\n\nparams =\n  model\n  |> Axon.Loop.trainer(loss, optimizer)\n  |> Axon.Loop.run(batched_train, %{}, epochs: 30, compiler: EXLA)\n\n:ok\n```","ref":"credit_card_fraud.html#training-our-model","title":"Training our model - Classifying fraudulent transactions","type":"extras"},{"doc":"After the training, there is only one thing left: testing. 
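Since the dataset is so imbalanced, plain accuracy tells us very little here; _precision_ ($tp / (tp + fp)$) and _recall_ ($tp / (tp + fn)$) are the numbers to watch. As a small illustrative sketch (not part of the original notebook, and assuming the \"tp\", \"fp\" and \"fn\" running sums collected by the evaluation loop below), they could be derived like this:\n\n```elixir\n# Hypothetical helper: compute precision and recall from the\n# running-sum metrics gathered by the evaluation loop below.\nprecision_and_recall = fn metrics ->\n  tp = Nx.to_number(metrics[\"tp\"])\n  fp = Nx.to_number(metrics[\"fp\"])\n  fn_count = Nx.to_number(metrics[\"fn\"])\n\n  %{precision: tp / (tp + fp), recall: tp / (tp + fn_count)}\nend\n```\n\n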
Here, we will focus on the number of true positive, true negative, false positive, and false negative values, but also on the likelihood of denying legit and fraudulent transactions.\n\n```elixir\nbatched_test_inputs = Nx.to_batched(test_inputs, 2048)\nbatched_test_targets = Nx.to_batched(test_targets, 2048)\nbatched_test = Stream.zip(batched_test_inputs, batched_test_targets)\n\nsummarize = fn %Axon.Loop.State{metrics: metrics} = state ->\n legit_transactions_declined = Nx.to_number(metrics[\"fp\"])\n legit_transactions_accepted = Nx.to_number(metrics[\"tn\"])\n fraud_transactions_accepted = Nx.to_number(metrics[\"fn\"])\n fraud_transactions_declined = Nx.to_number(metrics[\"tp\"])\n total_fraud = fraud_transactions_declined + fraud_transactions_accepted\n total_legit = legit_transactions_declined + legit_transactions_accepted\n\n fraud_denial_percent = 100 * (fraud_transactions_declined / total_fraud)\n legit_denial_percent = 100 * (legit_transactions_declined / total_legit)\n\n IO.write(\"\\n\")\n IO.puts(\"Legit Transactions Declined: #{legit_transactions_declined}\")\n IO.puts(\"Fraudulent Transactions Caught: #{fraud_transactions_declined}\")\n IO.puts(\"Fraudulent Transactions Missed: #{fraud_transactions_accepted}\")\n IO.puts(\"Likelihood of catching fraud: #{fraud_denial_percent}%\")\n IO.puts(\"Likelihood of denying legit transaction: #{legit_denial_percent}%\")\n\n {:continue, state}\nend\n\nmodel\n|> Axon.Loop.evaluator()\n|> Axon.Loop.metric(:true_positives, \"tp\", :running_sum)\n|> Axon.Loop.metric(:true_negatives, \"tn\", :running_sum)\n|> Axon.Loop.metric(:false_positives, \"fp\", :running_sum)\n|> Axon.Loop.metric(:false_negatives, \"fn\", :running_sum)\n|> Axon.Loop.handle(:epoch_completed, summarize)\n|> Axon.Loop.run(batched_test, params, compiler: EXLA)\n\n:ok\n```","ref":"credit_card_fraud.html#model-evaluation","title":"Model evaluation - Classifying fraudulent transactions","type":"extras"},{"doc":"# MNIST Denoising Autoencoder using Kino for visualization\n\n```elixir\nMix.install([\n {:exla, \"~> 0.4.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:axon, \"~> 0.3.0\"},\n {:req, \"~> 0.3.1\"},\n {:kino, \"~> 0.7.0\"},\n {:scidata, \"~> 0.1.9\"},\n {:stb_image, \"~> 0.5.2\"},\n {:table_rex, \"~> 3.1.1\"}\n])\n```","ref":"mnist_autoencoder_using_kino.html","title":"MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"The goal of this notebook is to build a Denoising Autoencoder from scratch using Livebook. This notebook is based on [Training an Autoencoder on Fashion MNIST](fashionmnist_autoencoder.livemd), but includes some tips on using Livebook to train the model and using [Kino](https://hexdocs.pm/kino/Kino.html) (Livebook's interactive widget library) to play with and visualize our results.","ref":"mnist_autoencoder_using_kino.html#introduction","title":"Introduction - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"An autoencoder learns to recreate data it's seen in the dataset. 
For this notebook, we're going to try something simple: generating images of digits using the MNIST digit recognition dataset.\n\n\n\nFollowing along with the [Fashion MNIST Autoencoder example](fashionmnist_autoencoder.livemd), we'll use [Scidata](https://github.com/elixir-nx/scidata) to download the MNIST dataset and then preprocess the data.\n\n```elixir\n# We're not going to use the labels so we'll ignore them\n{train_images, _train_labels} = Scidata.MNIST.download()\n{train_images_binary, type, shape} = train_images\n```\n\nThe `shape` tells us we have 60,000 images, each with a single channel of size 28x28.\n\nAccording to [the MNIST website](http://yann.lecun.com/exdb/mnist/):\n\n> Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).\n\nLet's preprocess and normalize the data accordingly.\n\n```elixir\ntrain_images =\n  train_images_binary\n  |> Nx.from_binary(type)\n  # Since pixels are organized row-wise, reshape into rows x columns\n  |> Nx.reshape(shape, names: [:images, :channels, :height, :width])\n  # Normalize the pixel values to be between 0 and 1\n  |> Nx.divide(255)\n```\n\n```elixir\n# Make sure they look like numbers\ntrain_images[[images: 0..2]] |> Nx.to_heatmap()\n```\n\nThat looks right! Let's repeat the process for the test set.\n\n```elixir\n{test_images, _test_labels} = Scidata.MNIST.download_test()\n{test_images_binary, type, shape} = test_images\n\ntest_images =\n  test_images_binary\n  |> Nx.from_binary(type)\n  # Since pixels are organized row-wise, reshape into rows x columns\n  |> Nx.reshape(shape, names: [:images, :channels, :height, :width])\n  # Normalize the pixel values to be between 0 and 1\n  |> Nx.divide(255)\n\ntest_images[[images: 0..2]] |> Nx.to_heatmap()\n```","ref":"mnist_autoencoder_using_kino.html#data-loading","title":"Data loading - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"An autoencoder is a network whose input and output are the same size, with a \"bottleneck\" layer in the middle that has far fewer parameters than the input. Its goal is to force the output to reconstruct the input. The bottleneck layer forces the network to learn a compressed representation of the input space.\n\nA _denoising_ autoencoder is a small tweak on an autoencoder that takes a corrupted input (often corrupted by adding noise or zeroing out pixels) and reconstructs the original input, removing the noise in the process.\n\nThe part of the autoencoder that takes the input and compresses it into the bottleneck layer is called the _encoder_ and the part that takes the compressed representation and reconstructs the input is called the _decoder_. Usually the decoder mirrors the encoder.\n\nMNIST is a pretty easy dataset, so we're going to try a fairly small autoencoder.\n\nThe input image has size 784 (28 rows x 28 cols x 1 pixel). We'll set up the encoder to turn that into 256 features, then 128, 64, and then 10 features for the bottleneck layer. The decoder will do the reverse, taking the 10 features back up to 64, 128, 256 and 784. 
I'll use fully-connected (dense) layers.\n\n\n\n#","ref":"mnist_autoencoder_using_kino.html#building-the-model","title":"Building the model - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"```elixir\nmodel =\n  Axon.input(\"image\", shape: {nil, 1, 28, 28})\n  # This is now 28*28*1 = 784\n  |> Axon.flatten()\n  # The encoder\n  |> Axon.dense(256, activation: :relu)\n  |> Axon.dense(128, activation: :relu)\n  |> Axon.dense(64, activation: :relu)\n  # Bottleneck layer\n  |> Axon.dense(10, activation: :relu)\n  # The decoder\n  |> Axon.dense(64, activation: :relu)\n  |> Axon.dense(128, activation: :relu)\n  |> Axon.dense(256, activation: :relu)\n  |> Axon.dense(784, activation: :sigmoid)\n  # Turn it back into a 28x28 single channel image\n  |> Axon.reshape({:auto, 1, 28, 28})\n\n# We can use Axon.Display to show us what each of the layers would look like\n# assuming we send in a batch of 4 images\nAxon.Display.as_table(model, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()\n```\n\nChecking our understanding: since the layers are all dense layers, each layer should have `input_features * output_features` parameters for the weights plus `output_features` parameters for the biases.\n\nThis should match the `Total Parameters` output from Axon.Display (486298 parameters).\n\n```elixir\n# encoder\nencoder_parameters = 784 * 256 + 256 + (256 * 128 + 128) + (128 * 64 + 64) + (64 * 10 + 10)\ndecoder_parameters = 10 * 64 + 64 + (64 * 128 + 128) + (128 * 256 + 256) + (256 * 784 + 784)\ntotal_parameters = encoder_parameters + decoder_parameters\n```\n\n#","ref":"mnist_autoencoder_using_kino.html#the-model","title":"The model - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"With the model set up, we can now try to train it. We'll use MSE loss to compare our reconstruction with the original.\n\n\n\nWe'll create the training input by turning our image list into batches of size 128 and then using the same image as both the input and the target. However, the input image will have noise added to it that the autoencoder will have to remove.\n\nFor validation data, we'll use the test set and look at how well the autoencoder reconstructs it, to make sure we're not overfitting.\n\n\n\nThe function below adds some noise to the image by adding Gaussian noise scaled by a noise factor. We then have to make sure the pixel values are still within the 0..1.0 range.\n\nWe have to define this function using `defn` so that `Nx` can optimize it. If we don't do this, adding noise will take a really long time, making our training loop very slow. See [Nx.defn](https://hexdocs.pm/nx/Nx.Defn.html) for more details. 
`defn` can only be used in a module, so we'll define a little module to contain it.\n\n```elixir\ndefmodule Noiser do\n  import Nx.Defn\n\n  @noise_factor 0.4\n\n  defn add_noise(images) do\n    @noise_factor\n    |> Nx.multiply(Nx.random_normal(images))\n    |> Nx.add(images)\n    |> Nx.clip(0.0, 1.0)\n  end\nend\n\nadd_noise = Nx.Defn.jit(&Noiser.add_noise/1, compiler: EXLA)\n```\n\n```elixir\nbatch_size = 128\n\n# The original image, which is the target the network will be trying to match\nbatched_train_images =\n  train_images\n  |> Nx.to_batched(batch_size)\n\nbatched_noisy_train_images =\n  train_images\n  |> Nx.to_batched(batch_size)\n  # goes after to_batched so the noise is different every time\n  |> Stream.map(add_noise)\n\n# The noisy image is the input to the network\n# and the original image is the target it's trying to match\ntrain_data = Stream.zip(batched_noisy_train_images, batched_train_images)\n\nbatched_test_images =\n  test_images\n  |> Nx.to_batched(batch_size)\n\nbatched_noisy_test_images =\n  test_images\n  |> Nx.to_batched(batch_size)\n  |> Stream.map(add_noise)\n\ntest_data = Stream.zip(batched_noisy_test_images, batched_test_images)\n```\n\nLet's see what an element of the input and target looks like:\n\n```elixir\n{input_batch, target_batch} = Enum.at(train_data, 0)\n{Nx.to_heatmap(input_batch[images: 0]), Nx.to_heatmap(target_batch[images: 0])}\n```\n\nLooks right (and tricky). Let's see how the model does.\n\n```elixir\nparams =\n  model\n  |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))\n  |> Axon.Loop.validate(model, test_data)\n  |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)\n\n:ok\n```\n\nNow that we have a model that theoretically has learned _something_, we'll see what it's learned by running it on some images from the test set. We'll use Kino to allow us to select the image from the test set to run the model against. To avoid losing the params that took a while to train, we'll create another branch so we can experiment with the params and stop execution when needed without having to retrain.\n\n","ref":"mnist_autoencoder_using_kino.html#training","title":"Training - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"**A note on branching**\n\nBy default, everything in Livebook runs sequentially in a single process. Stopping a running cell aborts that process and consequently all its state is lost. A **branching section** copies everything from its parent and runs in a separate process. Thanks to this **isolation**, when we stop a cell in a branching section, only the state within that section is gone.\n\nSince we just spent a bunch of time training the model and don't want to lose that memory state as we continue to experiment, we create a branching section. This does add some memory overhead, but it's worth it so we can experiment without fear!\n\n\n\nTo use `Kino` to give us an interactive tool to evaluate the model, we'll create a `Kino.Frame` that we can dynamically update. We'll also create a form using `Kino.Control` to allow the user to select which image from the test set they'd like to evaluate the model on. 
Finally, `Kino.Control.stream` enables us to respond to changes in the user's selection when the user clicks the \"Render\" button.\n\nWe can use `Nx.concatenate` to stack the images side by side for a prettier output.\n\n```elixir\nform =\n  Kino.Control.form(\n    [\n      test_image_index: Kino.Input.number(\"Test Image Index\", default: 0)\n    ],\n    submit: \"Render\"\n  )\n\nKino.render(form)\n\nform\n|> Kino.Control.stream()\n|> Kino.animate(fn %{data: %{test_image_index: image_index}} ->\n  test_image = test_images[[images: image_index]] |> add_noise.()\n\n  reconstructed_image =\n    model\n    |> Axon.predict(params, test_image)\n    # Get rid of the batch dimension\n    |> Nx.squeeze(axes: [0])\n\n  combined_image = Nx.concatenate([test_image, reconstructed_image], axis: :width)\n  Nx.to_heatmap(combined_image)\nend)\n```\n\nThat looks pretty good!\n\nNote we used `Kino.animate/2`, which runs asynchronously so we don't block execution of the rest of the notebook.\n\n","ref":"mnist_autoencoder_using_kino.html#evaluation","title":"Evaluation - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"_Note that we branch from the \"Building a model\" section since we only need the model definition for this section and not the previously trained model._\n\n\n\nIt'd be nice to see how the model improves as it trains. In this section (also a branch, since I plan to experiment and don't want to lose the execution state) we'll improve the training loop to use `Kino` to show us how it's doing.\n\n[Axon.Loop.handle](https://hexdocs.pm/axon/Axon.Loop.html#handle/4) gives us a hook into various points of the training loop. We can use it with the `:iteration_completed` event to get a copy of the state of the params after some number of completed iterations of the training loop. By using those params to render an image in the test set, we can get a live view of the autoencoder learning to reconstruct its inputs.\n\n```elixir\n# A helper function to display the input and output side by side\ncombined_input_output = fn params, image_index ->\n  test_image = test_images[[images: image_index]] |> add_noise.()\n  reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])\n  Nx.concatenate([test_image, reconstructed_image], axis: :width)\nend\n\nNx.to_heatmap(combined_input_output.(params, 0))\n```\n\nIt'd also be nice to have a prettier version of the output. 
Let's convert the heatmap to a png to make that happen.\n\n```elixir\nimage_to_kino = fn image ->\n image\n |> Nx.multiply(255)\n |> Nx.as_type(:u8)\n |> Nx.transpose(axes: [:height, :width, :channels])\n |> StbImage.from_nx()\n |> StbImage.resize(200, 400)\n |> StbImage.to_binary(:png)\n |> Kino.Image.new(:png)\nend\n\nimage_to_kino.(combined_input_output.(params, 0))\n```\n\nMuch nicer!\n\nOnce again we'll use `Kino.Frame` for dynamically updating output:\n\n```elixir\nframe = Kino.Frame.new() |> Kino.render()\n\nrender_example_handler = fn state ->\n Kino.Frame.append(frame, \"Epoch: #{state.epoch}, Iteration: #{state.iteration}\")\n # state.step_state[:model_state] contains the model params when this event is fired\n params = state.step_state[:model_state]\n image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))\n image = combined_input_output.(params, image_index) |> image_to_kino.()\n Kino.Frame.append(frame, image)\n {:continue, state}\nend\n\nparams =\n model\n |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))\n |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)\n |> Axon.Loop.validate(model, test_data)\n |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)\n\n:ok\n```\n\nAwesome! We have a working denoising autoencoder that we can visualize getting better in 20 epochs!","ref":"mnist_autoencoder_using_kino.html#a-better-training-loop","title":"A better training loop - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"# Training an Autoencoder on Fashion MNIST\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:scidata, \"~> 0.1.9\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\n```","ref":"fashionmnist_autoencoder.html","title":"Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"An autoencoder is a deep learning model which consists of two parts: encoder and decoder. The encoder compresses high dimensional data into a low dimensional representation and feeds it to the decoder. The decoder tries to recreate the original data from the low dimensional representation.\nAutoencoders can be used in the following problems:\n\n* Dimensionality reduction\n* Noise reduction\n* Generative models\n* Data augmentation\n\nLet's walk through a basic autoencoder implementation in Axon to get a better understanding of how they work in practice.","ref":"fashionmnist_autoencoder.html#introduction","title":"Introduction - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"To train and test how our model works, we use one of the most popular data sets: [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist). It consists of small black and white images of clothes. 
Loading this data set is very simple with the help of `Scidata`.\n\n```elixir\n{image_data, _label_data} = Scidata.FashionMNIST.download()\n{bin, type, shape} = image_data\n```\n\nWe get the data in a raw format, but this is exactly the information we need to build an Nx tensor.\n\n```elixir\ntrain_images =\n  bin\n  |> Nx.from_binary(type)\n  |> Nx.reshape(shape)\n  |> Nx.divide(255.0)\n```\n\nWe also normalize pixel values into the range $[0, 1]$.\n\n\n\nWe can visualize one of the images by looking at the tensor heatmap:\n\n```elixir\nNx.to_heatmap(train_images[1])\n```","ref":"fashionmnist_autoencoder.html#downloading-the-data","title":"Downloading the data - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"First we need to define the encoder and decoder. Both are one-layer neural networks.\n\nIn the encoder, we start by flattening the input, so we go from shape `{batch_size, 1, 28, 28}` to `{batch_size, 784}`, and we pass the input into a dense layer. Our dense layer has only `latent_dim` neurons. The `latent_dim` (or the latent space) is a compressed representation of the data. Remember, we want our encoder to compress the input data into a lower-dimensional representation, so we choose a `latent_dim` which is less than the dimensionality of the input.\n\n```elixir\nencoder = fn x, latent_dim ->\n  x\n  |> Axon.flatten()\n  |> Axon.dense(latent_dim, activation: :relu)\nend\n```\n\nNext, we pass the output of the encoder to the decoder and try to reconstruct the compressed data into its original form. Since our original input had a dimensionality of 784, we use a dense layer with 784 neurons. Because our original data was normalized to have pixel values between 0 and 1, we use a `:sigmoid` activation in our dense layer to squeeze output values between 0 and 1. Our original input shape was 28x28, so we use `Axon.reshape` to convert the flattened representation of the outputs into an image with the correct width and height.\n\n```elixir\ndecoder = fn x ->\n  x\n  |> Axon.dense(784, activation: :sigmoid)\n  |> Axon.reshape({:batch, 1, 28, 28})\nend\n```\n\nIf we just bind the encoder and decoder sequentially, we'll get the desired model. This was pretty smooth, wasn't it?\n\n```elixir\nmodel =\n  Axon.input(\"input\", shape: {nil, 1, 28, 28})\n  |> encoder.(64)\n  |> decoder.()\n```","ref":"fashionmnist_autoencoder.html#encoder-and-decoder","title":"Encoder and decoder - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"Finally, we can train the model. We'll use the `:adam` optimizer and `:mean_squared_error` loss with `Axon.Loop.trainer`. Our loss function will measure the aggregate error between the pixels of the original images and the model's reconstructed images. We'll also track `:mean_absolute_error` using `Axon.Loop.metric`. 
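In math notation, for $n$ pixels with true values $y_i$ and reconstructions $\\hat{y}_i$, these two metrics are $MSE = \\frac{1}{n}\\sum_{i=1}^{n} (y_i - \\hat{y}_i)^2$ and $MAE = \\frac{1}{n}\\sum_{i=1}^{n} |y_i - \\hat{y}_i|$.\n\n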
`Axon.Loop.run` trains the model with the given training data.\n\n```elixir\nbatch_size = 32\nepochs = 5\n\nbatched_images = Nx.to_batched(train_images, batch_size)\ntrain_batches = Stream.zip(batched_images, batched_images)\n\nparams =\n  model\n  |> Axon.Loop.trainer(:mean_squared_error, :adam)\n  |> Axon.Loop.metric(:mean_absolute_error, \"Error\")\n  |> Axon.Loop.run(train_batches, %{}, epochs: epochs, compiler: EXLA)\n```","ref":"fashionmnist_autoencoder.html#training-the-model","title":"Training the model - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"To better understand mean absolute error (MAE) and mean squared error (MSE), let's go through an example.\n\n```elixir\n# Error definitions for a single sample\n\nmean_square_error = fn y_pred, y ->\n  y_pred\n  |> Nx.subtract(y)\n  |> Nx.power(2)\n  |> Nx.mean()\nend\n\nmean_absolute_error = fn y_pred, y ->\n  y_pred\n  |> Nx.subtract(y)\n  |> Nx.abs()\n  |> Nx.mean()\nend\n```\n\nWe will work with a sample image of a shoe, a slightly noised version of that image, and also an entirely different image from the dataset.\n\n```elixir\nshoe_image = train_images[0]\nnoised_shoe_image = Nx.add(shoe_image, Nx.random_normal(shoe_image, 0.0, 0.05))\nother_image = train_images[1]\n:ok\n```\n\nFor the same image both errors should be 0, because when we have two exact copies, there is no pixel difference.\n\n```elixir\n{\n  mean_square_error.(shoe_image, shoe_image),\n  mean_absolute_error.(shoe_image, shoe_image)\n}\n```\n\nNow the noised image:\n\n```elixir\n{\n  mean_square_error.(shoe_image, noised_shoe_image),\n  mean_absolute_error.(shoe_image, noised_shoe_image)\n}\n```\n\nAnd a different image:\n\n```elixir\n{\n  mean_square_error.(shoe_image, other_image),\n  mean_absolute_error.(shoe_image, other_image)\n}\n```\n\nAs we can see, the noised image has non-zero MSE and MAE values, but they are much smaller than the errors for two completely different pictures. In other words, both of these error types measure the level of similarity between images. A small error implies decent prediction values. On the other hand, a large error value suggests poor quality of predictions.\n\nIf you look at our implementations of MAE and MSE, you will notice that they are very similar. MAE and MSE are also called the $L_1$ and $L_2$ losses, respectively, after the $L_1$ and $L_2$ norms. The $L_2$ loss (MSE) is typically preferred because it's a smoother function, whereas $L_1$ is often difficult to optimize with stochastic gradient descent (SGD).","ref":"fashionmnist_autoencoder.html#extra-losses","title":"Extra: losses - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"Now, let's see how our model is doing! We will compare a sample image before and after compression.\n\n```elixir\nsample_image = train_images[0..0//1]\ncompressed_image = Axon.predict(model, params, sample_image, compiler: EXLA)\n\nsample_image\n|> Nx.to_heatmap()\n|> IO.inspect(label: \"Original\")\n\ncompressed_image\n|> Nx.to_heatmap()\n|> IO.inspect(label: \"Compressed\")\n\n:ok\n```\n\nAs we can see, the generated image is similar to the input image. The only difference between them is the absence of a sign in the middle of the second shoe. 
The model treated the sign as noise and blended it into the otherwise plain shoe.","ref":"fashionmnist_autoencoder.html#inference","title":"Inference - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"# A Variational Autoencoder for MNIST\n\n```elixir\nMix.install([\n  {:exla, \"~> 0.4.0\"},\n  {:nx, \"~> 0.4.0\", override: true},\n  {:axon, \"~> 0.3.0\"},\n  {:req, \"~> 0.3.1\"},\n  {:kino, \"~> 0.7.0\"},\n  {:scidata, \"~> 0.1.9\"},\n  {:stb_image, \"~> 0.5.2\"},\n  {:kino_vega_lite, \"~> 0.1.6\"},\n  {:vega_lite, \"~> 0.1.6\"},\n  {:table_rex, \"~> 3.1.1\"}\n])\n\nalias VegaLite, as: Vl\n\n# This speeds up all our `Nx` operations without having to use `defn`\nNx.global_default_backend(EXLA.Backend)\n\n:ok\n```","ref":"fashionmnist_vae.html","title":"A Variational Autoencoder for MNIST","type":"extras"},{"doc":"In this notebook, we'll be building a variational autoencoder (VAE). This will help demonstrate splitting up models, defining custom layers and loss functions, using multiple outputs, and a few additional Kino tricks for training models.\n\nThis notebook builds on the [denoising autoencoder example](mnist_autoencoder_using_kino.livemd) and turns the simple autoencoder into a variational one for the same dataset.","ref":"fashionmnist_vae.html#introduction","title":"Introduction - A Variational Autoencoder for MNIST","type":"extras"},{"doc":"This section will proceed without much explanation as most of it is extracted from the [denoising autoencoder example](mnist_autoencoder_using_kino.livemd). If anything here doesn't make sense, take a look at that notebook for an explanation.\n\n```elixir\ndefmodule Data do\n  @moduledoc \"\"\"\n  A module to hold useful data processing utilities,\n  mostly extracted from the previous notebook\n  \"\"\"\n\n  @doc \"\"\"\n  Converts the given image into a `Kino.Image`.\n\n  `image` must be a single channel `Nx` tensor with pixel values between 0 and 1.\n  `height` and `width` are the output size in pixels\n  \"\"\"\n  def image_to_kino(image, height \\\\ 200, width \\\\ 200) do\n    image\n    |> Nx.multiply(255)\n    |> Nx.as_type(:u8)\n    |> Nx.transpose(axes: [:height, :width, :channels])\n    |> StbImage.from_nx()\n    |> StbImage.resize(height, width)\n    |> StbImage.to_binary(:png)\n    |> Kino.Image.new(:png)\n  end\n\n  @doc \"\"\"\n  Converts image data from `Scidata.MNIST` into an `Nx` tensor and normalizes it.\n  \"\"\"\n  def preprocess_data(data) do\n    {image_data, _labels} = data\n    {images_binary, type, shape} = image_data\n\n    images_binary\n    |> Nx.from_binary(type)\n    # Since pixels are organized row-wise, reshape into rows x columns\n    |> Nx.reshape(shape, names: [:images, :channels, :height, :width])\n    # Normalize the pixel values to be between 0 and 1\n    |> Nx.divide(255)\n  end\n\n  @doc \"\"\"\n  Converts a tensor of images into random batches of paired images for model training\n  \"\"\"\n  def prepare_training_data(images, batch_size) do\n    Stream.flat_map([nil], fn nil ->\n      images |> Nx.shuffle(axis: :images) |> Nx.to_batched(batch_size)\n    end)\n    |> Stream.map(fn batch -> {batch, batch} end)\n  end\nend\n```\n\n```elixir\ntrain_images = Data.preprocess_data(Scidata.FashionMNIST.download())\ntest_images = Data.preprocess_data(Scidata.FashionMNIST.download_test())\n\nKino.render(train_images[[images: 0]] |> Data.image_to_kino())\nKino.render(test_images[[images: 0]] |> Data.image_to_kino())\n\n:ok\n```\n\nNow for our simple autoencoder model. 
We won't be using a denoising autoencoder here.\n\nNote that we're giving each of the layers a name - the reason for this will be apparent later.\n\nI'm also using a small custom layer to shift and scale the output of the sigmoid layer slightly so it can hit the 0 and 1 targets. I noticed the gradients tend to explode without this.\n\n```elixir\ndefmodule CustomLayer do\n import Nx.Defn\n\n def scaling_layer(%Axon{} = input, _opts \\\\ []) do\n Axon.layer(&scaling_layer_impl/2, [input])\n end\n\n defnp scaling_layer_impl(x, _opts \\\\ []) do\n x\n |> Nx.subtract(0.05)\n |> Nx.multiply(1.2)\n end\nend\n```\n\n```elixir\nmodel =\n Axon.input(\"image\", shape: {nil, 1, 28, 28})\n # This is now 28*28*1 = 784\n |> Axon.flatten()\n # The encoder\n |> Axon.dense(256, activation: :relu, name: \"encoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"encoder_layer_2\")\n |> Axon.dense(64, activation: :relu, name: \"encoder_layer_3\")\n # Bottleneck layer\n |> Axon.dense(10, activation: :relu, name: \"bottleneck_layer\")\n # The decoder\n |> Axon.dense(64, activation: :relu, name: \"decoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"decoder_layer_2\")\n |> Axon.dense(256, activation: :relu, name: \"decoder_layer_3\")\n |> Axon.dense(784, activation: :sigmoid, name: \"decoder_layer_4\")\n |> CustomLayer.scaling_layer()\n # Turn it back into a 28x28 single channel image\n |> Axon.reshape({:auto, 1, 28, 28})\n\n# We can use Axon.Display to show us what each of the layers would look like\n# assuming we send in a batch of 4 images\nAxon.Display.as_table(model, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()\n```\n\n```elixir\nbatch_size = 128\n\ntrain_data = Data.prepare_training_data(train_images, 128)\ntest_data = Data.prepare_training_data(test_images, 128)\n\n{input_batch, target_batch} = Enum.at(train_data, 0)\nKino.render(input_batch[[images: 0]] |> Data.image_to_kino())\nKino.render(target_batch[[images: 0]] |> Data.image_to_kino())\n\n:ok\n```\n\nWhen training, it can be useful to stop execution early - either when you see it's failing and you don't want to waste time waiting for the remaining epochs to finish, or if it's good enough and you want to start experimenting with it.\n\nThe `kino_early_stop/1` function below is a handy handler to give us a `Kino.Control.button` that will stop the training loop when clicked.\n\nWe also have `plot_losses/1` function to visualize our train and validation losses using `VegaLite`.\n\n```elixir\ndefmodule KinoAxon do\n @doc \"\"\"\n Adds handler function which adds a frame with a \"stop\" button\n to the cell with the training loop.\n\n Clicking \"stop\" will halt the training loop.\n \"\"\"\n def kino_early_stop(loop) do\n frame = Kino.Frame.new() |> Kino.render()\n stop_button = Kino.Control.button(\"stop\")\n Kino.Frame.render(frame, stop_button)\n\n {:ok, button_agent} = Agent.start_link(fn -> nil end)\n\n stop_button\n |> Kino.Control.stream()\n |> Kino.listen(fn _event ->\n Agent.update(button_agent, fn _ -> :stop end)\n end)\n\n handler = fn state ->\n stop_state = Agent.get(button_agent, & &1)\n\n if stop_state == :stop do\n Agent.stop(button_agent)\n Kino.Frame.render(frame, \"stopped\")\n {:halt_loop, state}\n else\n {:continue, state}\n end\n end\n\n Axon.Loop.handle(loop, :iteration_completed, handler)\n end\n\n @doc \"\"\"\n Plots the training and validation losses using Kino and VegaLite.\n\n This *must* come after `Axon.Loop.validate`.\n \"\"\"\n def plot_losses(loop) do\n vl_widget =\n Vl.new(width: 600, height: 
400)\n |> Vl.mark(:point, tooltip: true)\n |> Vl.encode_field(:x, \"epoch\", type: :ordinal)\n |> Vl.encode_field(:y, \"loss\", type: :quantitative)\n |> Vl.encode_field(:color, \"dataset\", type: :nominal)\n |> Kino.VegaLite.new()\n |> Kino.render()\n\n handler = fn state ->\n %Axon.Loop.State{metrics: metrics, epoch: epoch} = state\n loss = metrics[\"loss\"] |> Nx.to_number()\n val_loss = metrics[\"validation_loss\"] |> Nx.to_number()\n\n points = [\n %{epoch: epoch, loss: loss, dataset: \"train\"},\n %{epoch: epoch, loss: val_loss, dataset: \"validation\"}\n ]\n\n Kino.VegaLite.push_many(vl_widget, points)\n {:continue, state}\n end\n\n Axon.Loop.handle(loop, :epoch_completed, handler)\n end\nend\n```\n\n```elixir\n# A helper function to display the input and output side by side\ncombined_input_output = fn params, image_index ->\n test_image = test_images[[images: image_index]]\n reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])\n Nx.concatenate([test_image, reconstructed_image], axis: :width)\nend\n\nframe = Kino.Frame.new() |> Kino.render()\n\nrender_example_handler = fn state ->\n # state.step_state[:model_state] contains the model params when this event is fired\n params = state.step_state[:model_state]\n image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))\n image = combined_input_output.(params, image_index) |> Data.image_to_kino(200, 400)\n Kino.Frame.render(frame, image)\n Kino.Frame.append(frame, \"Epoch: #{state.epoch}, Iteration: #{state.iteration}\")\n {:continue, state}\nend\n\nparams =\n model\n |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))\n |> KinoAxon.kino_early_stop()\n |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)\n |> Axon.Loop.validate(model, test_data)\n |> KinoAxon.plot_losses()\n |> Axon.Loop.run(train_data, %{}, epochs: 40, compiler: EXLA)\n\n:ok\n```\n\n","ref":"fashionmnist_vae.html#training-a-simple-autoencoder","title":"Training a simple autoencoder - A Variational Autoencoder for MNIST","type":"extras"},{"doc":"Cool! We now have the parameters for a trained, simple autoencoder. Our next step is to split up the model so we can use the encoder and decoder separately. By doing that, we'll be able to take an image and _encode_ it to get the model's compressed image representation (the latent vector). 
We can then manipulate the latent vector and run the manipulated latent vector through the _decoder_ to get a new image.\n\nLet's start by defining the encoder and decoder separately as two different models.\n\n```elixir\nencoder =\n Axon.input(\"image\", shape: {nil, 1, 28, 28})\n # This is now 28*28*1 = 784\n |> Axon.flatten()\n # The encoder\n |> Axon.dense(256, activation: :relu, name: \"encoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"encoder_layer_2\")\n |> Axon.dense(64, activation: :relu, name: \"encoder_layer_3\")\n # Bottleneck layer\n |> Axon.dense(10, activation: :relu, name: \"bottleneck_layer\")\n\n# The output from the encoder\ndecoder =\n Axon.input(\"latent\", shape: {nil, 10})\n # The decoder\n |> Axon.dense(64, activation: :relu, name: \"decoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"decoder_layer_2\")\n |> Axon.dense(256, activation: :relu, name: \"decoder_layer_3\")\n |> Axon.dense(784, activation: :sigmoid, name: \"decoder_layer_4\")\n |> CustomLayer.scaling_layer()\n # Turn it back into a 28x28 single channel image\n |> Axon.reshape({:auto, 1, 28, 28})\n\nAxon.Display.as_table(encoder, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()\nAxon.Display.as_table(decoder, Nx.template({4, 10}, :f32)) |> IO.puts()\n```\n\nWe have the two models, but the problem is these are untrained models so we don't have the corresponding set of parameters. We'd like to use the parameters from the autoencoder we just trained and apply them to our split up models.\n\nLet's first take a look at what params actually are:\n\n```elixir\nparams\n```\n\nParams are just a `Map` with the layer name as the key identifying which parameters to use. We can easily match up the layer names with the output from the `Axon.Display.as_table/2` call for the autoencoder model.\n\nSo all we need to do is create a new Map that plucks out the right layers from our autoencoder `params` for each model and use that to run inference on our split up models.\n\nFortunately, since we gave each of the layers names, this requires no work at all - we can use the Map as it is since the layer names match up! Axon will ignore any extra keys so those won't be a problem.\n\nNote that naming the layers wasn't _required_, if the layers didn't have names we would have some renaming to do to get the names to match between the models. But giving them names made it very convenient :)\n\nLet's try encoding an image, printing the latent and then decoding the latent using our split up model to make sure it's working.\n\n```elixir\nimage = test_images[[images: 0]]\n\n# Encode the image\nlatent = Axon.predict(encoder, params, image)\nIO.inspect(latent, label: \"Latent\")\n# Decode the image\nreconstructed_image = Axon.predict(decoder, params, latent) |> Nx.squeeze(axes: [0])\n\ncombined_image = Nx.concatenate([image, reconstructed_image], axis: :width)\nData.image_to_kino(combined_image, 200, 400)\n```\n\nPerfect! Seems like the split up models are working as expected. Now let's try to generate some new images using our autoencoder. To do this, we'll manipulate the latent so it's slightly different from what the encoder gave us. 
Specifically, we'll try to interpolate between two images, showing 100 steps from our starting image to our final image.\n\n```elixir\nnum_steps = 100\n\n# Get our latents, image at index 0 is our starting point\n# index 1 is where we'll end\nlatents = Axon.predict(encoder, params, test_images[[images: 0..1]])\n# Latents is a {2, 10} tensor\n# The step we'll add to our latent to move it towards image[1]\nstep = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)\n# We can make a batch of all our new latents\nnew_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])\n\nreconstructed_images = Axon.predict(decoder, params, new_latents)\n\nreconstructed_images =\n Nx.reshape(\n reconstructed_images,\n Nx.shape(reconstructed_images),\n names: [:images, :channels, :height, :width]\n )\n\nStream.interval(div(5000, num_steps))\n|> Stream.take(num_steps + 1)\n|> Kino.animate(fn i ->\n Data.image_to_kino(reconstructed_images[i])\nend)\n```\n\nCool! We have interpolation! But did you notice that some of the intermediate frames don't look fashionable at all? Autoencoders don't generally return good results for random vectors in their latent space. That's where a VAE can help.\n\n","ref":"fashionmnist_vae.html#splitting-up-the-model","title":"Splitting up the model - A Variational Autoencoder for MNIST","type":"extras"},{"doc":"In a VAE, instead of outputting a latent vector, our encoder will output a distribution. Essentially this means instead of 10 outputs we'll have 20. 10 of them will represent the mean and 10 will represent the log of the variance of the latent. We'll have to sample from this distribution to get our latent vector. Finally, we'll have to modify our loss function to also compute the KL Divergence between the latent distribution and a standard normal distribution (this acts as a regularizer of the latent space).\n\nWe'll start by defining our model:\n\n```elixir\ndefmodule Vae do\n import Nx.Defn\n\n @latent_features 10\n\n defp sampling_layer(%Axon{} = input, _opts \\\\ []) do\n Axon.layer(&sampling_layer_impl/2, [input], name: \"sampling_layer\", op_name: :sample)\n end\n\n defnp sampling_layer_impl(x, _opts \\\\ []) do\n mu = x[[0..-1//1, 0, 0..-1//1]]\n log_var = x[[0..-1//1, 1, 0..-1//1]]\n std_dev = Nx.exp(0.5 * log_var)\n eps = Nx.random_normal(std_dev)\n sample = mu + std_dev * eps\n Nx.stack([sample, mu, std_dev], axis: 1)\n end\n\n defp encoder_partial() do\n Axon.input(\"image\", shape: {nil, 1, 28, 28})\n # This is now 28*28*1 = 784\n |> Axon.flatten()\n # The encoder\n |> Axon.dense(256, activation: :relu, name: \"encoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"encoder_layer_2\")\n |> Axon.dense(64, activation: :relu, name: \"encoder_layer_3\")\n # Bottleneck layer\n |> Axon.dense(@latent_features * 2, name: \"bottleneck_layer\")\n # Split up the mu and logvar\n |> Axon.reshape({:auto, 2, @latent_features})\n |> sampling_layer()\n end\n\n def encoder() do\n encoder_partial()\n # Grab only the sample (ie. 
the sampled latent)\n    |> Axon.nx(fn x -> x[[0..-1//1, 0]] end)\n  end\n\n  def decoder(input_latent) do\n    input_latent\n    |> Axon.dense(64, activation: :relu, name: \"decoder_layer_1\")\n    |> Axon.dense(128, activation: :relu, name: \"decoder_layer_2\")\n    |> Axon.dense(256, activation: :relu, name: \"decoder_layer_3\")\n    |> Axon.dense(784, activation: :sigmoid, name: \"decoder_layer_4\")\n    |> CustomLayer.scaling_layer()\n    # Turn it back into a 28x28 single channel image\n    |> Axon.reshape({:auto, 1, 28, 28})\n  end\n\n  def autoencoder() do\n    encoder_partial = encoder_partial()\n    encoder = encoder()\n    autoencoder = decoder(encoder)\n    Axon.container(%{mu_sigma: encoder_partial, reconstruction: autoencoder})\n  end\nend\n```\n\nThere are a few interesting things going on here. First, since our model has become more complex, we've used a module to keep it organized. We also built a custom layer to do the sampling and output the sampled latent vector as well as the distribution parameters (mu and sigma).\n\nFinally, we need the distribution itself so we can calculate the KL Divergence in our loss function. To make the model output the distribution parameters (mu and sigma), we use `Axon.container/1` to produce two outputs from our model instead of one. Now, instead of getting a tensor as an output, we'll get a map with the two tensors we need for our loss function.\n\nOur loss function also has to be modified to be the sum of the KL divergence and the MSE. Here's our custom loss function:\n\n```elixir\ndefmodule CustomLoss do\n  import Nx.Defn\n\n  defn loss(y_true, %{reconstruction: reconstruction, mu_sigma: mu_sigma}) do\n    mu = mu_sigma[[0..-1//1, 1, 0..-1//1]]\n    sigma = mu_sigma[[0..-1//1, 2, 0..-1//1]]\n    kld = Nx.sum(-Nx.log(sigma) - 0.5 + Nx.multiply(sigma, sigma) + Nx.multiply(mu, mu))\n    kld * 0.1 + Axon.Losses.mean_squared_error(y_true, reconstruction, reduction: :sum)\n  end\nend\n```\n\n
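For reference, the closed-form KL divergence between $N(\\mu_i, \\sigma_i^2)$ and the standard normal $N(0, 1)$, summed over the latent dimensions, is $D_{KL} = \\sum_i \\left( \\frac{\\mu_i^2 + \\sigma_i^2 - 1}{2} - \\log \\sigma_i \\right)$. Note that the loss above weights the quadratic terms more heavily than this textbook form and scales the whole term by 0.1, which effectively acts as a tunable regularization strength.\n\nWith all our pieces ready, we can pretty much use the same training loop as we did earlier. 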
The only modifications needed are to account for the fact that the model outputs a map with two values instead of a single tensor, and to tell the trainer to use our custom loss.\n\n```elixir\nmodel = Vae.autoencoder()\n\n# A helper function to display the input and output side by side\ncombined_input_output = fn params, image_index ->\n  test_image = test_images[[images: image_index]]\n  %{reconstruction: reconstructed_image} = Axon.predict(model, params, test_image)\n  reconstructed_image = reconstructed_image |> Nx.squeeze(axes: [0])\n  Nx.concatenate([test_image, reconstructed_image], axis: :width)\nend\n\nframe = Kino.Frame.new() |> Kino.render()\n\nrender_example_handler = fn state ->\n  # state.step_state[:model_state] contains the model params when this event is fired\n  params = state.step_state[:model_state]\n  image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))\n  image = combined_input_output.(params, image_index) |> Data.image_to_kino(200, 400)\n  Kino.Frame.render(frame, image)\n  Kino.Frame.append(frame, \"Epoch: #{state.epoch}, Iteration: #{state.iteration}\")\n  {:continue, state}\nend\n\nparams =\n  model\n  |> Axon.Loop.trainer(&CustomLoss.loss/2, Polaris.Optimizers.adam(learning_rate: 0.001))\n  |> KinoAxon.kino_early_stop()\n  |> Axon.Loop.handle(:epoch_completed, render_example_handler)\n  |> Axon.Loop.validate(model, test_data)\n  |> KinoAxon.plot_losses()\n  |> Axon.Loop.run(train_data, %{}, epochs: 40, compiler: EXLA)\n\n:ok\n```\n\nFinally, we can try our interpolation again:\n\n```elixir\nnum_steps = 100\n\n# Get our latents, image at index 0 is our starting point\n# index 1 is where we'll end\nlatents = Axon.predict(Vae.encoder(), params, test_images[[images: 0..1]])\n# Latents is a {2, 10} tensor\n# The step we'll add to our latent to move it towards image[1]\nstep = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)\n# We can make a batch of all our new latents\nnew_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])\n\ndecoder = Axon.input(\"latent\", shape: {nil, 10}) |> Vae.decoder()\n\nreconstructed_images = Axon.predict(decoder, params, new_latents)\n\nreconstructed_images =\n  Nx.reshape(\n    reconstructed_images,\n    Nx.shape(reconstructed_images),\n    names: [:images, :channels, :height, :width]\n  )\n\nStream.interval(div(5000, num_steps))\n|> Stream.take(num_steps + 1)\n|> Kino.animate(fn i ->\n  Data.image_to_kino(reconstructed_images[i])\nend)\n```\n\nDid you notice the difference? Every step in our interpolation looks similar to items in our dataset! This is the benefit of the VAE: we can generate new items by using random latents. 
In contrast, in the simple autoencoder, for the most part only latents we got from our encoder were likely to produce sensible outputs.","ref":"fashionmnist_vae.html#making-it-variational","title":"Making it variational - A Variational Autoencoder for MNIST","type":"extras"}]} \ No newline at end of file diff --git a/dist/search_data-BDD49AAD.js b/dist/search_data-BDD49AAD.js new file mode 100644 index 00000000..214f2191 --- /dev/null +++ b/dist/search_data-BDD49AAD.js @@ -0,0 +1 @@ +searchData={"content_type":"text/markdown","items":[{"doc":"Model State Data Structure.\n\nThis data structure represents all the state needed for\na model to perform inference.","ref":"Axon.ModelState.html","title":"Axon.ModelState","type":"module"},{"doc":"Returns an empty model state.","ref":"Axon.ModelState.html#empty/0","title":"Axon.ModelState.empty/0","type":"function"},{"doc":"Freezes parameters and state in the given model state\nusing the given mask.\n\nThe mask is an arity 1 function which takes the access path to the\nleaf parameter and returns `true` if the parameter should be frozen\nor `false` otherwise. With this, you can construct flexible masking\npolicies:\n\n fn\n [\"dense_\" <> n, \"kernel\"] -> String.to_integer(n) < 3\n _ -> false\n end\n\nThe default mask returns `true` for all paths, and is equivalent to\nfreezing the entire model.","ref":"Axon.ModelState.html#freeze/2","title":"Axon.ModelState.freeze/2","type":"function"},{"doc":"Returns the frozen parameters in the given model state.","ref":"Axon.ModelState.html#frozen_parameters/1","title":"Axon.ModelState.frozen_parameters/1","type":"function"},{"doc":"Returns the frozen state in the given model state.","ref":"Axon.ModelState.html#frozen_state/1","title":"Axon.ModelState.frozen_state/1","type":"function"},{"doc":"Returns a new model state struct from the given parameter\nmap.","ref":"Axon.ModelState.html#new/1","title":"Axon.ModelState.new/1","type":"function"},{"doc":"Returns the trainable parameters in the given model state.","ref":"Axon.ModelState.html#trainable_parameters/1","title":"Axon.ModelState.trainable_parameters/1","type":"function"},{"doc":"Returns the trainable state in the given model state.","ref":"Axon.ModelState.html#trainable_state/1","title":"Axon.ModelState.trainable_state/1","type":"function"},{"doc":"Unfreezes parameters and state in the given model state\nusing the given mask.\n\nThe mask is an arity 1 function which takes the access path to the\nleaf parameter and returns `true` if the parameter should be unfrozen\nor `false` otherwise. 
With this, you can construct flexible masking\npolicies:\n\n fn\n [\"dense_\" <> n, \"kernel\"] -> String.to_integer(n) < 3\n _ -> false\n end\n\nThe default mask returns `true` for all paths, and is equivalent to\nunfreezing the entire model.","ref":"Axon.ModelState.html#unfreeze/2","title":"Axon.ModelState.unfreeze/2","type":"function"},{"doc":"Updates the given model state.","ref":"Axon.ModelState.html#update/3","title":"Axon.ModelState.update/3","type":"function"},{"doc":"A high-level interface for creating neural network models.\n\nAxon is built entirely on top of Nx numerical definitions,\nso every neural network can be JIT or AOT compiled using\nany Nx compiler, or even transformed into high-level neural\nnetwork formats like TensorFlow Lite and\n[ONNX](https://github.com/elixir-nx/axon_onnx).\n\nFor a more in-depth overview of Axon, refer to the [Guides](guides.html).","ref":"Axon.html","title":"Axon","type":"module"},{"doc":"All Axon models start with an input layer, optionally specifying\nthe expected shape of the input data:\n\n input = Axon.input(\"input\", shape: {nil, 784})\n\nNotice you can specify some dimensions as `nil`, indicating\nthat the dimension size will be filled in at model runtime.\nYou can then compose inputs with other layers:\n\n model =\n input\n |> Axon.dense(128, activation: :relu)\n |> Axon.batch_norm()\n |> Axon.dropout(rate: 0.8)\n |> Axon.dense(64)\n |> Axon.tanh()\n |> Axon.dense(10)\n |> Axon.activation(:softmax)\n\nYou can inspect the model for a nice summary:\n\n IO.inspect(model)\n\n #Axon \n\nOr use the `Axon.Display` module to see more in-depth summaries:\n\n Axon.Display.as_table(model, Nx.template({1, 784}, :f32)) |> IO.puts\n\n +----------------------------------------------------------------------------------------------------------------+\n | Model |\n +=======================================+=============+==============+===================+=======================+\n | Layer | Input Shape | Output Shape | Options | Parameters |\n +=======================================+=============+==============+===================+=======================+\n | input ( input ) | [] | {1, 784} | shape: {nil, 784} | |\n | | | | optional: false | |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | dense_0 ( dense[\"input\"] ) | [{1, 784}] | {1, 128} | | kernel: f32[784][128] |\n | | | | | bias: f32[128] |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | relu_0 ( relu[\"dense_0\"] ) | [{1, 128}] | {1, 128} | | |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | batch_norm_0 ( batch_norm[\"relu_0\"] ) | [{1, 128}] | {1, 128} | epsilon: 1.0e-5 | gamma: f32[128] |\n | | | | channel_index: 1 | beta: f32[128] |\n | | | | momentum: 0.1 | mean: f32[128] |\n | | | | | var: f32[128] |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | dropout_0 ( dropout[\"batch_norm_0\"] ) | [{1, 128}] | {1, 128} | rate: 0.8 | |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | dense_1 ( dense[\"dropout_0\"] ) | [{1, 128}] | {1, 64} | | kernel: f32[128][64] |\n | | | | | bias: f32[64] |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | tanh_0 ( tanh[\"dense_1\"] ) | [{1, 64}] | {1, 
64} | | |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | dense_2 ( dense[\"tanh_0\"] ) | [{1, 64}] | {1, 10} | | kernel: f32[64][10] |\n | | | | | bias: f32[10] |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n | softmax_0 ( softmax[\"dense_2\"] ) | [{1, 10}] | {1, 10} | | |\n +---------------------------------------+-------------+--------------+-------------------+-----------------------+\n\n#","ref":"Axon.html#module-model-creation","title":"Model Creation - Axon","type":"module"},{"doc":"Creating a model with multiple inputs is as easy as declaring an\nadditional input in your Axon graph. Every input layer present in\nthe final Axon graph will be required to be passed as input at the\ntime of model execution.\n\n inp1 = Axon.input(\"input_0\", shape: {nil, 1})\n inp2 = Axon.input(\"input_1\", shape: {nil, 1})\n\n # Both inputs will be used\n model1 = Axon.add(inp1, inp2)\n\n # Only inp2 will be used\n model2 = Axon.add(inp2, inp2)\n\nAxon graphs are immutable, which means composing and manipulating\nan Axon graph creates an entirely new graph. Additionally, layer\nnames are lazily generated at model execution time. To avoid\nnon-deterministic input orderings and names, Axon requires each\ninput to have a unique binary identifier. You can then reference\ninputs by name when passing to models at execution time:\n\n inp1 = Axon.input(\"input_0\", shape: {nil, 1})\n inp2 = Axon.input(\"input_1\", shape: {nil, 1})\n\n model1 = Axon.add(inp1, inp2)\n\n {init_fn, predict_fn} = Axon.build(model1)\n\n params1 = init_fn.(Nx.template({1, 1}, {:f, 32}), %{})\n # Inputs are referenced by name\n predict_fn.(params1, %{\"input_0\" => x, \"input_1\" => y})\n\n#","ref":"Axon.html#module-multiple-inputs","title":"Multiple Inputs - Axon","type":"module"},{"doc":"Nx offers robust [container](https://hexdocs.pm/nx/Nx.Container.html) support\nwhich is extended to Axon. Axon allows you to wrap any valid Nx container\nin a layer. Containers are most commonly used to structure outputs:\n\n inp1 = Axon.input(\"input_0\", shape: {nil, 1})\n inp2 = Axon.input(\"input_1\", shape: {nil, 1})\n model = Axon.container(%{foo: inp1, bar: inp2})\n\nContainers can be arbitrarily nested:\n\n inp1 = Axon.input(\"input_0\", shape: {nil, 1})\n inp2 = Axon.input(\"input_1\", shape: {nil, 1})\n model = Axon.container({%{foo: {inp1, %{bar: inp2}}}})\n\nYou can even use custom structs which implement the container protocol:\n\n inp1 = Axon.input(\"input_0\", shape: {nil, 1})\n inp2 = Axon.input(\"input_1\", shape: {nil, 1})\n model = Axon.container(%MyStruct{foo: inp1, bar: inp2})\n\n#","ref":"Axon.html#module-multiple-outputs","title":"Multiple Outputs - Axon","type":"module"},{"doc":"If you find that Axon's built-in layers are insufficient for your needs,\nyou can create your own using the custom layer API. All of Axon's built-in\nlayers (aside from special ones such as `input`, `constant`, and `container`)\nmake use of this same API.\n\nAxon layers are really just placeholders for Nx computations with trainable\nparameters and possibly state. To define a custom layer, you just need to\ndefine a `defn` implementation:\n\n defn my_layer(x, weight, _opts \\\\ []) do\n Nx.atan2(x, weight)\n end\n\nNotice the only stipulation is that your custom layer implementation must\naccept at least 1 input and a list of options. 
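As a hypothetical sketch of that stipulation (all names here are illustrative, not part of the Axon API): a layer function receives its inputs plus an options list, and plain Elixir control flow over the options runs at trace time while `Nx` calls build the computation:

```elixir
# A hypothetical layer that only applies a learned shift while training,
# branching on the :mode option Axon forwards at execution time.
shift_when_training = fn x, shift, opts ->
  case opts[:mode] do
    :train -> Nx.add(x, shift)
    _ -> x
  end
end

input = Axon.input("input", shape: {nil, 8})
shift = Axon.param("shift", {}, initializer: :zeros)
model = Axon.layer(shift_when_training, [input, shift], op_name: :shift)
```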
At execution time, every\nlayer will be passed a `:mode` option which can be used to control behavior\nat training and inference time.\n\nInputs to your custom layer can be either Axon graph inputs or trainable\nparameters. You can pass Axon graph inputs as-is to a custom layer. To\ndeclare trainable parameters, use `Axon.param/3`:\n\n weight = Axon.param(\"weight\", param_shape)\n\nTo create a custom layer, you \"wrap\" your implementation and inputs into\na layer using `Axon.layer`. You'll notice the API mirrors Elixir's `apply`:\n\n def atan2_layer(%Axon{} = input) do\n weight = Axon.param(\"weight\", param_shape)\n Axon.layer(&my_layer/3, [input, weight])\n end","ref":"Axon.html#module-custom-layers","title":"Custom Layers - Axon","type":"module"},{"doc":"Under the hood, Axon models are represented as Elixir structs. You\ncan initialize and apply models by building or compiling them with\n`Axon.build/2` or `Axon.compile/4` and then calling the produced\ninitialization and predict functions:\n\n {init_fn, predict_fn} = Axon.build(model)\n\n params = init_fn.(Nx.template({1, 1}, {:f, 32}), %{})\n predict_fn.(params, inputs)\n\nYou may either set the default JIT compiler or backend globally, or\npass a specific compiler to `Axon.build/2`:\n\n EXLA.set_as_nx_default([:tpu, :cuda, :rocm, :host])\n\n {init_fn, predict_fn} = Axon.build(model, compiler: EXLA, mode: :train)\n\n params = init_fn.(Nx.template({1, 1}, {:f, 32}), %{})\n predict_fn.(params, inputs)\n\n`predict_fn` by default runs in inference mode, which performs certain\noptimizations and removes layers such as dropout layers. If constructing\na training step using `Axon.predict/4` or `Axon.build/2`, be sure to specify\n`mode: :train`.","ref":"Axon.html#module-model-execution","title":"Model Execution - Axon","type":"module"},{"doc":"Combining the Axon model creation API with the optimization and training\nAPIs, you can create and train neural networks with ease:\n\n model =\n Axon.input(\"input_0\", shape: {nil, 784})\n |> Axon.dense(128, activation: :relu)\n |> Axon.layer_norm()\n |> Axon.dropout()\n |> Axon.dense(10, activation: :softmax)\n\n IO.inspect model\n\n model_state =\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adamw(learning_rate: 0.005))\n |> Axon.Loop.run(train_data, epochs: 10, compiler: EXLA)\n\nSee `Polaris.Updates` and `Axon.Loop` for a more in-depth treatment of\nmodel optimization and model training.","ref":"Axon.html#module-model-training","title":"Model Training - Axon","type":"module"},{"doc":"When deploying an `Axon` model to production, you usually want to batch\nmultiple prediction requests and run the inference for all of them at\nonce. Conveniently, `Nx` already has an abstraction for this task in the\nform of `Nx.Serving`. 
Here's how you could define a serving for an `Axon`\nmodel:\n\n def build_serving() do\n # Configuration\n batch_size = 4\n defn_options = [compiler: EXLA]\n\n Nx.Serving.new(\n # This function runs on the serving startup\n fn ->\n # Build the Axon model and load params (usually from file)\n model = build_model()\n params = load_params()\n\n # Build the prediction defn function\n {_init_fun, predict_fun} = Axon.build(model)\n\n inputs_template = %{\"pixel_values\" => Nx.template({batch_size, 224, 224, 3}, :f32)}\n template_args = [Nx.to_template(params), inputs_template]\n\n # Compile the prediction function upfront for the configured batch_size\n predict_fun = Nx.Defn.compile(predict_fun, template_args, defn_options)\n\n # The returned function is called for every accumulated batch\n fn inputs ->\n inputs = Nx.Batch.pad(inputs, batch_size - inputs.size)\n predict_fun.(params, inputs)\n end\n end,\n batch_size: batch_size\n )\n end\n\nThen you would start the serving server as part of your application's\nsupervision tree:\n\n children = [\n ...,\n {Nx.Serving, serving: build_serving(), name: MyApp.Serving, batch_timeout: 100}\n ]\n\nWith that in place, you can now ask serving for predictions all across\nyour application (controllers, live views, async jobs, etc.). Having a\ntensor input you would do:\n\n inputs = %{\"pixel_values\" => ...}\n batch = Nx.Batch.concatenate([inputs])\n result = Nx.Serving.batched_run(MyApp.Serving, batch)\n\nUsually you also want to do pre/post-processing of the model input/output.\nYou could make those preparations directly before/after `Nx.Serving.batched_run/2`,\nhowever you can also make use of `Nx.Serving.client_preprocessing/2` and\n`Nx.Serving.client_postprocessing/2` to encapsulate that logic as part of\nthe serving.","ref":"Axon.html#module-using-with-nx-serving","title":"Using with `Nx.Serving` - Axon","type":"module"},{"doc":"Adds an activation layer to the network.\n\nActivation layers are element-wise functions typically called\nafter the output of another layer.","ref":"Axon.html#activation/3","title":"Axon.activation/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#activation/3-options","title":"Options - Axon.activation/3","type":"function"},{"doc":"Adds an Adaptive average pool layer to the network.\n\nSee `Axon.Layers.adaptive_avg_pool/2` for more details.","ref":"Axon.html#adaptive_avg_pool/2","title":"Axon.adaptive_avg_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:output_size` - layer output size.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#adaptive_avg_pool/2-options","title":"Options - Axon.adaptive_avg_pool/2","type":"function"},{"doc":"Adds an Adaptive power average pool layer to the network.\n\nSee `Axon.Layers.adaptive_lp_pool/2` for more details.","ref":"Axon.html#adaptive_lp_pool/2","title":"Axon.adaptive_lp_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:output_size` - layer output size.\n\n * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#adaptive_lp_pool/2-options","title":"Options - Axon.adaptive_lp_pool/2","type":"function"},{"doc":"Adds an Adaptive max pool layer to the network.\n\nSee `Axon.Layers.adaptive_max_pool/2` for more details.","ref":"Axon.html#adaptive_max_pool/2","title":"Axon.adaptive_max_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:output_size` - layer output size.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#adaptive_max_pool/2-options","title":"Options - Axon.adaptive_max_pool/2","type":"function"},{"doc":"Adds an add layer to the network.\n\nThis layer performs an element-wise add operation\non input layers. All input layers must be capable of being\nbroadcast together.\n\nIf one shape has a static batch size, all other shapes must have a\nstatic batch size as well.","ref":"Axon.html#add/3","title":"Axon.add/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#add/3-options","title":"Options - Axon.add/3","type":"function"},{"doc":"Adds an Alpha dropout layer to the network.\n\nSee `Axon.Layers.alpha_dropout/2` for more details.","ref":"Axon.html#alpha_dropout/2","title":"Axon.alpha_dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:rate` - dropout rate. Defaults to `0.5`.\n Needs to be equal or greater than zero and less than one.","ref":"Axon.html#alpha_dropout/2-options","title":"Options - Axon.alpha_dropout/2","type":"function"},{"doc":"Attaches a hook to the given Axon model.\n\nHooks compile down to `Nx.Defn.Kernel.hook/3` and provide the same\nfunctionality for adding side-effecting operations to a compiled\nmodel. For example, you can use hooks to inspect intermediate activations,\nsend data to an external service, and more.\n\nHooks can be configured to be invoked on the following events:\n\n * `:initialize` - on model initialization.\n * `:pre_forward` - before layer forward pass is invoked.\n * `:forward` - after layer forward pass is invoked.\n * `:backward` - after layer backward pass is invoked.\n\nTo invoke a hook on every single event, you may pass `:all` to `on:`.\n\n Axon.input(\"input\", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :all)\n\nThe default event is `:forward`, assuming you want a hook invoked\non the layer's forward pass.\n\nYou may configure hooks to run in only one of training or inference\nmode using the `:mode` option. The default mode is `:both` to be invoked\nduring both train and inference mode.\n\n Axon.input(\"input\", shape: {nil, 1}) |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)\n\nYou can also attach multiple hooks to a single layer. Hooks are invoked in\nthe order in which they are declared. If order is important, you should attach\nhooks in the order you want them to be executed:\n\n Axon.input(\"input\", shape: {nil, 1})\n # I will be executed first\n |> Axon.attach_hook(&IO.inspect/1)\n # I will be executed second\n |> Axon.attach_hook(fn _ -> IO.write(\"HERE\") end)\n\nHooks are executed at their point of attachment. 
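Since hooks run at their point of attachment, here is a hedged sketch of a train-only hook that logs an intermediate activation's norm (the model and label are illustrative):

```elixir
# Side-effecting hook: receives the concrete activation at runtime.
log_norm = fn activation ->
  IO.inspect(Nx.LinAlg.norm(activation), label: "dense output norm")
end

model =
  Axon.input("input", shape: {nil, 8})
  |> Axon.dense(16)
  |> Axon.attach_hook(log_norm, on: :forward, mode: :train)
  |> Axon.relu()
```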
You must insert hooks at each point\nyou want a hook to execute during model execution.\n\n Axon.input(\"input\", shape: {nil, 1})\n |> Axon.attach_hook(&IO.inspect/1)\n |> Axon.relu()\n |> Axon.attach_hook(&IO.inspect/1)","ref":"Axon.html#attach_hook/3","title":"Axon.attach_hook/3","type":"function"},{"doc":"Adds an Average pool layer to the network.\n\nSee `Axon.Layers.avg_pool/2` for more details.","ref":"Axon.html#avg_pool/2","title":"Axon.avg_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to size of kernel.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:dilations` - window dilations. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#avg_pool/2-options","title":"Options - Axon.avg_pool/2","type":"function"},{"doc":"Adds a Batch normalization layer to the network.\n\nSee `Axon.Layers.batch_norm/6` for more details.","ref":"Axon.html#batch_norm/2","title":"Axon.batch_norm/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. Defaults to `-1`.\n\n * `:epsilon` - numerical stability term. Defaults to `1.0e-5`.","ref":"Axon.html#batch_norm/2-options","title":"Options - Axon.batch_norm/2","type":"function"},{"doc":"Adds a bias layer to the network.\n\nA bias layer simply adds a trainable bias to an input.","ref":"Axon.html#bias/2","title":"Axon.bias/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`.","ref":"Axon.html#bias/2-options","title":"Options - Axon.bias/2","type":"function"},{"doc":"Applies the given forward function bidirectionally and merges\nthe results with the given merge function.\n\nThis is most commonly used with RNNs to capture the dependencies\nof a sequence in both directions.","ref":"Axon.html#bidirectional/4","title":"Axon.bidirectional/4","type":"function"},{"doc":"* `axis` - Axis to reverse.","ref":"Axon.html#bidirectional/4-options","title":"Options - Axon.bidirectional/4","type":"function"},{"doc":"Adds a bilinear layer to the network.\n\nThe bilinear layer implements:\n\n output = activation(dot(dot(input1, kernel), input2) + bias)\n\nwhere `activation` is given by the `:activation` option and both\n`kernel` and `bias` are layer parameters. `units` specifies the\nnumber of output units.\n\nAll dimensions but the last of `input1` and `input2` must match. The\nbatch sizes of both inputs must also match or at least one must be `nil`.\nInferred output batch size coerces to the strictest input batch size.\n\nCompiles to `Axon.Layers.bilinear/5`.","ref":"Axon.html#bilinear/4","title":"Axon.bilinear/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. 
Defaults\n to `:zeros`.\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#bilinear/4-options","title":"Options - Axon.bilinear/4","type":"function"},{"doc":"Returns a function which represents a self-contained re-usable block\nof operations in a neural network. All parameters in the block are\nshared between every usage of the block.\n\nThis returns an arity-1 function which accepts a list of inputs which\nare forwarded to `fun`. This is most often used in situations where\nyou wish to re-use parameters in a block:\n\n reused_dense = Axon.block(&Axon.dense(&1, 32))\n\nEvery time `reused_dense` is invoked, it re-uses the same parameters:\n\n input = Axon.input(\"features\")\n # unique parameters\n x1 = Axon.dense(input, 32)\n # unique parameters\n x2 = reused_dense.(x1)\n # parameters shared\n x3 = reused_dense.(x2)\n\nSubgraphs in blocks can be arbitrarily complex:\n\n reused_block = Axon.block(fn x ->\n x\n |> Axon.dense(32)\n |> Axon.dense(64)\n |> Axon.dense(32)\n end)\n\nBlocks can also have multiple inputs; you can invoke a block with multiple\ninputs by passing a list of arguments:\n\n reused_block = Axon.block(fn x, y, z ->\n x = Axon.dense(x, 32)\n y = Axon.dense(y, 32)\n z = Axon.dense(z, 32)\n\n Axon.add([x, y, z])\n end)\n\n # invoke with a list\n reused_block.([x, y, z])\n\nBlocks prefix subgraph parameters with their name and a dot. As with other\nAxon layers, if a name is not explicitly provided, one will be dynamically\ngenerated.","ref":"Axon.html#block/2","title":"Axon.block/2","type":"function"},{"doc":"Adds a blur pooling layer to the network.\n\nSee `Axon.Layers.blur_pool/2` for more details.","ref":"Axon.html#blur_pool/2","title":"Axon.blur_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#blur_pool/2-options","title":"Options - Axon.blur_pool/2","type":"function"},{"doc":"Builds the given model to `{init_fn, predict_fn}`.\n\nThe given functions can be either given as arguments to `Nx.Defn`\nfunctions or be invoked directly, to perform just-in-time compilation\nand execution. If you want to compile the model (instead of just-in-time)\nbased on a predefined initialization shape, see `compile/4`.\n\n## `init_fn`\n\nThe `init_fn` receives two arguments, the input template and\nan optional map with initial parameters for layers or namespaces:\n\n {init_fn, predict_fn} = Axon.build(model)\n init_fn.(Nx.template({1, 1}, {:f, 32}), %{\"dense_0\" => dense_params})\n\n## `predict_fn`\n\nThe `predict_fn` receives two arguments, the trained parameters\nand the actual inputs:\n\n {_init_fn, predict_fn} = Axon.build(model, opts)\n predict_fn.(params, input)","ref":"Axon.html#build/2","title":"Axon.build/2","type":"function"},{"doc":"* `:compiler` - the underlying `Nx.Defn` compiler to perform\n JIT compilation when the functions are invoked. If none is\n passed, it uses the default compiler configured in `Nx.Defn`;\n\n * `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`\n\n * `:mode` - one of `:inference` or `:train`. 
Forwarded to layers\n to control differences in compilation at training or inference time.\n Defaults to `:inference`\n\n * `:global_layer_options` - a keyword list of options passed to\n layers that accept said options\n\nAll other options are forwarded to the underlying JIT compiler.","ref":"Axon.html#build/2-options","title":"Options - Axon.build/2","type":"function"},{"doc":"Adds a Continuously-differentiable exponential linear unit activation layer to the network.\n\nSee `Axon.Activations.celu/1` for more details.","ref":"Axon.html#celu/2","title":"Axon.celu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#celu/2-options","title":"Options - Axon.celu/2","type":"function"},{"doc":"Compiles the given model to `{init_fn, predict_fn}`.\n\nThis function will compile a model specialized to the given\ninput shapes and types. This is useful for avoiding the overhead\nof long compilations at program runtime. You must provide template\ninputs which match the expected shapes and types of inputs at\nexecution time.\n\nThis function makes use of the built-in `Nx.Defn.compile/3`. Note\nthat passing inputs which differ in shape or type from the templates\nprovided to this function will result in a crash.","ref":"Axon.html#compile/4","title":"Axon.compile/4","type":"function"},{"doc":"It accepts the same options as `build/2`.","ref":"Axon.html#compile/4-options","title":"Options - Axon.compile/4","type":"function"},{"doc":"Adds a concatenate layer to the network.\n\nThis layer will concatenate inputs along the last\ndimension unless specified otherwise.","ref":"Axon.html#concatenate/3","title":"Axon.concatenate/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:axis` - concatenate axis. Defaults to `-1`.","ref":"Axon.html#concatenate/3-options","title":"Options - Axon.concatenate/3","type":"function"},{"doc":"Adds a conditional layer which conditionally executes\n`true_graph` or `false_graph` based on the condition `cond_fn`\nat runtime.\n\n`cond_fn` is an arity-1 function executed on the output of the\nparent graph. It must return a boolean scalar tensor (e.g. 1 or 0).\n\nThe shapes of `true_graph` and `false_graph` must be equal.","ref":"Axon.html#cond/5","title":"Axon.cond/5","type":"function"},{"doc":"Adds a constant layer to the network.\n\nConstant layers encapsulate Nx tensors in an Axon layer for ease\nof use with other Axon layers. They can be used interchangeably\nwith other Axon layers:\n\n inp = Axon.input(\"input\", shape: {nil, 32})\n my_constant = Axon.constant(Nx.iota({1, 32}))\n model = Axon.add(inp, my_constant)\n\nConstant layers will be cast according to the mixed precision policy.\nIf it's important for your constant to retain its type during\nthe computation, you will need to set the mixed precision policy to\nignore constant layers.","ref":"Axon.html#constant/2","title":"Axon.constant/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#constant/2-options","title":"Options - Axon.constant/2","type":"function"},{"doc":"Adds a container layer to the network.\n\nIn certain cases you may want your model to have multiple\noutputs. 
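Stepping back to `cond/5` above, a hedged sketch of the conditional layer (the predicate and shapes are illustrative):

```elixir
input = Axon.input("input", shape: {nil, 8})
on_true = Axon.relu(input)
on_false = Axon.tanh(input)

# Nx.all/1 yields a boolean scalar; both branches share the input's shape.
model = Axon.cond(input, &Nx.all(Nx.greater(&1, 0)), on_true, on_false)
```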
In order to make this work, you must \"join\" the\noutputs into an Axon layer using this function for use in\ninitialization and inference later on.\n\nThe given container can be any valid Axon Nx container.","ref":"Axon.html#container/2","title":"Axon.container/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#container/2-options","title":"Options - Axon.container/2","type":"function"},{"doc":"iex> inp1 = Axon.input(\"input_0\", shape: {nil, 1})\n iex> inp2 = Axon.input(\"input_1\", shape: {nil, 2})\n iex> model = Axon.container(%{a: inp1, b: inp2})\n iex> %{a: a, b: b} = Axon.predict(model, Axon.ModelState.empty(), %{\n ...> \"input_0\" => Nx.tensor([[1.0]]),\n ...> \"input_1\" => Nx.tensor([[1.0, 2.0]])\n ...> })\n iex> a\n #Nx.Tensor \n iex> b\n #Nx.Tensor","ref":"Axon.html#container/2-examples","title":"Examples - Axon.container/2","type":"function"},{"doc":"Adds a convolution layer to the network.\n\nThe convolution layer implements a general dimensional\nconvolutional layer - which convolves a kernel over the input\nto produce an output.\n\nCompiles to `Axon.Layers.conv/4`.","ref":"Axon.html#conv/3","title":"Axon.conv/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:input_dilation` - dilation to apply to input. Defaults to `1`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:feature_group_size` - feature group size for convolution. Defaults\n to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#conv/3-options","title":"Options - Axon.conv/3","type":"function"},{"doc":"See `conv_lstm/3`.","ref":"Axon.html#conv_lstm/2","title":"Axon.conv_lstm/2","type":"function"},{"doc":"Adds a convolutional long short-term memory (LSTM) layer to the network\nwith a random initial hidden state.\n\nSee `conv_lstm/4` for more details.","ref":"Axon.html#conv_lstm/3","title":"Axon.conv_lstm/3","type":"function"},{"doc":"* `:recurrent_initializer` - initializer for hidden state. Defaults\n to `:orthogonal`.","ref":"Axon.html#conv_lstm/3-additional-options","title":"Additional options - Axon.conv_lstm/3","type":"function"},{"doc":"Adds a convolutional long short-term memory (LSTM) layer to the network\nwith the given initial hidden state.\n\nConvLSTMs apply `Axon.Layers.conv_lstm_cell/5` over an entire input\nsequence and return:\n\n {{new_cell, new_hidden}, output_sequence}\n\nYou can use the output state as the hidden state of another\nConvLSTM layer.","ref":"Axon.html#conv_lstm/4","title":"Axon.conv_lstm/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:padding` - convolutional padding. Defaults to `:same`.\n\n * `:kernel_size` - convolutional kernel size. Defaults to `1`.\n\n * `:strides` - convolutional strides. Defaults to `1`.\n\n * `:unroll` - `:dynamic` (loop preserving) or `:static` (compiled)\n unrolling of RNN.\n\n * `:kernel_initializer` - initializer for kernel weights. 
Defaults\n to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for bias weights. Defaults to\n `:zeros`.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#conv_lstm/4-options","title":"Options - Axon.conv_lstm/4","type":"function"},{"doc":"Adds a transposed convolution layer to the network.\n\nThe transposed convolution layer is sometimes referred to as a\nfractionally strided convolution or (incorrectly) as a deconvolution.\n\nCompiles to `Axon.Layers.conv_transpose/4`.","ref":"Axon.html#conv_transpose/3","title":"Axon.conv_transpose/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#conv_transpose/3-options","title":"Options - Axon.conv_transpose/3","type":"function"},{"doc":"Adds a dense layer to the network.\n\nThe dense layer implements:\n\n output = activation(dot(input, kernel) + bias)\n\nwhere `activation` is given by the `:activation` option and both\n`kernel` and `bias` are layer parameters. `units` specifies the\nnumber of output units.\n\nCompiles to `Axon.Layers.dense/4`.","ref":"Axon.html#dense/3","title":"Axon.dense/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`.\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#dense/3-options","title":"Options - Axon.dense/3","type":"function"},{"doc":"Adds a depthwise convolution layer to the network.\n\nThe depthwise convolution layer implements a general\ndimensional depthwise convolution - which is a convolution\nwhere the feature group size is equal to the number of\ninput channels.\n\nChannel multiplier grows the input channels by the given\nfactor. An input factor of 1 means the output channels\nare the same as the input channels.\n\nCompiles to `Axon.Layers.depthwise_conv/4`.","ref":"Axon.html#depthwise_conv/3","title":"Axon.depthwise_conv/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:input_dilation` - dilation to apply to input. 
Defaults to `1`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#depthwise_conv/3-options","title":"Options - Axon.depthwise_conv/3","type":"function"},{"doc":"Adds a Dropout layer to the network.\n\nSee `Axon.Layers.dropout/2` for more details.","ref":"Axon.html#dropout/2","title":"Axon.dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:rate` - dropout rate. Defaults to `0.5`.\n Needs to be equal or greater than zero and less than one.","ref":"Axon.html#dropout/2-options","title":"Options - Axon.dropout/2","type":"function"},{"doc":"Adds an Exponential linear unit activation layer to the network.\n\nSee `Axon.Activations.elu/1` for more details.","ref":"Axon.html#elu/2","title":"Axon.elu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#elu/2-options","title":"Options - Axon.elu/2","type":"function"},{"doc":"Adds an embedding layer to the network.\n\nAn embedding layer initializes a kernel of shape `{vocab_size, embedding_size}`\nwhich acts as a lookup table for sequences of discrete tokens (e.g. sentences).\nEmbeddings are typically used to obtain a dense representation of a sparse input\nspace.","ref":"Axon.html#embedding/4","title":"Axon.embedding/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights. Defaults\n to `:uniform`.","ref":"Axon.html#embedding/4-options","title":"Options - Axon.embedding/4","type":"function"},{"doc":"Adds an Exponential activation layer to the network.\n\nSee `Axon.Activations.exp/1` for more details.","ref":"Axon.html#exp/2","title":"Axon.exp/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#exp/2-options","title":"Options - Axon.exp/2","type":"function"},{"doc":"Adds a Feature alpha dropout layer to the network.\n\nSee `Axon.Layers.feature_alpha_dropout/2` for more details.","ref":"Axon.html#feature_alpha_dropout/2","title":"Axon.feature_alpha_dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:rate` - dropout rate. Defaults to `0.5`.\n Needs to be equal or greater than zero and less than one.","ref":"Axon.html#feature_alpha_dropout/2-options","title":"Options - Axon.feature_alpha_dropout/2","type":"function"},{"doc":"Adds a flatten layer to the network.\n\nThis layer will flatten all but the batch dimensions\nof the input into a single layer. Typically called to flatten\nthe output of a convolution for use with a dense layer.","ref":"Axon.html#flatten/2","title":"Axon.flatten/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#flatten/2-options","title":"Options - Axon.flatten/2","type":"function"},{"doc":"Freezes parameters returned from the given function or predicate.\n\n`fun` can be a predicate `:all`, `up: n`, or `down: n`. 
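A minimal sketch of those predicate forms, whose semantics are spelled out next (`model` is assumed to be an existing Axon graph):

```elixir
all_frozen = Axon.freeze(model)            # defaults to the :all predicate
head_frozen = Axon.freeze(model, up: 2)    # two layers, counting from the output
base_frozen = Axon.freeze(model, down: 4)  # four layers, counting from the input
```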
`:all`\nfreezes all parameters in the model, `up: n` freezes the first `n`\nlayers up (starting from output), and `down: n` freezes the first `n`\nlayers down (starting from input).\n\n`fun` may also be a predicate function which takes a parameter and\nreturns `true` if a parameter should be frozen or `false` otherwise.\n\nFreezing parameters is useful when performing transfer learning\nto leverage features learned from another problem in a new problem.\nFor example, it's common to combine the convolutional base from\nlarger models trained on ImageNet with fresh fully-connected classifiers.\nThe combined model is then trained on fresh data, with the convolutional\nbase frozen so as not to lose information. You can see this example\nin code here:\n\n cnn_base = get_pretrained_cnn_base()\n model =\n cnn_base\n |> Axon.freeze()\n |> Axon.flatten()\n |> Axon.dense(1024, activation: :relu)\n |> Axon.dropout()\n |> Axon.dense(1000, activation: :softmax)\n\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.005))\n |> Axon.Loop.run(data, epochs: 10)\n\nWhen compiled, frozen parameters are wrapped in `Nx.Defn.Kernel.stop_grad/1`,\nwhich zeros out the gradient with respect to the frozen parameter. Gradients\nof frozen parameters will return `0.0`, meaning they won't be changed during\nthe update process.","ref":"Axon.html#freeze/2","title":"Axon.freeze/2","type":"function"},{"doc":"Adds a Gaussian error linear unit activation layer to the network.\n\nSee `Axon.Activations.gelu/1` for more details.","ref":"Axon.html#gelu/2","title":"Axon.gelu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#gelu/2-options","title":"Options - Axon.gelu/2","type":"function"},{"doc":"Returns information about a model's inputs.","ref":"Axon.html#get_inputs/1","title":"Axon.get_inputs/1","type":"function"},{"doc":"Returns a map of model op counts for each unique operation\nin a model by their given `:op_name`.","ref":"Axon.html#get_op_counts/1","title":"Axon.get_op_counts/1","type":"function"},{"doc":"iex> model = Axon.input(\"input\", shape: {nil, 1}) |> Axon.dense(2)\n iex> Axon.get_op_counts(model)\n %{input: 1, dense: 1}\n\n iex> model = Axon.input(\"input\", shape: {nil, 1}) |> Axon.tanh() |> Axon.tanh()\n iex> Axon.get_op_counts(model)\n %{input: 1, tanh: 2}","ref":"Axon.html#get_op_counts/1-examples","title":"Examples - Axon.get_op_counts/1","type":"function"},{"doc":"Returns a node's immediate input options.\n\nNote that this does not take into account options of\nparent layers, only the options which belong to the\nimmediate layer.","ref":"Axon.html#get_options/1","title":"Axon.get_options/1","type":"function"},{"doc":"Returns a model's output shape from the given input\ntemplate.","ref":"Axon.html#get_output_shape/3","title":"Axon.get_output_shape/3","type":"function"},{"doc":"Returns a node's immediate parameters.\n\nNote this does not take into account parameters of\nparent layers - only the parameters which belong to\nthe immediate layer.","ref":"Axon.html#get_parameters/1","title":"Axon.get_parameters/1","type":"function"},{"doc":"Adds a Global average pool layer to the network.\n\nSee `Axon.Layers.global_avg_pool/2` for more details.\n\nTypically used to connect feature extractors such as those in convolutional\nneural networks to fully-connected models by reducing inputs along spatial\ndimensions to only feature and batch dimensions.","ref":"Axon.html#global_avg_pool/2","title":"Axon.global_avg_pool/2","type":"function"},{"doc":"* 
`:name` - layer name.\n\n * `:keep_axes` - option to keep reduced axes. If `true`, keeps reduced axes\n with a dimension size of 1.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#global_avg_pool/2-options","title":"Options - Axon.global_avg_pool/2","type":"function"},{"doc":"Adds a Global LP pool layer to the network.\n\nSee `Axon.Layers.global_lp_pool/2` for more details.\n\nTypically used to connect feature extractors such as those in convolutional\nneural networks to fully-connected models by reducing inputs along spatial\ndimensions to only feature and batch dimensions.","ref":"Axon.html#global_lp_pool/2","title":"Axon.global_lp_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:keep_axes` - option to keep reduced axes. If `true`, keeps reduced axes\n with a dimension size of 1.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#global_lp_pool/2-options","title":"Options - Axon.global_lp_pool/2","type":"function"},{"doc":"Adds a Global max pool layer to the network.\n\nSee `Axon.Layers.global_max_pool/2` for more details.\n\nTypically used to connect feature extractors such as those in convolutional\nneural networks to fully-connected models by reducing inputs along spatial\ndimensions to only feature and batch dimensions.","ref":"Axon.html#global_max_pool/2","title":"Axon.global_max_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:keep_axes` - option to keep reduced axes. If `true`, keeps reduced axes\n with a dimension size of 1.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#global_max_pool/2-options","title":"Options - Axon.global_max_pool/2","type":"function"},{"doc":"Adds a group normalization layer to the network.\n\nSee `Axon.Layers.group_norm/4` for more details.","ref":"Axon.html#group_norm/3","title":"Axon.group_norm/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. Defaults to `-1`.\n\n * `:epsilon` - numerical stability term.","ref":"Axon.html#group_norm/3-options","title":"Options - Axon.group_norm/3","type":"function"},{"doc":"See `gru/3`.","ref":"Axon.html#gru/2","title":"Axon.gru/2","type":"function"},{"doc":"Adds a gated recurrent unit (GRU) layer to the network with\na random initial hidden state.\n\nSee `gru/4` for more details.","ref":"Axon.html#gru/3","title":"Axon.gru/3","type":"function"},{"doc":"* `:recurrent_initializer` - initializer for hidden state.\n Defaults to `:orthogonal`.","ref":"Axon.html#gru/3-additional-options","title":"Additional options - Axon.gru/3","type":"function"},{"doc":"Adds a gated recurrent unit (GRU) layer to the network with\nthe given initial hidden state.\n\nGRUs apply `Axon.Layers.gru_cell/7` over an entire input\nsequence and return:\n\n {{new_hidden}, output_sequence}\n\nYou can use the output state as the hidden state of another\nGRU layer.","ref":"Axon.html#gru/4","title":"Axon.gru/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:activation` - recurrent activation. Defaults to `:tanh`.\n\n * `:gate` - recurrent gate function. 
Defaults to `:sigmoid`.\n\n * `:unroll` - `:dynamic` (loop preserving) or `:static` (compiled)\n unrolling of RNN.\n\n * `:kernel_initializer` - initializer for kernel weights. Defaults\n to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for bias weights. Defaults to\n `:zeros`.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#gru/4-options","title":"Options - Axon.gru/4","type":"function"},{"doc":"Adds a Hard sigmoid activation layer to the network.\n\nSee `Axon.Activations.hard_sigmoid/1` for more details.","ref":"Axon.html#hard_sigmoid/2","title":"Axon.hard_sigmoid/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#hard_sigmoid/2-options","title":"Options - Axon.hard_sigmoid/2","type":"function"},{"doc":"Adds a Hard sigmoid weighted linear unit activation layer to the network.\n\nSee `Axon.Activations.hard_silu/1` for more details.","ref":"Axon.html#hard_silu/2","title":"Axon.hard_silu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#hard_silu/2-options","title":"Options - Axon.hard_silu/2","type":"function"},{"doc":"Adds a Hard hyperbolic tangent activation layer to the network.\n\nSee `Axon.Activations.hard_tanh/1` for more details.","ref":"Axon.html#hard_tanh/2","title":"Axon.hard_tanh/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#hard_tanh/2-options","title":"Options - Axon.hard_tanh/2","type":"function"},{"doc":"Adds an input layer to the network.\n\nInput layers specify a model's inputs. Input layers are\nalways the root layers of the neural network.\n\nYou must specify the input layers name, which will be used\nto uniquely identify it in the case of multiple inputs.","ref":"Axon.html#input/2","title":"Axon.input/2","type":"function"},{"doc":"* `:shape` - the expected input shape, use `nil` for dimensions\n of a dynamic size.\n\n * `:optional` - if `true`, the input may be omitted when using\n the model. This needs to be handled in one of the subsequent\n layers. See `optional/2` for more details.","ref":"Axon.html#input/2-options","title":"Options - Axon.input/2","type":"function"},{"doc":"Adds an Instance normalization layer to the network.\n\nSee `Axon.Layers.instance_norm/6` for more details.","ref":"Axon.html#instance_norm/2","title":"Axon.instance_norm/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. Defaults to `-1`.\n\n * `:epsilon` - numerical stability term. Defaults to `1.0e-5`.","ref":"Axon.html#instance_norm/2-options","title":"Options - Axon.instance_norm/2","type":"function"},{"doc":"Custom Axon layer with given inputs.\n\nInputs may be other Axon layers or trainable parameters created\nwith `Axon.param`. At inference time, `op` will be applied with\ninputs in specified order and an additional `opts` parameter which\nspecifies inference options. All options passed to layer are forwarded\nto inference function except:\n\n * `:name` - layer name.\n\n * `:op_name` - layer operation for inspection and building parameter map.\n\n * `:mode` - if the layer should run only on `:inference` or `:train`. Defaults to `:both`\n\n * `:global_options` - a list of global option names that this layer\n supports. 
Global options passed to `build/2` will be forwarded to\n the layer, as long as they are declared\n\nNote this means your layer should not use these as input options,\nas they will always be dropped during inference compilation.\n\nAxon's compiler will additionally forward the following options to\nevery layer at inference time:\n\n * `:mode` - `:inference` or `:train`. To control layer behavior\n based on inference or train time.\n\n`op` is a function of the form:\n\n fun = fn input, weight, bias, _opts ->\n input * weight + bias\n end","ref":"Axon.html#layer/3","title":"Axon.layer/3","type":"function"},{"doc":"Adds a Layer normalization layer to the network.\n\nSee `Axon.Layers.layer_norm/4` for more details.","ref":"Axon.html#layer_norm/2","title":"Axon.layer_norm/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:gamma_initializer` - gamma parameter initializer. Defaults\n to `:glorot_uniform`.\n\n * `:beta_initializer` - beta parameter initializer. Defaults to\n `:zeros`.\n\n * `:channel_index` - input feature index used for calculating\n mean and variance. Defaults to `-1`.\n\n * `:epsilon` - numerical stability term.","ref":"Axon.html#layer_norm/2-options","title":"Options - Axon.layer_norm/2","type":"function"},{"doc":"Adds a Leaky rectified linear unit activation layer to the network.\n\nSee `Axon.Activations.leaky_relu/1` for more details.","ref":"Axon.html#leaky_relu/2","title":"Axon.leaky_relu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#leaky_relu/2-options","title":"Options - Axon.leaky_relu/2","type":"function"},{"doc":"Adds a Linear activation layer to the network.\n\nSee `Axon.Activations.linear/1` for more details.","ref":"Axon.html#linear/2","title":"Axon.linear/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#linear/2-options","title":"Options - Axon.linear/2","type":"function"},{"doc":"Adds a Log-sigmoid activation layer to the network.\n\nSee `Axon.Activations.log_sigmoid/1` for more details.","ref":"Axon.html#log_sigmoid/2","title":"Axon.log_sigmoid/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#log_sigmoid/2-options","title":"Options - Axon.log_sigmoid/2","type":"function"},{"doc":"Adds a Log-softmax activation layer to the network.\n\nSee `Axon.Activations.log_softmax/1` for more details.","ref":"Axon.html#log_softmax/2","title":"Axon.log_softmax/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#log_softmax/2-options","title":"Options - Axon.log_softmax/2","type":"function"},{"doc":"Adds a Log-sumexp activation layer to the network.\n\nSee `Axon.Activations.log_sumexp/1` for more details.","ref":"Axon.html#log_sumexp/2","title":"Axon.log_sumexp/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#log_sumexp/2-options","title":"Options - Axon.log_sumexp/2","type":"function"},{"doc":"Adds a Power average pool layer to the network.\n\nSee `Axon.Layers.lp_pool/2` for more details.","ref":"Axon.html#lp_pool/2","title":"Axon.lp_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to size of kernel.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:dilations` - window dilations. Defaults to `1`.\n\n * `:channels` - channels location. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#lp_pool/2-options","title":"Options - Axon.lp_pool/2","type":"function"},{"doc":"See `lstm/3`.","ref":"Axon.html#lstm/2","title":"Axon.lstm/2","type":"function"},{"doc":"Adds a long short-term memory (LSTM) layer to the network\nwith a random initial hidden state.\n\nSee `lstm/4` for more details.","ref":"Axon.html#lstm/3","title":"Axon.lstm/3","type":"function"},{"doc":"* `:recurrent_initializer` - initializer for hidden state.\n Defaults to `:orthogonal`.","ref":"Axon.html#lstm/3-additional-options","title":"Additional options - Axon.lstm/3","type":"function"},{"doc":"Adds a long short-term memory (LSTM) layer to the network\nwith the given initial hidden state.\n\nLSTMs apply `Axon.Layers.lstm_cell/7` over an entire input\nsequence and return:\n\n {output_sequence, {new_cell, new_hidden}}\n\nYou can use the output state as the hidden state of another\nLSTM layer.","ref":"Axon.html#lstm/4","title":"Axon.lstm/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:activation` - recurrent activation. Defaults to `:tanh`.\n\n * `:gate` - recurrent gate function. Defaults to `:sigmoid`.\n\n * `:unroll` - `:dynamic` (loop preserving) or `:static` (compiled)\n unrolling of RNN.\n\n * `:kernel_initializer` - initializer for kernel weights. Defaults\n to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for bias weights. Defaults to\n `:zeros`.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`.","ref":"Axon.html#lstm/4-options","title":"Options - Axon.lstm/4","type":"function"},{"doc":"Traverses graph nodes in order, applying `fun` to each\nnode exactly once to return a transformed node in its\nplace(s) in the graph.\n\nThis function maintains an internal cache which ensures\neach node is only visited and transformed exactly once.\n\n`fun` must accept an Axon node and return an Axon node.\n\nPlease note that modifying node lineage (e.g. altering\na node's parent) will result in disconnected graphs.","ref":"Axon.html#map_nodes/2","title":"Axon.map_nodes/2","type":"function"},{"doc":"One common use of this function is to implement common\ninstrumentation between layers without needing to build\na new explicitly instrumented version of a model. For example,\nyou can use this function to visualize intermediate activations\nof all convolutional layers in a model:\n\n instrumented_model = Axon.map_nodes(model, fn\n %Axon.Node{op: :conv} = axon_node ->\n Axon.attach_hook(axon_node, &visualize_activations/1)\n\n axon_node ->\n axon_node\n end)\n\nAnother use case is to replace entire classes of layers\nwith another. For example, you may want to replace all\nrelu layers with tanh layers:\n\n new_model = Axon.map_nodes(model, fn\n %Axon.Node{op: :relu} = graph ->\n # Get node's immediate parent\n parent = Axon.get_parent(graph)\n # Replace node with a tanh\n Axon.tanh(parent)\n\n graph ->\n graph\n end)","ref":"Axon.html#map_nodes/2-examples","title":"Examples - Axon.map_nodes/2","type":"function"},{"doc":"Computes a sequence mask according to the given EOS token.\n\nMasks can be propagated to recurrent layers or custom layers to\nindicate that a given token should be ignored in processing. 
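For the recurrent layers documented above, a minimal sketch of wiring an LSTM into a model (vocabulary size, sequence length, and unit counts are illustrative):

```elixir
input = Axon.input("tokens", shape: {nil, 16})
embedded = Axon.embedding(input, 1000, 32)

# Per the docs above, the layer returns {output_sequence, state}.
{sequence, _state} = Axon.lstm(embedded, 64)

model = Axon.dense(sequence, 10, activation: :softmax)
```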
This\nis useful when you have sequences of variable length.\n\nMost commonly, `eos_token` is `0`.","ref":"Axon.html#mask/3","title":"Axon.mask/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#mask/3-options","title":"Options - Axon.mask/3","type":"function"},{"doc":"Adds a Max pool layer to the network.\n\nSee `Axon.Layers.max_pool/2` for more details.","ref":"Axon.html#max_pool/2","title":"Axon.max_pool/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to size of kernel.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:dilations` - window dilations. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#max_pool/2-options","title":"Options - Axon.max_pool/2","type":"function"},{"doc":"Adds a Mish activation layer to the network.\n\nSee `Axon.Activations.mish/1` for more details.","ref":"Axon.html#mish/2","title":"Axon.mish/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#mish/2-options","title":"Options - Axon.mish/2","type":"function"},{"doc":"Adds a multiply layer to the network.\n\nThis layer performs an element-wise multiply operation\non input layers. All input layers must be capable of being\nbroadcast together.\n\nIf one shape has a static batch size, all other shapes must have a\nstatic batch size as well.","ref":"Axon.html#multiply/3","title":"Axon.multiply/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#multiply/3-options","title":"Options - Axon.multiply/3","type":"function"},{"doc":"Applies the given `Nx` expression to the input.\n\nNx layers are meant for quick applications of functions without\ntrainable parameters. For example, they are useful for applying\nfunctions which apply accessors to containers:\n\n model = Axon.container({foo, bar})\n Axon.nx(model, &elem(&1, 0))","ref":"Axon.html#nx/3","title":"Axon.nx/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#nx/3-options","title":"Options - Axon.nx/3","type":"function"},{"doc":"Wraps an Axon model in an optional node.\n\nBy default, when an optional input is missing, all subsequent layers\nare nullified. For example, consider this model:\n\n values = Axon.input(\"values\")\n mask = Axon.input(\"mask\", optional: true)\n\n model =\n values\n |> Axon.dense(10)\n |> Axon.multiply(mask)\n |> Axon.dense(1)\n |> Axon.sigmoid()\n\nIn case the mask is not provided, the input node will resolve to\n`%Axon.None{}` and so will all the layers that depend on it. 
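A hedged sketch of that default nullification behavior (templates and shapes are assumptions for illustration):

```elixir
{init_fn, predict_fn} = Axon.build(model)

# Only the required input appears in the template.
params = init_fn.(%{"values" => Nx.template({1, 4}, :f32)}, %{})

# With "mask" omitted, the layers depending on it resolve to %Axon.None{}.
predict_fn.(params, %{"values" => Nx.tensor([[1.0, 2.0, 3.0, 4.0]])})
```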
By\nusing `optional/2` a layer may opt-in to receive `%Axon.None{}`.\nTo fix our example, we could define a custom layer to apply the\nmask only when present:\n\n def apply_optional_mask(%Axon{} = x, %Axon{} = mask) do\n Axon.layer(\n fn x, mask, _opts ->\n case mask do\n %Axon.None{} -> x\n mask -> Nx.multiply(x, mask)\n end\n end,\n [x, Axon.optional(mask)]\n )\n end\n\n # ...\n\n model =\n values\n |> Axon.dense(10)\n |> apply_optional_mask(mask)\n |> Axon.dense(1)\n |> Axon.sigmoid()","ref":"Axon.html#optional/2","title":"Axon.optional/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#optional/2-options","title":"Options - Axon.optional/2","type":"function"},{"doc":"Adds a pad layer to the network.\n\nThis layer will pad the spatial dimensions of the input.\nPadding configuration is a list of tuples for each spatial\ndimension.","ref":"Axon.html#pad/4","title":"Axon.pad/4","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:channels` - channel configuration. One of `:first` or\n `:last`. Defaults to `:last`.","ref":"Axon.html#pad/4-options","title":"Options - Axon.pad/4","type":"function"},{"doc":"Trainable Axon parameter used to create custom layers.\n\nParameters are specified in usages of `Axon.layer` and will\nbe automatically initialized and used in subsequent applications\nof Axon models.\n\nYou may specify the parameter shape as either a static shape or\nas function of the inputs to the given layer. If you specify the\nparameter shape as a function, it will be given the","ref":"Axon.html#param/3","title":"Axon.param/3","type":"function"},{"doc":"* `:initializer` - parameter initializer. Defaults to `:glorot_uniform`.","ref":"Axon.html#param/3-options","title":"Options - Axon.param/3","type":"function"},{"doc":"Pops the top node off of the graph.\n\nThis returns the popped node and the updated graph:\n\n {_node, model} = Axon.pop_node(model)","ref":"Axon.html#pop_node/1","title":"Axon.pop_node/1","type":"function"},{"doc":"Builds and runs the given Axon `model` with `params` and `input`.\n\nThis is equivalent to calling `build/2` and then invoking the\npredict function.","ref":"Axon.html#predict/4","title":"Axon.predict/4","type":"function"},{"doc":"* `:mode` - one of `:inference` or `:train`. Forwarded to layers\n to control differences in compilation at training or inference time.\n Defaults to `:inference`\n\n * `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`\n\nAll other options are forwarded to the default JIT compiler\nor backend.","ref":"Axon.html#predict/4-options","title":"Options - Axon.predict/4","type":"function"},{"doc":"Traverses graph nodes in order, applying `fun` and an accumulator to\neach node exactly once to produce a final accumulated result.\n\nThis function maintains an internal cache which ensures\neach node is only visited exactly once.\n\n`fun` must accept an Axon node and accumulator and return\nan updated accumulator.","ref":"Axon.html#reduce_nodes/3","title":"Axon.reduce_nodes/3","type":"function"},{"doc":"Internally this function is used in several places to accumulate\ngraph metadata. 
For example, you can use it to count the number\nof a certain type of operation in the graph:\n\n    Axon.reduce_nodes(model, 0, fn\n      %Axon.Node{op: :relu}, acc -> acc + 1\n      _, acc -> acc\n    end)","ref":"Axon.html#reduce_nodes/3-examples","title":"Examples - Axon.reduce_nodes/3","type":"function"},{"doc":"Adds a Rectified linear unit 6 activation layer to the network.\n\nSee `Axon.Activations.relu6/1` for more details.","ref":"Axon.html#relu6/2","title":"Axon.relu6/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#relu6/2-options","title":"Options - Axon.relu6/2","type":"function"},{"doc":"Adds a Rectified linear unit activation layer to the network.\n\nSee `Axon.Activations.relu/1` for more details.","ref":"Axon.html#relu/2","title":"Axon.relu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#relu/2-options","title":"Options - Axon.relu/2","type":"function"},{"doc":"Adds a reshape layer to the network.\n\nThis layer implements a special case of `Nx.reshape` which accounts\nfor possible batch dimensions in the input tensor. You may pass the\nmagic dimension `:batch` as a placeholder for dynamic batch sizes.\nYou can use `:batch` seamlessly with `:auto` dimension sizes.\n\nIf the input is an Axon constant, the reshape behavior matches that of\n`Nx.reshape/2`.","ref":"Axon.html#reshape/3","title":"Axon.reshape/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#reshape/3-options","title":"Options - Axon.reshape/3","type":"function"},{"doc":"Adds a resize layer to the network.\n\nResizing can be used for interpolation or upsampling input\nvalues in a neural network. For example, you can use this\nlayer as an upsampling layer within a GAN.\n\nResize shape must be a tuple representing the resized spatial\ndimensions of the input tensor.\n\nCompiles to `Axon.Layers.resize/2`.","ref":"Axon.html#resize/3","title":"Axon.resize/3","type":"function"},{"doc":"* `:name` - layer name.\n\n  * `:method` - resize method. Defaults to `:nearest`.\n\n  * `:antialias` - whether an anti-aliasing filter should be used\n    when downsampling. Defaults to `true`.\n\n  * `:channels` - channel configuration. One of `:first` or\n    `:last`. Defaults to `:last`.","ref":"Axon.html#resize/3-options","title":"Options - Axon.resize/3","type":"function"},{"doc":"Adds a Scaled exponential linear unit activation layer to the network.\n\nSee `Axon.Activations.selu/1` for more details.","ref":"Axon.html#selu/2","title":"Axon.selu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#selu/2-options","title":"Options - Axon.selu/2","type":"function"},{"doc":"Adds a depthwise separable 2-dimensional convolution to the\nnetwork.\n\nDepthwise separable convolutions break the kernel into kernels\nfor each dimension of the input and perform a depthwise conv\nover the input with each kernel.\n\nCompiles to `Axon.Layers.separable_conv2d/6`.","ref":"Axon.html#separable_conv2d/3","title":"Axon.separable_conv2d/3","type":"function"},{"doc":"* `:name` - layer name.\n\n  * `:kernel_initializer` - initializer for `kernel` weights.\n    Defaults to `:glorot_uniform`.\n\n  * `:bias_initializer` - initializer for `bias` weights. Defaults\n    to `:zeros`\n\n  * `:activation` - element-wise activation function.\n\n  * `:use_bias` - whether the layer should add bias to the output.\n    Defaults to `true`\n\n  * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n    to `1`.\n\n  * `:strides` - stride during convolution. 
Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:input_dilation` - dilation to apply to input. Defaults to `1`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#separable_conv2d/3-options","title":"Options - Axon.separable_conv2d/3","type":"function"},{"doc":"Adds a depthwise separable 3-dimensional convolution to the\nnetwork.\n\nDepthwise separable convolutions break the kernel into kernels\nfor each dimension of the input and perform a depthwise conv\nover the input with each kernel.\n\nCompiles to `Axon.Layers.separable_conv3d/8`.","ref":"Axon.html#separable_conv3d/3","title":"Axon.separable_conv3d/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:kernel_initializer` - initializer for `kernel` weights.\n Defaults to `:glorot_uniform`.\n\n * `:bias_initializer` - initializer for `bias` weights. Defaults\n to `:zeros`\n\n * `:activation` - element-wise activation function.\n\n * `:use_bias` - whether the layer should add bias to the output.\n Defaults to `true`\n\n * `:kernel_size` - size of the kernel spatial dimensions. Defaults\n to `1`.\n\n * `:strides` - stride during convolution. Defaults to `1`.\n\n * `:padding` - padding to the spatial dimensions of the input.\n Defaults to `:valid`.\n\n * `:input_dilation` - dilation to apply to input. Defaults to `1`.\n\n * `:kernel_dilation` - dilation to apply to kernel. Defaults to `1`.\n\n * `:channels` - channels location. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.html#separable_conv3d/3-options","title":"Options - Axon.separable_conv3d/3","type":"function"},{"doc":"Sets a node's immediate options to the given input\noptions.\n\nNote that this does not take into account options of\nparent layers, only the option which belong to the\nimmediate layer.\n\nNew options must be compatible with the given layer\nop. 
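A small, assumed example of `set_options/2` in the spirit of the entry above, adjusting one hyperparameter on the top node (valid option names depend on the node's op):

    model = Axon.input("input") |> Axon.dropout(rate: 0.5)

    # swap the dropout rate on the immediate node; the op stays the same
    model = Axon.set_options(model, rate: 0.1)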
Adding unsupported options to an Axon layer will\nresult in an error at graph execution time.","ref":"Axon.html#set_options/2","title":"Axon.set_options/2","type":"function"},{"doc":"Sets a node's immediate parameters to the given\nparameters.\n\nNote this does not take into account parameters of\nparent layers - only the parameters which belong to\nthe immediate layer.\n\nThe new parameters must be compatible with the layer's\nold parameters.","ref":"Axon.html#set_parameters/2","title":"Axon.set_parameters/2","type":"function"},{"doc":"Adds a Sigmoid activation layer to the network.\n\nSee `Axon.Activations.sigmoid/1` for more details.","ref":"Axon.html#sigmoid/2","title":"Axon.sigmoid/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#sigmoid/2-options","title":"Options - Axon.sigmoid/2","type":"function"},{"doc":"Adds a Sigmoid weighted linear unit activation layer to the network.\n\nSee `Axon.Activations.silu/1` for more details.","ref":"Axon.html#silu/2","title":"Axon.silu/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#silu/2-options","title":"Options - Axon.silu/2","type":"function"},{"doc":"Adds a Softmax activation layer to the network.\n\nSee `Axon.Activations.softmax/1` for more details.","ref":"Axon.html#softmax/2","title":"Axon.softmax/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#softmax/2-options","title":"Options - Axon.softmax/2","type":"function"},{"doc":"Adds a Softplus activation layer to the network.\n\nSee `Axon.Activations.softplus/1` for more details.","ref":"Axon.html#softplus/2","title":"Axon.softplus/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#softplus/2-options","title":"Options - Axon.softplus/2","type":"function"},{"doc":"Adds a Softsign activation layer to the network.\n\nSee `Axon.Activations.softsign/1` for more details.","ref":"Axon.html#softsign/2","title":"Axon.softsign/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#softsign/2-options","title":"Options - Axon.softsign/2","type":"function"},{"doc":"Adds a Spatial dropout layer to the network.\n\nSee `Axon.Layers.spatial_dropout/2` for more details.","ref":"Axon.html#spatial_dropout/2","title":"Axon.spatial_dropout/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:rate` - dropout rate. Defaults to `0.5`.\n Needs to be equal or greater than zero and less than one.","ref":"Axon.html#spatial_dropout/2-options","title":"Options - Axon.spatial_dropout/2","type":"function"},{"doc":"Splits input graph into a container of `n` input graphs\nalong the given axis.","ref":"Axon.html#split/3","title":"Axon.split/3","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:axis` - concatenate axis. Defaults to `-1`.","ref":"Axon.html#split/3-options","title":"Options - Axon.split/3","type":"function"},{"doc":"Adds a stack columns layer to the network.\n\nA stack columns layer is designed to be used with `Nx.LazyContainer`\ndata structures like Explorer DataFrames. 
Given an input which is a\nDataFrame, `stack_columns/2` will stack the columns in each row to\ncreate a single vector.\n\nYou may optionally specify `:ignore` to ignore certain columns in\nthe container.","ref":"Axon.html#stack_columns/2","title":"Axon.stack_columns/2","type":"function"},{"doc":"* `:name` - layer name.\n\n * `:ignore` - keys to ignore when stacking.","ref":"Axon.html#stack_columns/2-options","title":"Options - Axon.stack_columns/2","type":"function"},{"doc":"Adds a subtract layer to the network.\n\nThis layer performs an element-wise subtract operation\non input layers. All input layers must be capable of being\nbroadcast together.\n\nIf one shape has a static batch size, all other shapes must have a\nstatic batch size as well.","ref":"Axon.html#subtract/3","title":"Axon.subtract/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#subtract/3-options","title":"Options - Axon.subtract/3","type":"function"},{"doc":"Adds a Hyperbolic tangent activation layer to the network.\n\nSee `Axon.Activations.tanh/1` for more details.","ref":"Axon.html#tanh/2","title":"Axon.tanh/2","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#tanh/2-options","title":"Options - Axon.tanh/2","type":"function"},{"doc":"Compiles and returns the given model's backward function\nexpression with respect to the given loss function.\n\nThe returned expression is an Nx expression which can be\ntraversed and lowered to an IR or inspected for debugging\npurposes.\n\nThe given loss function must be a scalar loss function which\nexpects inputs and targets with the same shapes as the model's\noutput shapes as determined by the model's signature.","ref":"Axon.html#trace_backward/5","title":"Axon.trace_backward/5","type":"function"},{"doc":"* `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`","ref":"Axon.html#trace_backward/5-options","title":"Options - Axon.trace_backward/5","type":"function"},{"doc":"Compiles and returns the given model's forward function\nexpression with the given options.\n\nThe returned expression is an Nx expression which can be\ntraversed and lowered to an IR or inspected for debugging\npurposes.","ref":"Axon.html#trace_forward/4","title":"Axon.trace_forward/4","type":"function"},{"doc":"* `:mode` - one of `:inference` or `:train`. Forwarded to layers\n to control differences in compilation at training or inference time.\n Defaults to `:inference`\n\n * `:debug` - if `true`, will log graph traversal and generation\n metrics. Also forwarded to JIT if debug mode is available\n for your chosen compiler or backend. Defaults to `false`","ref":"Axon.html#trace_forward/4-options","title":"Options - Axon.trace_forward/4","type":"function"},{"doc":"Compiles and returns the given model's init function\nexpression with the given options.\n\nThe returned expression is an Nx expression which can be\ntraversed and lowered to an IR or inspected for debugging\npurposes.\n\nYou may optionally specify initial parameters for some layers or\nnamespaces by passing a partial parameter map:\n\n Axon.trace_init(model, %{\"dense_0\" => dense_params})\n\nThe parameter map will be merged with the initialized model\nparameters.","ref":"Axon.html#trace_init/4","title":"Axon.trace_init/4","type":"function"},{"doc":"* `:debug` - if `true`, will log graph traversal and generation\n metrics. 
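A sketch of `trace_backward/5` from above. The loss argument order (prediction, then target) follows the "inputs and targets" wording and is otherwise an assumption; the loss is written with plain `Nx` to keep it scalar:

    model = Axon.input("input", shape: {nil, 4}) |> Axon.dense(1)
    {init_fn, _predict_fn} = Axon.build(model)

    template = Nx.template({1, 4}, :f32)
    params = init_fn.(template, %{})

    # scalar mean squared error; argument order assumed as (pred, target)
    loss = fn y_pred, y_true ->
      y_true |> Nx.subtract(y_pred) |> Nx.pow(2) |> Nx.mean()
    end

    expression = Axon.trace_backward(model, template, params, loss)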
Also forwarded to JIT if debug mode is available\n    for your chosen compiler or backend. Defaults to `false`","ref":"Axon.html#trace_init/4-options","title":"Options - Axon.trace_init/4","type":"function"},{"doc":"Adds a transpose layer to the network.","ref":"Axon.html#transpose/3","title":"Axon.transpose/3","type":"function"},{"doc":"* `:name` - layer name.","ref":"Axon.html#transpose/3-options","title":"Options - Axon.transpose/3","type":"function"},{"doc":"Unfreezes parameters returned from the given function or predicate.\n\n`fun` can be a predicate, `:all`, `up: n`, or `down: n`. `:all`\nunfreezes all parameters in the model, `up: n` unfreezes the first `n`\nlayers up (starting from output), and `down: n` unfreezes the first `n`\nlayers down (starting from input).\n\n`fun` may also be a predicate function which takes a parameter and\nreturns `true` if a parameter should be unfrozen or `false` otherwise.\n\nUnfreezing parameters is useful when fine tuning a model which you\nhave previously frozen and performed transfer learning on. You may\nwant to unfreeze some of the later frozen layers in a model and\nfine tune them specifically for your application:\n\n    cnn_base = get_pretrained_cnn_base()\n    model =\n      cnn_base\n      |> Axon.unfreeze(up: 25)\n\n    model\n    |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.0005))\n    |> Axon.Loop.run(data, epochs: 10)\n\nWhen compiled, frozen parameters are wrapped in `Nx.Defn.Kernel.stop_grad/1`,\nwhich zeros out the gradient with respect to the frozen parameter. Gradients\nof frozen parameters will return `0.0`, meaning they won't be changed during\nthe update process.","ref":"Axon.html#unfreeze/2","title":"Axon.unfreeze/2","type":"function"},{"doc":"","ref":"Axon.html#t:t/0","title":"Axon.t/0","type":"type"},{"doc":"Parameter initializers.\n\nParameter initializers are used to initialize the weights\nand biases of a neural network. Because most deep learning\noptimization algorithms are iterative, they require an initial\npoint to iterate from.\n\nSometimes the initialization of a model can determine whether\nor not a model converges. In some cases, the initial point is\nunstable, and therefore the model has no chance of converging\nusing common first-order optimization methods. In cases where\nthe model will converge, initialization can have a significant\nimpact on how quickly the model converges.\n\nMost initialization strategies are built from intuition and\nheuristics rather than theory. It's commonly accepted that\nthe parameters of different layers should be different -\nmotivating the use of random initialization for each layer's\nparameters. Usually, only the weights of a layer are initialized\nusing a random distribution - while the biases are initialized\nto a uniform constant (like 0).\n\nMost initializers use Gaussian (normal) or uniform distributions\nwith variations on scale. The output scale of an initializer\nshould generally be large enough to avoid information loss but\nsmall enough to avoid exploding values. 
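Beyond calling the returned init functions directly, they can be passed to layer constructors; a minimal sketch using the `:kernel_initializer` option documented elsewhere in this reference:

    model =
      Axon.input("input", shape: {nil, 16})
      |> Axon.dense(64, kernel_initializer: Axon.Initializers.he_normal())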
The initializers in\nthis module have a default scale known to work well with\nthe initialization strategy.\n\nThe functions in this module return initialization functions which\ntake shapes and types and return tensors:\n\n init_fn = Axon.Initializers.zeros()\n init_fn.({1, 2}, {:f, 32})\n\nYou may use these functions from within `defn` or outside.","ref":"Axon.Initializers.html","title":"Axon.Initializers","type":"module"},{"doc":"Initializes parameters to value.","ref":"Axon.Initializers.html#full/1","title":"Axon.Initializers.full/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.full(1.00)\n iex> out = init_fn.({2, 2}, {:f, 32})\n iex> out\n #Nx.Tensor","ref":"Axon.Initializers.html#full/1-examples","title":"Examples - Axon.Initializers.full/1","type":"function"},{"doc":"Initializes parameters with the Glorot normal initializer.\n\nThe Glorot normal initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_avg`\nand `distribution: :truncated_normal`.\n\nThe Glorot normal initializer is also called the Xavier\nnormal initializer.","ref":"Axon.Initializers.html#glorot_normal/1","title":"Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `1.0`","ref":"Axon.Initializers.html#glorot_normal/1-options","title":"Options - Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.glorot_normal()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.glorot_normal(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#glorot_normal/1-examples","title":"Examples - Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)","ref":"Axon.Initializers.html#glorot_normal/1-references","title":"References - Axon.Initializers.glorot_normal/1","type":"function"},{"doc":"Initializes parameters with the Glorot uniform initializer.\n\nThe Glorot uniform initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_avg`\nand `distribution: :uniform`.\n\nThe Glorot uniform initializer is also called the Xavier\nuniform initializer.","ref":"Axon.Initializers.html#glorot_uniform/1","title":"Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `1.0`","ref":"Axon.Initializers.html#glorot_uniform/1-options","title":"Options - Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.glorot_uniform()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.glorot_uniform(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#glorot_uniform/1-examples","title":"Examples - Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"* [Understanding the difficulty of training deep feedforward neural networks](http://proceedings.mlr.press/v9/glorot10a.html)","ref":"Axon.Initializers.html#glorot_uniform/1-references","title":"References - Axon.Initializers.glorot_uniform/1","type":"function"},{"doc":"Initializes parameters with the He normal initializer.\n\nThe He normal initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_in`\nand `distribution: :truncated_normal`.","ref":"Axon.Initializers.html#he_normal/1","title":"Axon.Initializers.he_normal/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `2.0`","ref":"Axon.Initializers.html#he_normal/1-options","title":"Options - Axon.Initializers.he_normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.he_normal()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.he_normal(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#he_normal/1-examples","title":"Examples - Axon.Initializers.he_normal/1","type":"function"},{"doc":"* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)","ref":"Axon.Initializers.html#he_normal/1-references","title":"References - Axon.Initializers.he_normal/1","type":"function"},{"doc":"Initializes parameters with the He uniform initializer.\n\nThe He uniform initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_ni`\nand `distribution: :uniform`.","ref":"Axon.Initializers.html#he_uniform/1","title":"Axon.Initializers.he_uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `2.0`","ref":"Axon.Initializers.html#he_uniform/1-options","title":"Options - Axon.Initializers.he_uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.he_uniform()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.he_uniform(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#he_uniform/1-examples","title":"Examples - Axon.Initializers.he_uniform/1","type":"function"},{"doc":"* [Delving Deep into Rectifiers: Surpassing Human-Level Performance on ImageNet Classification](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)","ref":"Axon.Initializers.html#he_uniform/1-references","title":"References - Axon.Initializers.he_uniform/1","type":"function"},{"doc":"Initializes parameters to an identity matrix.","ref":"Axon.Initializers.html#identity/0","title":"Axon.Initializers.identity/0","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.identity()\n iex> out = init_fn.({2, 2}, {:f, 32})\n iex> out\n #Nx.Tensor","ref":"Axon.Initializers.html#identity/0-examples","title":"Examples - Axon.Initializers.identity/0","type":"function"},{"doc":"Initializes parameters with the Lecun normal initializer.\n\nThe Lecun normal initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_in`\nand `distribution: :truncated_normal`.","ref":"Axon.Initializers.html#lecun_normal/1","title":"Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `1.0`","ref":"Axon.Initializers.html#lecun_normal/1-options","title":"Options - Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.lecun_normal()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.lecun_normal(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#lecun_normal/1-examples","title":"Examples - Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)","ref":"Axon.Initializers.html#lecun_normal/1-references","title":"References - Axon.Initializers.lecun_normal/1","type":"function"},{"doc":"Initializes parameters with the Lecun uniform initializer.\n\nThe Lecun uniform initializer is equivalent to calling\n`Axon.Initializers.variance_scaling` with `mode: :fan_in`\nand `distribution: :uniform`.","ref":"Axon.Initializers.html#lecun_uniform/1","title":"Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `1.0`","ref":"Axon.Initializers.html#lecun_uniform/1-options","title":"Options - Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.lecun_uniform()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.lecun_uniform(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#lecun_uniform/1-examples","title":"Examples - Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"* [Efficient BackProp](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)","ref":"Axon.Initializers.html#lecun_uniform/1-references","title":"References - Axon.Initializers.lecun_uniform/1","type":"function"},{"doc":"Initializes parameters with a random normal distribution.","ref":"Axon.Initializers.html#normal/1","title":"Axon.Initializers.normal/1","type":"function"},{"doc":"* `:mean` - mean of the output distribution. Defaults to `0.0`\n * `:scale` - scale of the output distribution. Defaults to `1.0e-2`","ref":"Axon.Initializers.html#normal/1-options","title":"Options - Axon.Initializers.normal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.normal()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.normal(mean: 1.0, scale: 1.0)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#normal/1-examples","title":"Examples - Axon.Initializers.normal/1","type":"function"},{"doc":"Initializes parameters to 1.","ref":"Axon.Initializers.html#ones/0","title":"Axon.Initializers.ones/0","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.ones()\n iex> out = init_fn.({2, 2}, {:f, 32})\n iex> out\n #Nx.Tensor","ref":"Axon.Initializers.html#ones/0-examples","title":"Examples - Axon.Initializers.ones/0","type":"function"},{"doc":"Initializes a tensor with an orthogonal distribution.\n\nFor 2-D tensors, the initialization is generated through the QR decomposition of a random distribution\nFor tensors with more than 2 dimensions, a 2-D tensor with shape `{shape[0] * shape[1] * ... * shape[n-2], shape[n-1]}`\nis initialized and then reshaped accordingly.","ref":"Axon.Initializers.html#orthogonal/1","title":"Axon.Initializers.orthogonal/1","type":"function"},{"doc":"* `:distribution` - output distribution. One of [`:normal`, `:uniform`].\n Defaults to `:normal`","ref":"Axon.Initializers.html#orthogonal/1-options","title":"Options - Axon.Initializers.orthogonal/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.orthogonal()\n iex> t = init_fn.({3, 3}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.type(t)\n {:f, 32}\n iex> Nx.shape(t)\n {3, 3}\n\n iex> init_fn = Axon.Initializers.orthogonal()\n iex> t = init_fn.({1, 2, 3, 4}, {:f, 64}, Nx.Random.key(1))\n iex> Nx.type(t)\n {:f, 64}\n iex> Nx.shape(t)\n {1, 2, 3, 4}","ref":"Axon.Initializers.html#orthogonal/1-examples","title":"Examples - Axon.Initializers.orthogonal/1","type":"function"},{"doc":"Initializes parameters with a random uniform distribution.","ref":"Axon.Initializers.html#uniform/1","title":"Axon.Initializers.uniform/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. 
Defaults to `1.0e-2`","ref":"Axon.Initializers.html#uniform/1-options","title":"Options - Axon.Initializers.uniform/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.uniform()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.uniform(scale: 1.0e-3)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}","ref":"Axon.Initializers.html#uniform/1-examples","title":"Examples - Axon.Initializers.uniform/1","type":"function"},{"doc":"Initializes parameters with variance scaling according to\nthe given distribution and mode.\n\nVariance scaling adapts scale to the weights of the output\ntensor.","ref":"Axon.Initializers.html#variance_scaling/1","title":"Axon.Initializers.variance_scaling/1","type":"function"},{"doc":"* `:scale` - scale of the output distribution. Defaults to `1.0e-2`\n * `:mode` - compute fan mode. One of `:fan_in`, `:fan_out`, or `:fan_avg`.\n Defaults to `:fan_in`\n * `:distribution` - output distribution. One of `:normal`, `:truncated_normal`,\n or `:uniform`. Defaults to `:normal`","ref":"Axon.Initializers.html#variance_scaling/1-options","title":"Options - Axon.Initializers.variance_scaling/1","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.variance_scaling()\n iex> t = init_fn.({2, 2}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:f, 32}\n\n iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :truncated_normal)\n iex> t = init_fn.({2, 2}, {:bf, 16}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {2, 2}\n iex> Nx.type(t)\n {:bf, 16}\n\n iex> init_fn = Axon.Initializers.variance_scaling(mode: :fan_out, distribution: :normal)\n iex> t = init_fn.({64, 3, 32, 32}, {:f, 32}, Nx.Random.key(1))\n iex> Nx.shape(t)\n {64, 3, 32, 32}\n iex> Nx.type(t)\n {:f, 32}","ref":"Axon.Initializers.html#variance_scaling/1-examples","title":"Examples - Axon.Initializers.variance_scaling/1","type":"function"},{"doc":"Initializes parameters to 0.","ref":"Axon.Initializers.html#zeros/0","title":"Axon.Initializers.zeros/0","type":"function"},{"doc":"iex> init_fn = Axon.Initializers.zeros()\n iex> out = init_fn.({2, 2}, {:f, 32})\n iex> out\n #Nx.Tensor","ref":"Axon.Initializers.html#zeros/0-examples","title":"Examples - Axon.Initializers.zeros/0","type":"function"},{"doc":"Utilities for creating mixed precision policies.\n\nMixed precision is useful for increasing model throughput at the possible\nprice of a small dip in accuracy. When creating a mixed precision policy,\nyou define the policy for `params`, `compute`, and `output`.\n\nThe `params` policy dictates what type parameters should be stored as\nduring training. The `compute` policy dictates what type should be used\nduring intermediate computations in the model's forward pass. 
The `output`\npolicy dictates what type the model should output.\n\nHere's an example of creating a mixed precision policy and applying it\nto a model:\n\n model =\n Axon.input(\"input\", shape: {nil, 784})\n |> Axon.dense(128, activation: :relu)\n |> Axon.batch_norm()\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(64, activation: :relu)\n |> Axon.batch_norm()\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(10, activation: :softmax)\n\n policy = Axon.MixedPrecision.create_policy(\n params: {:f, 32},\n compute: {:f, 16},\n output: {:f, 32}\n )\n\n mp_model =\n model\n |> Axon.MixedPrecision.apply_policy(policy, except: [:batch_norm])\n\nThe example above applies the mixed precision policy to every layer in\nthe model except Batch Normalization layers. The policy will cast parameters\nand inputs to `{:f, 16}` for intermediate computations in the model's forward\npass before casting the output back to `{:f, 32}`.","ref":"Axon.MixedPrecision.html","title":"Axon.MixedPrecision","type":"module"},{"doc":"Casts the given container according to the given policy\nand type.","ref":"Axon.MixedPrecision.html#cast/3","title":"Axon.MixedPrecision.cast/3","type":"function"},{"doc":"iex> policy = Axon.MixedPrecision.create_policy(params: {:f, 16})\n iex> params = %{\"dense\" => %{\"kernel\" => Nx.tensor([1.0, 2.0, 3.0])}}\n iex> params = Axon.MixedPrecision.cast(policy, params, :params)\n iex> Nx.type(params[\"dense\"][\"kernel\"])\n {:f, 16}\n\n iex> policy = Axon.MixedPrecision.create_policy(compute: {:bf, 16})\n iex> value = Nx.tensor([1.0, 2.0, 3.0])\n iex> value = Axon.MixedPrecision.cast(policy, value, :compute)\n iex> Nx.type(value)\n {:bf, 16}\n\n iex> policy = Axon.MixedPrecision.create_policy(output: {:bf, 16})\n iex> value = Nx.tensor([1.0, 2.0, 3.0])\n iex> value = Axon.MixedPrecision.cast(policy, value, :output)\n iex> Nx.type(value)\n {:bf, 16}\n\nNote that integers are never promoted to floats:\n\n iex> policy = Axon.MixedPrecision.create_policy(output: {:f, 16})\n iex> value = Nx.tensor([1, 2, 3], type: :s64)\n iex> value = Axon.MixedPrecision.cast(policy, value, :params)\n iex> Nx.type(value)\n {:s, 64}","ref":"Axon.MixedPrecision.html#cast/3-examples","title":"Examples - Axon.MixedPrecision.cast/3","type":"function"},{"doc":"Creates a mixed precision policy with the given options.","ref":"Axon.MixedPrecision.html#create_policy/1","title":"Axon.MixedPrecision.create_policy/1","type":"function"},{"doc":"* `params` - parameter precision policy. Defaults to `{:f, 32}`\n * `compute` - compute precision policy. Defaults to `{:f, 32}`\n * `output` - output precision policy. Defaults to `{:f, 32}`","ref":"Axon.MixedPrecision.html#create_policy/1-options","title":"Options - Axon.MixedPrecision.create_policy/1","type":"function"},{"doc":"iex> Axon.MixedPrecision.create_policy(params: {:f, 16}, output: {:f, 16})\n #Axon.MixedPrecision.Policy \n\n iex> Axon.MixedPrecision.create_policy(compute: {:bf, 16})\n #Axon.MixedPrecision.Policy","ref":"Axon.MixedPrecision.html#create_policy/1-examples","title":"Examples - Axon.MixedPrecision.create_policy/1","type":"function"},{"doc":"Represents a missing value of an optional node.\n\nSee `Axon.input/2` and `Axon.optional/2` for more details.","ref":"Axon.None.html","title":"Axon.None","type":"module"},{"doc":"Container for returning stateful outputs from Axon layers.\n\nSome layers, such as `Axon.batch_norm/2`, keep a running internal\nstate which is updated continuously at train time and used statically\nat inference time. 
In order for the Axon compiler to differentiate\nordinary layer outputs from internal state, you must mark output\nas stateful.\n\nStateful Outputs consist of two fields:\n\n    :output - Actual layer output to be forwarded to the next layer\n    :state - Internal layer state to be tracked and updated\n\n`:output` is simply forwarded to the next layer. `:state` is aggregated\nwith other stateful outputs, and then is treated specially by internal\nAxon training functions such that updated state parameters reflect the\nvalues returned from stateful outputs.\n\n`:state` must be a map with keys that map directly to layer internal\nstate names. For example, `Axon.Layers.batch_norm` returns a StatefulOutput\nwith `:state` keys of `\"mean\"` and `\"var\"`.","ref":"Axon.StatefulOutput.html","title":"Axon.StatefulOutput","type":"module"},{"doc":"Module for rendering various visual representations of Axon models.","ref":"Axon.Display.html","title":"Axon.Display","type":"module"},{"doc":"Traces execution of the given Axon model with the given\ninputs, rendering the execution flow as a mermaid flowchart.\n\nYou must include [kino](https://hex.pm/packages/kino) as\na dependency in your project to make use of this function.","ref":"Axon.Display.html#as_graph/3","title":"Axon.Display.as_graph/3","type":"function"},{"doc":"* `:direction` - defines the direction of the graph visual. The\n    value can either be `:top_down` or `:left_right`. Defaults to `:top_down`.","ref":"Axon.Display.html#as_graph/3-options","title":"Options - Axon.Display.as_graph/3","type":"function"},{"doc":"Given an Axon model:\n\n    model = Axon.input(\"input\") |> Axon.dense(32)\n\nYou can define input templates for each input:\n\n    input = Nx.template({1, 16}, :f32)\n\nAnd then display the execution flow of the model:\n\n    Axon.Display.as_graph(model, input, direction: :top_down)","ref":"Axon.Display.html#as_graph/3-examples","title":"Examples - Axon.Display.as_graph/3","type":"function"},{"doc":"Traces execution of the given Axon model with the given\ninputs, rendering the execution flow as a table.\n\nYou must include [table_rex](https://hex.pm/packages/table_rex) as\na dependency in your project to make use of this function.","ref":"Axon.Display.html#as_table/2","title":"Axon.Display.as_table/2","type":"function"},{"doc":"Given an Axon model:\n\n    model = Axon.input(\"input\") |> Axon.dense(32)\n\nYou can define input templates for each input:\n\n    input = Nx.template({1, 16}, :f32)\n\nAnd then display the execution flow of the model:\n\n    Axon.Display.as_table(model, input)","ref":"Axon.Display.html#as_table/2-examples","title":"Examples - Axon.Display.as_table/2","type":"function"},{"doc":"Activation functions.\n\nActivation functions are element-wise, (typically) non-linear\nfunctions called on the output of another layer, such as\na dense layer:\n\n    x\n    |> dense(weight, bias)\n    |> relu()\n\nActivation functions output the \"activation\" or how active\na given layer's neurons are in learning a representation\nof the data-generating distribution.\n\nSome activations are commonly used as output activations. 
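Because these activations are ordinary numerical functions, they compose with other `Nx` code; a minimal sketch:

    import Nx.Defn

    # a dense transformation followed by a rectifier, all inside defn
    defn dense_relu(x, w, b) do
      x
      |> Nx.dot(w)
      |> Nx.add(b)
      |> Axon.Activations.relu()
    end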
For\nexample `softmax` is often used as the output in multiclass\nclassification problems because it returns a categorical\nprobability distribution:\n\n    iex> Axon.Activations.softmax(Nx.tensor([[1, 2, 3]], type: {:f, 32}))\n    #Nx.Tensor \n\nOther activations such as `tanh` or `sigmoid` are used because\nthey have desirable properties, such as keeping the output\ntensor constrained within a certain range.\n\nGenerally, the choice of activation function is arbitrary,\nalthough some activations work better than others in certain\nproblem domains. For example ReLU (rectified linear unit)\nactivation is a widely-accepted default. You can see\na list of activation functions and implementations\n[here](https://paperswithcode.com/methods/category/activation-functions).\n\nAll of the functions in this module are implemented as\nnumerical functions and can be JIT or AOT compiled with\nany supported `Nx` compiler.","ref":"Axon.Activations.html","title":"Axon.Activations","type":"module"},{"doc":"Continuously-differentiable exponential linear unit activation.\n\n$$f(x_i) = \max(0, x_i) + \min(0, \alpha * (e^{\frac{x_i}{\alpha}} - 1))$$","ref":"Axon.Activations.html#celu/2","title":"Axon.Activations.celu/2","type":"function"},{"doc":"* `alpha` - $\alpha$ in CELU formulation. Must be non-zero.\n    Defaults to `1.0`","ref":"Axon.Activations.html#celu/2-options","title":"Options - Axon.Activations.celu/2","type":"function"},{"doc":"iex> Axon.Activations.celu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.celu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))\n    #Nx.Tensor \n\n#","ref":"Axon.Activations.html#celu/2-examples","title":"Examples - Axon.Activations.celu/2","type":"function"},{"doc":"iex> Axon.Activations.celu(Nx.tensor([0.0, 1.0, 2.0], type: {:f, 32}), alpha: 0.0)\n    ** (ArgumentError) :alpha must be non-zero in CELU activation","ref":"Axon.Activations.html#celu/2-error-cases","title":"Error cases - Axon.Activations.celu/2","type":"function"},{"doc":"* [Continuously Differentiable Exponential Linear Units](https://arxiv.org/pdf/1704.07483.pdf)","ref":"Axon.Activations.html#celu/2-references","title":"References - Axon.Activations.celu/2","type":"function"},{"doc":"Exponential linear unit activation.\n\nEquivalent to `celu` for $\alpha = 1$\n\n$$f(x_i) = \begin{cases}x_i & x_i > 0 \newline \alpha * (e^{x_i} - 1) & x_i \leq 0 \\ \end{cases}$$","ref":"Axon.Activations.html#elu/2","title":"Axon.Activations.elu/2","type":"function"},{"doc":"* `alpha` - $\alpha$ in ELU formulation. 
Defaults to `1.0`","ref":"Axon.Activations.html#elu/2-options","title":"Options - Axon.Activations.elu/2","type":"function"},{"doc":"iex> Axon.Activations.elu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))\n #Nx.Tensor \n\n iex> Axon.Activations.elu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}))\n #Nx.Tensor","ref":"Axon.Activations.html#elu/2-examples","title":"Examples - Axon.Activations.elu/2","type":"function"},{"doc":"* [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289)","ref":"Axon.Activations.html#elu/2-references","title":"References - Axon.Activations.elu/2","type":"function"},{"doc":"Exponential activation.\n\n$$f(x_i) = e^{x_i}$$","ref":"Axon.Activations.html#exp/1","title":"Axon.Activations.exp/1","type":"function"},{"doc":"iex> Axon.Activations.exp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.exp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#exp/1-examples","title":"Examples - Axon.Activations.exp/1","type":"function"},{"doc":"Gaussian error linear unit activation.\n\n$$f(x_i) = \\frac{x_i}{2}(1 + {erf}(\\frac{x_i}{\\sqrt{2}}))$$","ref":"Axon.Activations.html#gelu/1","title":"Axon.Activations.gelu/1","type":"function"},{"doc":"iex> Axon.Activations.gelu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.gelu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#gelu/1-examples","title":"Examples - Axon.Activations.gelu/1","type":"function"},{"doc":"* [Gaussian Error Linear Units (GELUs)](https://arxiv.org/abs/1606.08415)","ref":"Axon.Activations.html#gelu/1-references","title":"References - Axon.Activations.gelu/1","type":"function"},{"doc":"Hard sigmoid activation.","ref":"Axon.Activations.html#hard_sigmoid/2","title":"Axon.Activations.hard_sigmoid/2","type":"function"},{"doc":"iex> Axon.Activations.hard_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.hard_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#hard_sigmoid/2-examples","title":"Examples - Axon.Activations.hard_sigmoid/2","type":"function"},{"doc":"Hard sigmoid weighted linear unit activation.\n\n$$f(x_i) = \\begin{cases} 0 & x_i \\leq -3 \\newline\nx & x_i \\geq 3 \\newline\n\\frac{x_i^2}{6} + \\frac{x_i}{2} & otherwise \\end{cases}$$","ref":"Axon.Activations.html#hard_silu/2","title":"Axon.Activations.hard_silu/2","type":"function"},{"doc":"iex> Axon.Activations.hard_silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.hard_silu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#hard_silu/2-examples","title":"Examples - Axon.Activations.hard_silu/2","type":"function"},{"doc":"Hard hyperbolic tangent activation.\n\n$$f(x_i) = \\begin{cases} 1 & x > 1 \\newline -1 & x < -1 \\newline x & otherwise \\end{cases}$$","ref":"Axon.Activations.html#hard_tanh/1","title":"Axon.Activations.hard_tanh/1","type":"function"},{"doc":"iex> Axon.Activations.hard_tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> 
Axon.Activations.hard_tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#hard_tanh/1-examples","title":"Examples - Axon.Activations.hard_tanh/1","type":"function"},{"doc":"Leaky rectified linear unit activation.\n\n$$f(x_i) = \begin{cases} x & x \geq 0 \newline \alpha * x & otherwise \end{cases}$$","ref":"Axon.Activations.html#leaky_relu/2","title":"Axon.Activations.leaky_relu/2","type":"function"},{"doc":"* `:alpha` - $\alpha$ in Leaky ReLU formulation. Defaults to `1.0e-2`","ref":"Axon.Activations.html#leaky_relu/2-options","title":"Options - Axon.Activations.leaky_relu/2","type":"function"},{"doc":"iex> Axon.Activations.leaky_relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]), alpha: 0.5)\n    #Nx.Tensor \n\n    iex> Axon.Activations.leaky_relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], names: [:batch, :data]), alpha: 0.5)\n    #Nx.Tensor","ref":"Axon.Activations.html#leaky_relu/2-examples","title":"Examples - Axon.Activations.leaky_relu/2","type":"function"},{"doc":"Linear activation.\n\n$$f(x_i) = x_i$$","ref":"Axon.Activations.html#linear/1","title":"Axon.Activations.linear/1","type":"function"},{"doc":"iex> Axon.Activations.linear(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.linear(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#linear/1-examples","title":"Examples - Axon.Activations.linear/1","type":"function"},{"doc":"Log-sigmoid activation.\n\n$$f(x_i) = \log(sigmoid(x))$$","ref":"Axon.Activations.html#log_sigmoid/1","title":"Axon.Activations.log_sigmoid/1","type":"function"},{"doc":"iex> Axon.Activations.log_sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.log_sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#log_sigmoid/1-examples","title":"Examples - Axon.Activations.log_sigmoid/1","type":"function"},{"doc":"Log-softmax activation.\n\n$$f(x_i) = -\log(\sum{e^x_i})$$","ref":"Axon.Activations.html#log_softmax/2","title":"Axon.Activations.log_softmax/2","type":"function"},{"doc":"iex> Axon.Activations.log_softmax(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 32}, names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.log_softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#log_softmax/2-examples","title":"Examples - Axon.Activations.log_softmax/2","type":"function"},{"doc":"Logsumexp activation.\n\n$$\log(\sum e^x_i)$$","ref":"Axon.Activations.html#log_sumexp/2","title":"Axon.Activations.log_sumexp/2","type":"function"},{"doc":"iex> Axon.Activations.log_sumexp(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n    #Nx.Tensor \n\n    iex> Axon.Activations.log_sumexp(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n    #Nx.Tensor","ref":"Axon.Activations.html#log_sumexp/2-examples","title":"Examples - Axon.Activations.log_sumexp/2","type":"function"},{"doc":"Mish activation.\n\n$$f(x_i) = x_i * \tanh(\log(1 + e^x_i))$$","ref":"Axon.Activations.html#mish/1","title":"Axon.Activations.mish/1","type":"function"},{"doc":"iex> Axon.Activations.mish(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], type: {:f, 
32}, names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.mish(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#mish/1-examples","title":"Examples - Axon.Activations.mish/1","type":"function"},{"doc":"Rectified linear unit 6 activation.\n\n$$f(x_i) = \\min_i(\\max_i(x, 0), 6)$$","ref":"Axon.Activations.html#relu6/1","title":"Axon.Activations.relu6/1","type":"function"},{"doc":"iex> Axon.Activations.relu6(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]))\n #Nx.Tensor \n\n iex> Axon.Activations.relu6(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#relu6/1-examples","title":"Examples - Axon.Activations.relu6/1","type":"function"},{"doc":"* [MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications](https://arxiv.org/abs/1704.04861v1)","ref":"Axon.Activations.html#relu6/1-references","title":"References - Axon.Activations.relu6/1","type":"function"},{"doc":"Rectified linear unit activation.\n\n$$f(x_i) = \\max_i(x, 0)$$","ref":"Axon.Activations.html#relu/1","title":"Axon.Activations.relu/1","type":"function"},{"doc":"iex> Axon.Activations.relu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.relu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#relu/1-examples","title":"Examples - Axon.Activations.relu/1","type":"function"},{"doc":"Scaled exponential linear unit activation.\n\n$$f(x_i) = \\begin{cases} \\lambda x & x \\geq 0 \\newline\n\\lambda \\alpha(e^{x} - 1) & x < 0 \\end{cases}$$\n\n$$\\alpha \\approx 1.6733$$\n$$\\lambda \\approx 1.0507$$","ref":"Axon.Activations.html#selu/2","title":"Axon.Activations.selu/2","type":"function"},{"doc":"iex> Axon.Activations.selu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.selu(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#selu/2-examples","title":"Examples - Axon.Activations.selu/2","type":"function"},{"doc":"* [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515v5)","ref":"Axon.Activations.html#selu/2-references","title":"References - Axon.Activations.selu/2","type":"function"},{"doc":"Sigmoid activation.\n\n$$f(x_i) = \\frac{1}{1 + e^{-x_i}}$$\n\n**Implementation Note: Sigmoid logits are cached as metadata\nin the expression and can be used in calculations later on.\nFor example, they are used in cross-entropy calculations for\nbetter stability.**","ref":"Axon.Activations.html#sigmoid/1","title":"Axon.Activations.sigmoid/1","type":"function"},{"doc":"iex> Axon.Activations.sigmoid(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.sigmoid(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#sigmoid/1-examples","title":"Examples - Axon.Activations.sigmoid/1","type":"function"},{"doc":"Sigmoid weighted linear unit activation.\n\n$$f(x_i) = x * sigmoid(x)$$","ref":"Axon.Activations.html#silu/1","title":"Axon.Activations.silu/1","type":"function"},{"doc":"iex> Axon.Activations.silu(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.silu(Nx.tensor([[-1.0, -2.0, -3.0], 
[1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#silu/1-examples","title":"Examples - Axon.Activations.silu/1","type":"function"},{"doc":"* [Sigmoid-Weighted Linear Units for Neural Network Function Approximation in Reinforcement Learning](https://arxiv.org/abs/1702.03118v3)","ref":"Axon.Activations.html#silu/1-references","title":"References - Axon.Activations.silu/1","type":"function"},{"doc":"Softmax activation along an axis.\n\n$$\\frac{e^{x_i}}{\\sum_i e^{x_i}}$$\n\n**Implementation Note: Softmax logits are cached as metadata\nin the expression and can be used in calculations later on.\nFor example, they are used in cross-entropy calculations for\nbetter stability.**","ref":"Axon.Activations.html#softmax/2","title":"Axon.Activations.softmax/2","type":"function"},{"doc":"* `:axis` - softmax axis along which to calculate distribution.\n Defaults to 1.","ref":"Axon.Activations.html#softmax/2-options","title":"Options - Axon.Activations.softmax/2","type":"function"},{"doc":"iex> Axon.Activations.softmax(Nx.tensor([[-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0]], names: [:batch, :data]))\n #Nx.Tensor \n\n iex> Axon.Activations.softmax(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#softmax/2-examples","title":"Examples - Axon.Activations.softmax/2","type":"function"},{"doc":"Softplus activation.\n\n$$\\log(1 + e^x_i)$$","ref":"Axon.Activations.html#softplus/1","title":"Axon.Activations.softplus/1","type":"function"},{"doc":"iex> Axon.Activations.softplus(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.softplus(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#softplus/1-examples","title":"Examples - Axon.Activations.softplus/1","type":"function"},{"doc":"Softsign activation.\n\n$$f(x_i) = \\frac{x_i}{|x_i| + 1}$$","ref":"Axon.Activations.html#softsign/1","title":"Axon.Activations.softsign/1","type":"function"},{"doc":"iex> Axon.Activations.softsign(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.softsign(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#softsign/1-examples","title":"Examples - Axon.Activations.softsign/1","type":"function"},{"doc":"Hyperbolic tangent activation.\n\n$$f(x_i) = \\tanh(x_i)$$","ref":"Axon.Activations.html#tanh/1","title":"Axon.Activations.tanh/1","type":"function"},{"doc":"iex> Axon.Activations.tanh(Nx.tensor([-3.0, -2.0, -1.0, 0.0, 1.0, 2.0, 3.0], names: [:data]))\n #Nx.Tensor \n\n iex> Axon.Activations.tanh(Nx.tensor([[-1.0, -2.0, -3.0], [1.0, 2.0, 3.0]], type: {:bf, 16}, names: [:batch, :data]))\n #Nx.Tensor","ref":"Axon.Activations.html#tanh/1-examples","title":"Examples - Axon.Activations.tanh/1","type":"function"},{"doc":"Functional implementations of common neural network layer\noperations.\n\nLayers are the building blocks of neural networks. These\nfunctional implementations can be used to express higher-level\nconstructs using fundamental building blocks. 
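A concrete sketch of this functional style with caller-managed parameters (shapes chosen arbitrarily):

    key = Nx.Random.key(0)
    {w1, key} = Nx.Random.normal(key, shape: {4, 8})
    {w2, _key} = Nx.Random.normal(key, shape: {8, 2})
    b1 = Nx.broadcast(0.0, {8})
    b2 = Nx.broadcast(0.0, {2})

    # state (w1, b1, w2, b2) lives with the caller, not the layers
    Nx.iota({1, 4}, type: :f32)
    |> Axon.Layers.dense(w1, b1)
    |> Axon.Activations.relu()
    |> Axon.Layers.dense(w2, b2)
    |> Axon.Activations.softmax()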
Neural network\nlayers are stateful with respect to their parameters.\nThese implementations do not assume the responsibility of\nmanaging state - instead opting to delegate this responsibility\nto the caller.\n\nBasic neural networks can be seen as a composition of functions:\n\n    input\n    |> dense(w1, b1)\n    |> relu()\n    |> dense(w2, b2)\n    |> softmax()\n\nThese kinds of models are often referred to as deep feedforward networks\nor multilayer perceptrons (MLPs) because information flows forward\nthrough the network with no feedback connections. Mathematically,\na feedforward network can be represented as:\n\n    $$f(x) = f^{(3)}(f^{(2)}(f^{(1)}(x)))$$\n\nYou can see a similar pattern emerge if we condense the call stack\nin the previous example:\n\n    softmax(dense(relu(dense(input, w1, b1)), w2, b2))\n\nThe chain structure shown here is the most common structure used\nin neural networks. You can consider each function $f^{(n)}$ as a\n*layer* in the neural network - for example $f^{(2)}$ is the 2nd\nlayer in the network. The number of function calls in the\nstructure is the *depth* of the network. This is where the term\n*deep learning* comes from.\n\nNeural networks are often written as the mapping:\n\n    $$y = f(x; \theta)$$\n\nWhere $x$ is the input to the neural network and $\theta$ are the\nset of learned parameters. In Elixir, you would write this:\n\n    y = model(input, params)\n\nFrom the previous example, `params` would represent the collection:\n\n    {w1, b1, w2, b2}\n\nwhere `w1` and `w2` are layer *kernels*, and `b1` and `b2` are layer\n*biases*.","ref":"Axon.Layers.html","title":"Axon.Layers","type":"module"},{"doc":"Functional implementation of general dimensional adaptive average\npooling.\n\nAdaptive pooling allows you to specify the desired output size\nof the transformed input. This will automatically adapt the\nwindow size and strides to obtain the desired output size. It\nwill then perform average pooling using the calculated window\nsize and strides.\n\nAdaptive pooling can be useful when working on multiple inputs with\ndifferent spatial input shapes. You can guarantee the output of\nan adaptive pooling operation is always the same size regardless\nof input shape.","ref":"Axon.Layers.html#adaptive_avg_pool/2","title":"Axon.Layers.adaptive_avg_pool/2","type":"function"},{"doc":"* `:output_size` - spatial output size. Must be a tuple with\n    size equal to the spatial dimensions in the input tensor.\n    Required.\n\n  * `:channels` - channel configuration. One of `:first` or `:last`.\n    Defaults to `:last`.","ref":"Axon.Layers.html#adaptive_avg_pool/2-options","title":"Options - Axon.Layers.adaptive_avg_pool/2","type":"function"},{"doc":"Functional implementation of general dimensional adaptive power\naverage pooling.\n\nComputes:\n\n    $$f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}$$\n\nAdaptive pooling allows you to specify the desired output size\nof the transformed input. This will automatically adapt the\nwindow size and strides to obtain the desired output size. It\nwill then perform power average pooling using the calculated window\nsize and strides.\n\nAdaptive pooling can be useful when working on multiple inputs with\ndifferent spatial input shapes. You can guarantee the output of\nan adaptive pooling operation is always the same size regardless\nof input shape.","ref":"Axon.Layers.html#adaptive_lp_pool/2","title":"Axon.Layers.adaptive_lp_pool/2","type":"function"},{"doc":"* `:norm` - $p$ from above equation. Defaults to 2.\n\n  * `:output_size` - spatial output size. 
Must be a tuple with\n    size equal to the spatial dimensions in the input tensor.\n    Required.","ref":"Axon.Layers.html#adaptive_lp_pool/2-options","title":"Options - Axon.Layers.adaptive_lp_pool/2","type":"function"},{"doc":"Functional implementation of general dimensional adaptive max\npooling.\n\nAdaptive pooling allows you to specify the desired output size\nof the transformed input. This will automatically adapt the\nwindow size and strides to obtain the desired output size. It\nwill then perform max pooling using the calculated window\nsize and strides.\n\nAdaptive pooling can be useful when working on multiple inputs with\ndifferent spatial input shapes. You can guarantee the output of\nan adaptive pooling operation is always the same size regardless\nof input shape.","ref":"Axon.Layers.html#adaptive_max_pool/2","title":"Axon.Layers.adaptive_max_pool/2","type":"function"},{"doc":"* `:output_size` - spatial output size. Must be a tuple with\n    size equal to the spatial dimensions in the input tensor.\n    Required.","ref":"Axon.Layers.html#adaptive_max_pool/2-options","title":"Options - Axon.Layers.adaptive_max_pool/2","type":"function"},{"doc":"Functional implementation of an alpha dropout layer.\n\nAlpha dropout is a type of dropout that forces the input\nto have zero mean and unit standard deviation. Randomly\nmasks some elements and scales to enforce self-normalization.","ref":"Axon.Layers.html#alpha_dropout/3","title":"Axon.Layers.alpha_dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n    will be dropped. Required.\n\n  * `:noise_shape` - input noise shape. Shape of `mask` which can be useful\n    for broadcasting `mask` across feature channels or other dimensions.\n    Defaults to shape of input tensor.","ref":"Axon.Layers.html#alpha_dropout/3-options","title":"Options - Axon.Layers.alpha_dropout/3","type":"function"},{"doc":"* [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)","ref":"Axon.Layers.html#alpha_dropout/3-references","title":"References - Axon.Layers.alpha_dropout/3","type":"function"},{"doc":"A general dimensional functional average pooling layer.\n\nPooling is applied to the spatial dimension of the input tensor.\nAverage pooling returns the average of all elements in valid\nwindows in the input tensor. It is often used after convolutional\nlayers to downsample the input even further.","ref":"Axon.Layers.html#avg_pool/2","title":"Axon.Layers.avg_pool/2","type":"function"},{"doc":"* `kernel_size` - window size. Rank must match spatial dimension\n    of the input tensor. Required.\n\n  * `:strides` - kernel strides. Can be a scalar or a list\n    whose length matches the number of spatial dimensions in\n    the input tensor. Defaults to 1.\n\n  * `:padding` - zero padding on the input. Can be one of\n    `:valid`, `:same` or a general padding configuration\n    without interior padding for each spatial dimension\n    of the input.\n\n  * `:window_dilations` - kernel dilation factor. Equivalent\n    to applying interior padding on the kernel. The amount\n    of interior padding applied is given by `kernel_dilation - 1`.\n    Can be a scalar or a list whose length matches the number of\n    spatial dimensions in the input tensor. Defaults to `1` or no\n    dilation.\n\n  * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#avg_pool/2-options","title":"Options - Axon.Layers.avg_pool/2","type":"function"},{"doc":"Functional implementation of batch normalization.\n\nNormalizes the input by calculating mean and variance of the\ninput tensor along every dimension but the given `:channel_index`,\nand then scaling according to:\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. If `training?` is\ntrue, this method will compute a new mean and variance, and return\nthe updated `ra_mean` and `ra_var`. Otherwise, it will just compute\nbatch norm from the given `ra_mean` and `ra_var`.","ref":"Axon.Layers.html#batch_norm/6","title":"Axon.Layers.batch_norm/6","type":"function"},{"doc":"* `:epsilon` - numerical stability term. $\\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes for mean and variance calculation.\n\n * `:momentum` - momentum to use for EMA update.\n\n * `:mode` - if `:train`, uses training mode batch norm. Defaults to `:inference`.","ref":"Axon.Layers.html#batch_norm/6-options","title":"Options - Axon.Layers.batch_norm/6","type":"function"},{"doc":"* [Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift](https://arxiv.org/abs/1502.03167)","ref":"Axon.Layers.html#batch_norm/6-references","title":"References - Axon.Layers.batch_norm/6","type":"function"},{"doc":"Functional implementation of a bilinear layer.\n\nBilinear transformation of the input such that:\n\n$$y = x_1^{T}Ax_2 + b$$","ref":"Axon.Layers.html#bilinear/5","title":"Axon.Layers.bilinear/5","type":"function"},{"doc":"* `input1` - `{batch_size, ..., input1_features}`\n * `input2` - `{batch_size, ..., input2_features}`\n * `kernel` - `{out_features, input1_features, input2_features}`","ref":"Axon.Layers.html#bilinear/5-parameter-shapes","title":"Parameter Shapes - Axon.Layers.bilinear/5","type":"function"},{"doc":"`{batch_size, ..., output_features}`","ref":"Axon.Layers.html#bilinear/5-output-shape","title":"Output Shape - Axon.Layers.bilinear/5","type":"function"},{"doc":"iex> inp1 = Nx.iota({3, 2}, type: {:f, 32})\n iex> inp2 = Nx.iota({3, 4}, type: {:f, 32})\n iex> kernel = Nx.iota({1, 2, 4}, type: {:f, 32})\n iex> bias = Nx.tensor(1.0)\n iex> Axon.Layers.bilinear(inp1, inp2, kernel, bias)\n #Nx.Tensor","ref":"Axon.Layers.html#bilinear/5-examples","title":"Examples - Axon.Layers.bilinear/5","type":"function"},{"doc":"Functional implementation of a 2-dimensional blur pooling layer.\n\nBlur pooling applies a spatial low-pass filter to the input. It is\noften applied before pooling and convolutional layers as a way to\nincrease model accuracy without much additional computation cost.\n\nThe blur pooling implementation follows from [MosaicML](https://github.com/mosaicml/composer/blob/dev/composer/algorithms/blurpool/blurpool_layers.py).","ref":"Axon.Layers.html#blur_pool/2","title":"Axon.Layers.blur_pool/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#celu/2","title":"Axon.Layers.celu/2","type":"function"},{"doc":"Functional implementation of a general dimensional convolutional\nlayer.\n\nConvolutional layers can be described as applying a convolution\nover an input signal composed of several input planes. 
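For instance, with `channels: :first`, convolving an input of shape `{1, 3, 32, 32}` with a kernel of shape `{8, 3, 3, 3}` yields 8 output channels (a hypothetical shape pairing, chosen only to illustrate the parameter shapes listed below). 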
Intuitively,\nthe kernel slides `output_channels` filters over\nthe input tensor to extract features.\n\nConvolutional layers are most commonly used in computer vision,\nbut can also be useful when working with sequences and other input signals.","ref":"Axon.Layers.html#conv/4","title":"Axon.Layers.conv/4","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`\n * `kernel` - `{output_channels, input_channels, kernel_spatial0, ..., kernel_spatialN}`\n * `bias` - `{}` or `{output_channels}`","ref":"Axon.Layers.html#conv/4-parameter-shapes","title":"Parameter Shapes - Axon.Layers.conv/4","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#conv/4-options","title":"Options - Axon.Layers.conv/4","type":"function"},{"doc":"#","ref":"Axon.Layers.html#conv/4-examples","title":"Examples - Axon.Layers.conv/4","type":"function"},{"doc":"iex> input = Nx.tensor([[[0.1294, -0.6638, 1.0251]], [[ 0.9182, 1.1512, -1.6149]]], type: {:f, 32})\n iex> kernel = Nx.tensor([[[-1.5475, 1.2425]], [[0.1871, 0.5458]], [[-0.4488, 0.8879]]], type: {:f, 32})\n iex> bias = Nx.tensor([0.7791, 0.1676, 1.5971], type: {:f, 32})\n iex> Axon.Layers.conv(input, kernel, bias, channels: :first)\n #Nx.Tensor \n\n#","ref":"Axon.Layers.html#conv/4-one-dimensional-convolution","title":"One-dimensional convolution - Axon.Layers.conv/4","type":"function"},{"doc":"iex> input = Nx.tensor([[[[-1.0476, -0.5041], [-0.9336, 1.5907]]]], type: {:f, 32})\n iex> kernel = Nx.tensor([\n ...> [[[0.7514, 0.7356], [1.3909, 0.6800]]],\n ...> [[[-0.3450, 0.4551], [-0.6275, -0.9875]]],\n ...> [[[1.8587, 0.4722], [0.6058, -1.0301]]]\n ...> ], type: {:f, 32})\n iex> bias = Nx.tensor([1.9564, 0.2822, -0.5385], type: {:f, 32})\n iex> Axon.Layers.conv(input, kernel, bias, channels: :first)\n #Nx.Tensor \n\n#","ref":"Axon.Layers.html#conv/4-two-dimensional-convolution","title":"Two-dimensional convolution - Axon.Layers.conv/4","type":"function"},{"doc":"iex> input = Nx.tensor([[[[[-0.6497], [1.0939]], [[-2.5465], [0.7801]]]]], type: {:f, 32})\n iex> kernel = Nx.tensor([\n ...> [[[[ 0.7390], [-0.0927]], [[-0.8675], [-0.9209]]]],\n ...> [[[[-0.6638], [0.4341]], [[0.6368], [1.1846]]]]\n ...> ], type: {:f, 32})\n iex> bias = Nx.tensor([-0.4101, 0.1776], type: {:f, 32})\n iex> Axon.Layers.conv(input, kernel, bias, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#conv/4-three-dimensional-convolution","title":"Three-dimensional convolution - Axon.Layers.conv/4","type":"function"},{"doc":"","ref":"Axon.Layers.html#conv_lstm/7","title":"Axon.Layers.conv_lstm/7","type":"function"},{"doc":"ConvLSTM Cell.\n\nWhen combined with 
`Axon.Layers.*_unroll`, implements a\nConvLSTM-based RNN. More memory efficient than traditional LSTM.","ref":"Axon.Layers.html#conv_lstm_cell/7","title":"Axon.Layers.conv_lstm_cell/7","type":"function"},{"doc":"* `:strides` - convolution strides. Defaults to `1`.\n\n * `:padding` - convolution padding. Defaults to `:same`.","ref":"Axon.Layers.html#conv_lstm_cell/7-options","title":"Options - Axon.Layers.conv_lstm_cell/7","type":"function"},{"doc":"* [Convolutional LSTM Network: A Machine Learning Approach for Precipitation Nowcasting](https://arxiv.org/abs/1506.04214)","ref":"Axon.Layers.html#conv_lstm_cell/7-references","title":"References - Axon.Layers.conv_lstm_cell/7","type":"function"},{"doc":"Functional implementation of a general dimensional transposed\nconvolutional layer.\n\n*Note: This layer is currently implemented as a fractionally strided\nconvolution by padding the input tensor. Please open an issue if you'd\nlike this behavior changed.*\n\nTransposed convolutions are sometimes (incorrectly) referred to as\ndeconvolutions because they \"reverse\" the spatial dimensions\nof a normal convolution. Transposed convolutions are a form of upsampling -\nthey produce larger spatial dimensions than the input tensor. They\ncan be thought of as a convolution in reverse - and are sometimes\nimplemented as the backward pass of a normal convolution.","ref":"Axon.Layers.html#conv_transpose/4","title":"Axon.Layers.conv_transpose/4","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#conv_transpose/4-options","title":"Options - Axon.Layers.conv_transpose/4","type":"function"},{"doc":"iex> input = Nx.iota({1, 3, 3}, type: {:f, 32})\n iex> kernel = Nx.iota({6, 3, 2}, type: {:f, 32})\n iex> bias = Nx.tensor(1.0, type: {:f, 32})\n iex> Axon.Layers.conv_transpose(input, kernel, bias, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#conv_transpose/4-examples","title":"Examples - Axon.Layers.conv_transpose/4","type":"function"},{"doc":"* [A guide to convolution arithmetic for deep learning](https://arxiv.org/abs/1603.07285v1)\n * [Deconvolutional Networks](https://www.matthewzeiler.com/mattzeiler/deconvolutionalnetworks.pdf)","ref":"Axon.Layers.html#conv_transpose/4-references","title":"References - Axon.Layers.conv_transpose/4","type":"function"},{"doc":"Functional implementation of a dense layer.\n\nLinear transformation of the input such that:\n\n$$y = xW^T + b$$\n\nA dense layer or fully connected layer transforms\nthe input using the given kernel matrix and bias\nto compute:\n\n    Nx.dot(input, kernel) + bias\n\nTypically, both `kernel` and `bias` are learnable\nparameters trained using gradient-based optimization.","ref":"Axon.Layers.html#dense/4","title":"Axon.Layers.dense/4","type":"function"},{"doc":"* `input` - `{batch_size, *, input_features}`\n * `kernel` - `{input_features, output_features}`\n * `bias` - `{}` or `{output_features}`","ref":"Axon.Layers.html#dense/4-parameter-shapes","title":"Parameter Shapes - Axon.Layers.dense/4","type":"function"},{"doc":"`{batch_size, *, output_features}`","ref":"Axon.Layers.html#dense/4-output-shape","title":"Output Shape - Axon.Layers.dense/4","type":"function"},{"doc":"iex> input = Nx.tensor([[1.0, 0.5, 1.0, 0.5], [0.0, 0.0, 0.0, 0.0]], type: {:f, 32})\n iex> kernel = Nx.tensor([[0.2], [0.3], [0.5], [0.8]], type: {:f, 32})\n iex> bias = Nx.tensor([1.0], type: {:f, 32})\n iex> Axon.Layers.dense(input, kernel, bias)\n #Nx.Tensor","ref":"Axon.Layers.html#dense/4-examples","title":"Examples - Axon.Layers.dense/4","type":"function"},{"doc":"Functional implementation of a general dimensional depthwise\nconvolution.\n\nDepthwise convolutions apply a single convolutional filter to\neach input channel. This is done by setting `feature_group_size`\nequal to the number of input channels. This will split the\n`output_channels` into `input_channels` number of groups and\nconvolve the grouped kernel channels over the corresponding input\nchannel.","ref":"Axon.Layers.html#depthwise_conv/4","title":"Axon.Layers.depthwise_conv/4","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`\n * `kernel` - `{output_channels, 1, kernel_spatial0, ..., kernel_spatialN}`\n * `bias` - `{output_channels}` or `{}`\n\n `output_channels` must be a multiple of the input channels.","ref":"Axon.Layers.html#depthwise_conv/4-parameter-shapes","title":"Parameter Shapes - Axon.Layers.depthwise_conv/4","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. 
The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#depthwise_conv/4-options","title":"Options - Axon.Layers.depthwise_conv/4","type":"function"},{"doc":"Functional implementation of a dropout layer.\n\nApplies a mask to some elements of the input tensor with probability\n`rate` and scales the input tensor by a factor of $\\frac{1}{1 - rate}$.\n\nDropout is a form of regularization that helps prevent overfitting\nby preventing models from becoming too reliant on certain connections.\nDropout can somewhat be thought of as learning an ensemble of models\nwith random connections masked.","ref":"Axon.Layers.html#dropout/3","title":"Axon.Layers.dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n will be dropped. Required.\n\n * `:noise_shape` - input noise shape. Shape of `mask` which can be useful\n for broadcasting `mask` across feature channels or other dimensions.\n Defaults to shape of input tensor.","ref":"Axon.Layers.html#dropout/3-options","title":"Options - Axon.Layers.dropout/3","type":"function"},{"doc":"* [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](https://jmlr.org/papers/v15/srivastava14a.html)","ref":"Axon.Layers.html#dropout/3-references","title":"References - Axon.Layers.dropout/3","type":"function"},{"doc":"Dynamically unrolls an RNN.\n\nUnrolls implement a `scan` operation which applies a\ntransformation on the leading axis of `input_sequence` carrying\nsome state. In this instance `cell_fn` is an RNN cell function\nsuch as `lstm_cell` or `gru_cell`.\n\nThis function makes use of a `defn` while-loop and thus\nmay be more efficient for long sequences.","ref":"Axon.Layers.html#dynamic_unroll/7","title":"Axon.Layers.dynamic_unroll/7","type":"function"},{"doc":"","ref":"Axon.Layers.html#elu/2","title":"Axon.Layers.elu/2","type":"function"},{"doc":"Computes embedding by treating kernel matrix as a lookup table\nfor discrete tokens.\n\n`input` is a vector of discrete values, typically representing tokens\n(e.g. words, characters, etc.) from a vocabulary. 
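For instance, `input` might be `Nx.tensor([[1, 2, 4]])` - a batch containing one sequence of three token ids (an illustrative value, not from the original docs). 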
`kernel` is a kernel\nmatrix of shape `{vocab_size, embedding_size}` from which the dense\nembeddings will be drawn.","ref":"Axon.Layers.html#embedding/3","title":"Axon.Layers.embedding/3","type":"function"},{"doc":"* `input` - `{batch_size, ..., seq_len}`\n * `kernel` - `{vocab_size, embedding_size}`","ref":"Axon.Layers.html#embedding/3-parameter-shapes","title":"Parameter Shapes - Axon.Layers.embedding/3","type":"function"},{"doc":"iex> input = Nx.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])\n iex> kernels = Nx.tensor([\n ...> [0.46299999952316284, 0.5562999844551086, 0.18170000612735748],\n ...> [0.9801999926567078, 0.09780000150203705, 0.5333999991416931],\n ...> [0.6980000138282776, 0.9240999817848206, 0.23479999601840973],\n ...> [0.31929999589920044, 0.42250001430511475, 0.7865999937057495],\n ...> [0.5519000291824341, 0.5662999749183655, 0.20559999346733093],\n ...> [0.1898999959230423, 0.9311000108718872, 0.8356000185012817],\n ...> [0.6383000016212463, 0.8794000148773193, 0.5282999873161316],\n ...> [0.9523000121116638, 0.7597000002861023, 0.08250000327825546],\n ...> [0.6622999906539917, 0.02329999953508377, 0.8205999732017517],\n ...> [0.9855999946594238, 0.36419999599456787, 0.5372999906539917]\n ...> ])\n iex> Axon.Layers.embedding(input, kernels)\n #Nx.Tensor","ref":"Axon.Layers.html#embedding/3-examples","title":"Examples - Axon.Layers.embedding/3","type":"function"},{"doc":"Functional implementation of a feature alpha dropout layer.\n\nFeature alpha dropout applies dropout in the same manner as\nspatial dropout; however, it also enforces self-normalization\nby masking inputs with the SELU activation function and scaling\nunmasked inputs.","ref":"Axon.Layers.html#feature_alpha_dropout/3","title":"Axon.Layers.feature_alpha_dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n will be dropped. Required.\n\n * `:noise_shape` - input noise shape. Shape of `mask` which can be useful\n for broadcasting `mask` across feature channels or other dimensions.\n Defaults to shape of input tensor.","ref":"Axon.Layers.html#feature_alpha_dropout/3-options","title":"Options - Axon.Layers.feature_alpha_dropout/3","type":"function"},{"doc":"Flattens input to shape of `{batch, units}` by folding outer\ndimensions.","ref":"Axon.Layers.html#flatten/2","title":"Axon.Layers.flatten/2","type":"function"},{"doc":"iex> Axon.Layers.flatten(Nx.iota({1, 2, 2}, type: {:f, 32}))\n #Nx.Tensor","ref":"Axon.Layers.html#flatten/2-examples","title":"Examples - Axon.Layers.flatten/2","type":"function"},{"doc":"Functional implementation of global average pooling which averages across\nthe spatial dimensions of the input such that the only remaining dimensions\nare the batch and feature dimensions.\n\nAssumes data is configured in a channels-first-like format.","ref":"Axon.Layers.html#global_avg_pool/2","title":"Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"* `input` - {batch_size, features, s1, ..., sN}","ref":"Axon.Layers.html#global_avg_pool/2-parameter-shapes","title":"Parameter Shapes - Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"* `:keep_axes` - option to keep reduced axes with size 1 for each reduced\n dimension. 
Defaults to `false`","ref":"Axon.Layers.html#global_avg_pool/2-options","title":"Options - Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"iex> Axon.Layers.global_avg_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)\n #Nx.Tensor \n\n iex> Axon.Layers.global_avg_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), channels: :first, keep_axes: true)\n #Nx.Tensor","ref":"Axon.Layers.html#global_avg_pool/2-examples","title":"Examples - Axon.Layers.global_avg_pool/2","type":"function"},{"doc":"Functional implementation of global LP pooling which computes the following\nfunction across spatial dimensions of the input:\n\n    $$f(X) = \\sqrt[p]{\\sum_{x \\in X} x^{p}}$$\n\nWhere $p$ is given by the keyword argument `:norm`. As $p$ approaches\ninfinity, it becomes equivalent to max pooling.\n\nAssumes data is configured in a channels-first-like format.","ref":"Axon.Layers.html#global_lp_pool/2","title":"Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"* `input` - {batch_size, s1, ..., sN, features}","ref":"Axon.Layers.html#global_lp_pool/2-parameter-shapes","title":"Parameter Shapes - Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"* `:keep_axes` - option to keep reduced axes with size 1 for each reduced\n dimension. Defaults to `false`\n * `:norm` - $p$ in above function. Defaults to 2","ref":"Axon.Layers.html#global_lp_pool/2-options","title":"Options - Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"iex> Axon.Layers.global_lp_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), norm: 1, channels: :first)\n #Nx.Tensor \n\n iex> Axon.Layers.global_lp_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 16}), keep_axes: true, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#global_lp_pool/2-examples","title":"Examples - Axon.Layers.global_lp_pool/2","type":"function"},{"doc":"Functional implementation of global max pooling which computes maximums across\nthe spatial dimensions of the input such that the only remaining dimensions are\nthe batch and feature dimensions.\n\nAssumes data is configured in a channels-first-like format.","ref":"Axon.Layers.html#global_max_pool/2","title":"Axon.Layers.global_max_pool/2","type":"function"},{"doc":"* `input` - {batch_size, s1, ..., sN, features}","ref":"Axon.Layers.html#global_max_pool/2-parameter-shapes","title":"Parameter Shapes - Axon.Layers.global_max_pool/2","type":"function"},{"doc":"* `:keep_axes` - option to keep reduced axes with size 1 for each reduced\n dimension. Defaults to `false`","ref":"Axon.Layers.html#global_max_pool/2-options","title":"Options - Axon.Layers.global_max_pool/2","type":"function"},{"doc":"iex> Axon.Layers.global_max_pool(Nx.iota({3, 2, 3}, type: {:f, 32}), channels: :first)\n #Nx.Tensor \n\n iex> Axon.Layers.global_max_pool(Nx.iota({1, 3, 2, 2}, type: {:f, 32}), keep_axes: true, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#global_max_pool/2-examples","title":"Examples - Axon.Layers.global_max_pool/2","type":"function"},{"doc":"Functional implementation of group normalization.\n\nNormalizes the input by reshaping input into `:num_groups`\ngroups and then calculating the mean and variance along\nevery dimension but the input batch dimension.\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. This method does\nnot maintain an EMA of mean and variance.","ref":"Axon.Layers.html#group_norm/4","title":"Axon.Layers.group_norm/4","type":"function"},{"doc":"* `:num_groups` - Number of groups.\n\n * `:epsilon` - numerical stability term. 
$\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes and group shape for mean and variance calculation.","ref":"Axon.Layers.html#group_norm/4-options","title":"Options - Axon.Layers.group_norm/4","type":"function"},{"doc":"* [Group Normalization](https://arxiv.org/abs/1803.08494v3)","ref":"Axon.Layers.html#group_norm/4-references","title":"References - Axon.Layers.group_norm/4","type":"function"},{"doc":"","ref":"Axon.Layers.html#gru/7","title":"Axon.Layers.gru/7","type":"function"},{"doc":"GRU Cell.\n\nWhen combined with `Axon.Layers.*_unroll`, implements a\nGRU-based RNN. More memory efficient than traditional LSTM.","ref":"Axon.Layers.html#gru_cell/8","title":"Axon.Layers.gru_cell/8","type":"function"},{"doc":"* [Empirical Evaluation of Gated Recurrent Neural Networks on Sequence Modeling](https://arxiv.org/pdf/1412.3555v1.pdf)","ref":"Axon.Layers.html#gru_cell/8-references","title":"References - Axon.Layers.gru_cell/8","type":"function"},{"doc":"","ref":"Axon.Layers.html#hard_sigmoid/2","title":"Axon.Layers.hard_sigmoid/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#hard_silu/2","title":"Axon.Layers.hard_silu/2","type":"function"},{"doc":"Functional implementation of instance normalization.\n\nNormalizes the input by calculating mean and variance of the\ninput tensor along the spatial dimensions of the input.\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. If `training?` is\ntrue, this method will compute a new mean and variance, and return\nthe updated `ra_mean` and `ra_var`. Otherwise, it will just compute\ninstance norm from the given `ra_mean` and `ra_var`.","ref":"Axon.Layers.html#instance_norm/6","title":"Axon.Layers.instance_norm/6","type":"function"},{"doc":"* `:epsilon` - numerical stability term. $\\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes for mean and variance calculation.\n\n * `:momentum` - momentum to use for EMA update.\n\n * `:training?` - if true, uses training mode instance norm. Defaults to false.","ref":"Axon.Layers.html#instance_norm/6-options","title":"Options - Axon.Layers.instance_norm/6","type":"function"},{"doc":"* [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022v3)","ref":"Axon.Layers.html#instance_norm/6-references","title":"References - Axon.Layers.instance_norm/6","type":"function"},{"doc":"Functional implementation of layer normalization.\n\nNormalizes the input by calculating mean and variance of the\ninput tensor along the given feature dimension `:channel_index`.\n\n$$y = \\frac{x - E[x]}{\\sqrt{Var[x] + \\epsilon}} * \\gamma + \\beta$$\n\n`gamma` and `beta` are often trainable parameters. This method does\nnot maintain an EMA of mean and variance.","ref":"Axon.Layers.html#layer_norm/4","title":"Axon.Layers.layer_norm/4","type":"function"},{"doc":"* `:epsilon` - numerical stability term. 
$\epsilon$ in the above\n formulation.\n\n * `:channel_index` - channel index used to determine reduction\n axes for mean and variance calculation.","ref":"Axon.Layers.html#layer_norm/4-options","title":"Options - Axon.Layers.layer_norm/4","type":"function"},{"doc":"","ref":"Axon.Layers.html#leaky_relu/2","title":"Axon.Layers.leaky_relu/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#log_softmax/2","title":"Axon.Layers.log_softmax/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#log_sumexp/2","title":"Axon.Layers.log_sumexp/2","type":"function"},{"doc":"Functional implementation of a general dimensional power average\npooling layer.\n\nPooling is applied to the spatial dimension of the input tensor.\nPower average pooling computes the following function on each\nvalid window of the input tensor:\n\n$$f(X) = \\sqrt[p]{\\sum_{x \\in X} x^{p}}$$\n\nWhere $p$ is given by the keyword argument `:norm`. As $p$ approaches\ninfinity, it becomes equivalent to max pooling.","ref":"Axon.Layers.html#lp_pool/2","title":"Axon.Layers.lp_pool/2","type":"function"},{"doc":"* `:norm` - $p$ from above equation. Defaults to 2.\n\n * `:kernel_size` - window size. Rank must match spatial dimension\n of the input tensor. Required.\n\n * `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to size of kernel.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:window_dilations` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Can be scalar or list whose length matches the number of\n spatial dimensions in the input tensor. Defaults to `1` or no\n dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#lp_pool/2-options","title":"Options - Axon.Layers.lp_pool/2","type":"function"},{"doc":"iex> t = Nx.tensor([[[0.9450, 0.4684, 1.8146], [1.2663, 0.4354, -0.0781], [-0.4759, 0.3251, 0.8742]]], type: {:f, 32})\n iex> Axon.Layers.lp_pool(t, kernel_size: 2, norm: 2, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#lp_pool/2-examples","title":"Examples - Axon.Layers.lp_pool/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#lstm/7","title":"Axon.Layers.lstm/7","type":"function"},{"doc":"LSTM Cell.\n\nWhen combined with `Axon.Layers.*_unroll`, implements an\nLSTM-based RNN.","ref":"Axon.Layers.html#lstm_cell/8","title":"Axon.Layers.lstm_cell/8","type":"function"},{"doc":"* [Long Short-Term Memory](http://www.bioinf.jku.at/publications/older/2604.pdf)","ref":"Axon.Layers.html#lstm_cell/8-references","title":"References - Axon.Layers.lstm_cell/8","type":"function"},{"doc":"Functional implementation of a general dimensional max pooling layer.\n\nPooling is applied to the spatial dimension of the input tensor.\nMax pooling returns the maximum element in each valid window of\nthe input tensor. It is often used after convolutional layers\nto downsample the input even further.","ref":"Axon.Layers.html#max_pool/2","title":"Axon.Layers.max_pool/2","type":"function"},{"doc":"* `kernel_size` - window size. Rank must match spatial dimension\n of the input tensor. Required.\n\n * `:strides` - kernel strides. 
Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to size of kernel.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:window_dilations` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Can be scalar or list whose length matches the number of\n spatial dimensions in the input tensor. Defaults to `1` or no\n dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#max_pool/2-options","title":"Options - Axon.Layers.max_pool/2","type":"function"},{"doc":"iex> t = Nx.tensor([[\n ...> [0.051500000059604645, -0.7042999863624573, -0.32899999618530273],\n ...> [-0.37130001187324524, 1.6191999912261963, -0.11829999834299088],\n ...> [0.7099999785423279, 0.7282999753952026, -0.18639999628067017]]], type: {:f, 32})\n iex> Axon.Layers.max_pool(t, kernel_size: 2, channels: :first)\n #Nx.Tensor","ref":"Axon.Layers.html#max_pool/2-examples","title":"Examples - Axon.Layers.max_pool/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#multiply/2","title":"Axon.Layers.multiply/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#padding_config_transform/2","title":"Axon.Layers.padding_config_transform/2","type":"function"},{"doc":"Resizes a batch of tensors to the given shape using one of a\nnumber of sampling methods.\n\nRequires input option `:size` which should be a tuple specifying\nthe resized spatial dimensions of the input tensor. Input tensor\nmust be at least rank 3, with fixed `batch` and `channel` dimensions.\nResizing will upsample or downsample using the given resize method.","ref":"Axon.Layers.html#resize/2","title":"Axon.Layers.resize/2","type":"function"},{"doc":"* `:size` - a tuple specifying the resized spatial dimensions.\n Required.\n\n * `:method` - the resizing method to use, one of `:nearest`,\n `:bilinear`, `:bicubic`, `:lanczos3`, `:lanczos5`. Defaults to\n `:nearest`.\n\n * `:antialias` - whether an anti-aliasing filter should be used\n when downsampling. This has no effect with upsampling. 
Defaults\n to `true`.\n\n * `:channels` - channels location, either `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#resize/2-options","title":"Options - Axon.Layers.resize/2","type":"function"},{"doc":"iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})\n iex> Axon.Layers.resize(img, size: {4, 4}, channels: :first)\n #Nx.Tensor \n\n#","ref":"Axon.Layers.html#resize/2-examples","title":"Examples - Axon.Layers.resize/2","type":"function"},{"doc":"iex> img = Nx.iota({1, 1, 3, 3}, type: {:f, 32})\n iex> Axon.Layers.resize(img, size: {4, 4}, method: :foo)\n ** (ArgumentError) expected :method to be either of :nearest, :bilinear, :bicubic, :lanczos3, :lanczos5, got: :foo","ref":"Axon.Layers.html#resize/2-error-cases","title":"Error cases - Axon.Layers.resize/2","type":"function"},{"doc":"","ref":"Axon.Layers.html#selu/2","title":"Axon.Layers.selu/2","type":"function"},{"doc":"Functional implementation of a 2-dimensional separable depthwise\nconvolution.\n\nThe 2-d depthwise separable convolution performs 2 depthwise convolutions\neach over 1 spatial dimension of the input.","ref":"Axon.Layers.html#separable_conv2d/6","title":"Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, ..., input_spatialN}`\n * `k1` - `{output_channels, 1, kernel_spatial0, 1}`\n * `b1` - `{output_channels}` or `{}`\n * `k2` - `{output_channels, 1, 1, kernel_spatial1}`\n * `b2` - `{output_channels}` or `{}`\n\n `output_channels` must be a multiple of the input channels.","ref":"Axon.Layers.html#separable_conv2d/6-parameter-shapes","title":"Parameter Shapes - Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. 
One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#separable_conv2d/6-options","title":"Options - Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"* [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)","ref":"Axon.Layers.html#separable_conv2d/6-references","title":"References - Axon.Layers.separable_conv2d/6","type":"function"},{"doc":"Functional implementation of a 3-dimensional separable depthwise\nconvolution.\n\nThe 3-d depthwise separable convolution performs 3 depthwise convolutions\neach over 1 spatial dimension of the input.","ref":"Axon.Layers.html#separable_conv3d/8","title":"Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"* `input` - `{batch_size, input_channels, input_spatial0, input_spatial1, input_spatial2}`\n * `k1` - `{output_channels, 1, kernel_spatial0, 1, 1}`\n * `b1` - `{output_channels}` or `{}`\n * `k2` - `{output_channels, 1, 1, kernel_spatial1, 1}`\n * `b2` - `{output_channels}` or `{}`\n * `k3` - `{output_channels, 1, 1, 1, kernel_spatial2}`\n * `b3` - `{output_channels}` or `{}`\n\n `output_channels` must be a multiple of the input channels.","ref":"Axon.Layers.html#separable_conv3d/8-parameter-shapes","title":"Parameter Shapes - Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"* `:strides` - kernel strides. Can be a scalar or a list\n whose length matches the number of spatial dimensions in\n the input tensor. Defaults to 1.\n\n * `:padding` - zero padding on the input. Can be one of\n `:valid`, `:same` or a general padding configuration\n without interior padding for each spatial dimension\n of the input.\n\n * `:input_dilation` - input dilation factor. Equivalent\n to applying interior padding on the input. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:kernel_dilation` - kernel dilation factor. Equivalent\n to applying interior padding on the kernel. The amount\n of interior padding applied is given by `kernel_dilation - 1`.\n Defaults to `1` or no dilation.\n\n * `:channels` - channel configuration. One of `:first` or `:last`.\n Defaults to `:last`.","ref":"Axon.Layers.html#separable_conv3d/8-options","title":"Options - Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"* [Xception: Deep Learning with Depthwise Separable Convolutions](https://arxiv.org/abs/1610.02357)","ref":"Axon.Layers.html#separable_conv3d/8-references","title":"References - Axon.Layers.separable_conv3d/8","type":"function"},{"doc":"","ref":"Axon.Layers.html#softmax/2","title":"Axon.Layers.softmax/2","type":"function"},{"doc":"Functional implementation of an n-dimensional spatial\ndropout layer.\n\nApplies a mask to entire feature maps instead of individual\nelements. This is done by calculating a mask shape equal to\nthe spatial dimensions of the input tensor with 1 channel,\nand then broadcasting the mask across the feature dimension\nof the input tensor.","ref":"Axon.Layers.html#spatial_dropout/3","title":"Axon.Layers.spatial_dropout/3","type":"function"},{"doc":"* `:rate` - dropout rate. Used to determine probability a connection\n will be dropped. Required.\n\n * `:noise_shape` - input noise shape. 
Shape of `mask` which can be useful\n for broadcasting `mask` across feature channels or other dimensions.\n Defaults to shape of input tensor.","ref":"Axon.Layers.html#spatial_dropout/3-options","title":"Options - Axon.Layers.spatial_dropout/3","type":"function"},{"doc":"* [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)","ref":"Axon.Layers.html#spatial_dropout/3-references","title":"References - Axon.Layers.spatial_dropout/3","type":"function"},{"doc":"Statically unrolls an RNN.\n\nUnrolls implement a `scan` operation which applies a\ntransformation on the leading axis of `input_sequence` carrying\nsome state. In this instance `cell_fn` is an RNN cell function\nsuch as `lstm_cell` or `gru_cell`.\n\nThis function inlines the unrolling of the sequence such that\nthe entire operation appears as a part of the compilation graph.\nThis makes it suitable for shorter sequences.","ref":"Axon.Layers.html#static_unroll/7","title":"Axon.Layers.static_unroll/7","type":"function"},{"doc":"","ref":"Axon.Layers.html#subtract/2","title":"Axon.Layers.subtract/2","type":"function"},{"doc":"Implementations of loss-scalers for use in mixed precision\ntraining.\n\nLoss scaling is used to prevent underflow when using mixed\nprecision during the model training process. Each loss-scale\nimplementation here returns a tuple of functions:\n\n    {init_fn, scale_fn, unscale_fn, adjust_fn} = Axon.LossScale.static(Nx.pow(2, 15))\n\nYou can use these to scale/unscale loss and gradients as well\nas adjust the loss scale state.\n\n`Axon.Loop.trainer/3` builds in loss-scaling by default. You\ncan reference the `Axon.Loop.train_step/3` implementation to\nsee how loss-scaling is applied in practice.","ref":"Axon.LossScale.html","title":"Axon.LossScale","type":"module"},{"doc":"Implements dynamic loss-scale.","ref":"Axon.LossScale.html#dynamic/1","title":"Axon.LossScale.dynamic/1","type":"function"},{"doc":"Implements identity loss-scale.","ref":"Axon.LossScale.html#identity/1","title":"Axon.LossScale.identity/1","type":"function"},{"doc":"Implements static loss-scale.","ref":"Axon.LossScale.html#static/1","title":"Axon.LossScale.static/1","type":"function"},{"doc":"Loss functions.\n\nLoss functions evaluate predictions with respect to true\ndata, often to measure the divergence between a model's\nrepresentation of the data-generating distribution and the\ntrue representation of the data-generating distribution.\n\nEach loss function is implemented as an element-wise function\nmeasuring the loss with respect to the input target `y_true`\nand input prediction `y_pred`. 
As an example, the `mean_squared_error/2`\nloss function produces a tensor whose values are the mean squared\nerror between targets and predictions:\n\n    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n    iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n    iex> Axon.Losses.mean_squared_error(y_true, y_pred)\n    #Nx.Tensor \n\nIt's common to compute the loss across an entire minibatch.\nYou can easily do so by specifying a `:reduction` mode, or\nby composing one of these functions with an `Nx` reduction method:\n\n    iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n    iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n    iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)\n    #Nx.Tensor \n\nYou can even compose loss functions:\n\n    defn my_strange_loss(y_true, y_pred) do\n      y_true\n      |> Axon.Losses.mean_squared_error(y_pred)\n      |> Axon.Losses.binary_cross_entropy(y_pred)\n      |> Nx.sum()\n    end\n\nOr, more commonly, you can combine loss functions with penalties for\nregularization:\n\n    defn regularized_loss(params, y_true, y_pred) do\n      loss = Axon.Losses.mean_squared_error(y_true, y_pred)\n      penalty = l2_penalty(params)\n      Nx.sum(loss) + penalty\n    end\n\nAll of the functions in this module are implemented as\nnumerical functions and can be JIT or AOT compiled with\nany supported `Nx` compiler.","ref":"Axon.Losses.html","title":"Axon.Losses","type":"module"},{"doc":"Applies label smoothing to the given labels.\n\nLabel smoothing is a regularization technique which shrinks targets\ntowards a uniform distribution. Label smoothing can improve model\ngeneralization.","ref":"Axon.Losses.html#apply_label_smoothing/3","title":"Axon.Losses.apply_label_smoothing/3","type":"function"},{"doc":"* `:smoothing` - smoothing factor. Defaults to 0.1","ref":"Axon.Losses.html#apply_label_smoothing/3-options","title":"Options - Axon.Losses.apply_label_smoothing/3","type":"function"},{"doc":"* [Rethinking the Inception Architecture for Computer Vision](https://arxiv.org/abs/1512.00567)","ref":"Axon.Losses.html#apply_label_smoothing/3-references","title":"References - Axon.Losses.apply_label_smoothing/3","type":"function"},{"doc":"Binary cross-entropy loss function.\n\n$$l_i = -\\frac{1}{2}(\\hat{y_i} \\cdot \\log(y_i) + (1 - \\hat{y_i}) \\cdot \\log(1 - y_i))$$\n\nBinary cross-entropy loss is most often used in binary classification problems.\nBy default, it expects `y_pred` to encode probabilities from `[0.0, 1.0]`, typically\nas the output of the sigmoid function or another function which squeezes values\nbetween 0 and 1. You may optionally set `from_logits: true` to specify that values\nare being sent as non-normalized values (e.g. weights with possibly infinite range).\nIn this case, input values will be encoded as probabilities by applying the logistic\nsigmoid function before computing loss.","ref":"Axon.Losses.html#binary_cross_entropy/3","title":"Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#binary_cross_entropy/3-argument-shapes","title":"Argument Shapes - Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n\n * `:negative_weight` - class weight for `0` class useful for scaling loss\n by importance of class. Defaults to `1.0`.\n\n * `:positive_weight` - class weight for `1` class useful for scaling loss\n by importance of class. 
Defaults to `1.0`.\n\n * `:from_logits` - whether `y_pred` is a logits tensor. Defaults to `false`.","ref":"Axon.Losses.html#binary_cross_entropy/3-options","title":"Options - Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])\n iex> Axon.Losses.binary_cross_entropy(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])\n iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0.6811, 0.5565], [0.6551, 0.4551], [0.5422, 0.2648]])\n iex> Axon.Losses.binary_cross_entropy(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#binary_cross_entropy/3-examples","title":"Examples - Axon.Losses.binary_cross_entropy/3","type":"function"},{"doc":"Categorical cross-entropy loss function.\n\n$$l_i = -\\sum_i^C \\hat{y_i} \\cdot \\log(y_i)$$\n\nCategorical cross-entropy is typically used for multi-class classification problems.\nBy default, it expects `y_pred` to encode a probability distribution along the last\naxis. You can specify `from_logits: true` to indicate `y_pred` is a logits tensor.\n\n    # Batch size of 3 with 3 target classes\n    y_true = Nx.tensor([0, 2, 1])\n    y_pred = Nx.tensor([[0.2, 0.8, 0.0], [0.1, 0.2, 0.7], [0.1, 0.2, 0.7]])","ref":"Axon.Losses.html#categorical_cross_entropy/3","title":"Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#categorical_cross_entropy/3-argument-shapes","title":"Argument Shapes - Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n\n * `:class_weights` - 1-D list corresponding to the weight of each\n class, useful for scaling loss according to the importance of each class. Tensor\n size must match number of classes in dataset. Defaults to `1.0` for all\n classes.\n\n * `:from_logits` - whether `y_pred` is a logits tensor. Defaults to `false`.\n\n * `:sparse` - whether `y_true` encodes a \"sparse\" tensor. In this case the\n inputs are integer values corresponding to the target class. 
Defaults to\n `false`.","ref":"Axon.Losses.html#categorical_cross_entropy/3-options","title":"Options - Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([1, 2], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05, 0.95, 0], [0.1, 0.8, 0.1]])\n iex> Axon.Losses.categorical_cross_entropy(y_true, y_pred, reduction: :sum, sparse: true)\n #Nx.Tensor","ref":"Axon.Losses.html#categorical_cross_entropy/3-examples","title":"Examples - Axon.Losses.categorical_cross_entropy/3","type":"function"},{"doc":"Categorical hinge loss function.","ref":"Axon.Losses.html#categorical_hinge/3","title":"Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#categorical_hinge/3-argument-shapes","title":"Argument Shapes - Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#categorical_hinge/3-options","title":"Options - Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])\n iex> Axon.Losses.categorical_hinge(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])\n iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[1, 0, 0], [0, 0, 1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.05300799, 0.21617081, 0.68642382], [0.3754382 , 0.08494169, 0.13442067]])\n iex> Axon.Losses.categorical_hinge(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#categorical_hinge/3-examples","title":"Examples - Axon.Losses.categorical_hinge/3","type":"function"},{"doc":"Connectionist Temporal Classification loss.","ref":"Axon.Losses.html#connectionist_temporal_classification/3","title":"Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"* `l_true` - $(B)$\n * `y_true` - $(B, S)$\n * `y_pred` - $(B, T, D)$","ref":"Axon.Losses.html#connectionist_temporal_classification/3-argument-shapes","title":"Argument Shapes - Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:sum` or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#connectionist_temporal_classification/3-options","title":"Options - Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"`l_true` contains the lengths of the target sequences, as nonzero positive values.\n `y_true` contains the target sequences. 
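(For illustration only - hypothetical values, not from the original docs - a single padded target sequence might be `y_true = Nx.tensor([[2, 3, 3, 1]])`, with `l_true = Nx.tensor([4])` giving its length.) 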
Each value represents the class\n of an element, drawn from the range of available classes 0 <= y < D. The blank element\n class is included in this range, but shouldn't be present among\n `y_true` values. The maximum target sequence length must be less than or equal\n to the `y_pred` sequence length: S <= T.\n `y_pred` contains the log-probabilities of the D classes along the\n prediction sequence T.","ref":"Axon.Losses.html#connectionist_temporal_classification/3-description","title":"Description - Axon.Losses.connectionist_temporal_classification/3","type":"function"},{"doc":"Cosine similarity loss function.\n\n$$l_i = \\frac{\\hat{y} \\cdot y}{\\max(\\lVert\\hat{y}\\rVert_2 \\lVert y\\rVert_2, \\epsilon)}$$","ref":"Axon.Losses.html#cosine_similarity/3","title":"Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#cosine_similarity/3-argument-shapes","title":"Argument Shapes - Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n * `:axes` - Defaults to `[1]`.\n * `:eps` - Defaults to `1.0e-6`.","ref":"Axon.Losses.html#cosine_similarity/3-options","title":"Options - Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"iex> y_pred = Nx.tensor([[1.0, 0.0], [1.0, 1.0]])\n iex> y_true = Nx.tensor([[0.0, 1.0], [1.0, 1.0]])\n iex> Axon.Losses.cosine_similarity(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Losses.html#cosine_similarity/3-examples","title":"Examples - Axon.Losses.cosine_similarity/3","type":"function"},{"doc":"Hinge loss function.\n\n$$\\frac{1}{C}\\max_i(1 - \\hat{y_i} * y_i, 0)$$","ref":"Axon.Losses.html#hinge/3","title":"Axon.Losses.hinge/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#hinge/3-options","title":"Options - Axon.Losses.hinge/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#hinge/3-argument-shapes","title":"Argument Shapes - Axon.Losses.hinge/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[ 1, 1, -1], [ 1, 1, -1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])\n iex> Axon.Losses.hinge(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[ 1, 1, -1], [ 1, 1, -1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])\n iex> Axon.Losses.hinge(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[ 1, 1, -1], [ 1, 1, -1]], type: {:s, 8})\n iex> y_pred = Nx.tensor([[0.45440044, 0.31470688, 0.67920924], [0.24311459, 0.93466766, 0.10914676]])\n iex> Axon.Losses.hinge(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#hinge/3-examples","title":"Examples - Axon.Losses.hinge/3","type":"function"},{"doc":"Huber loss.","ref":"Axon.Losses.html#huber/3","title":"Axon.Losses.huber/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#huber/3-argument-shapes","title":"Argument Shapes - Axon.Losses.huber/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.\n\n * `:delta` - the point where the Huber loss function changes from quadratic to linear.\n Defaults to `1.0`.","ref":"Axon.Losses.html#huber/3-options","title":"Options - Axon.Losses.huber/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[1], [1.5], [2.0]])\n iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])\n iex> Axon.Losses.huber(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[1], [1.5], [2.0]])\n iex> y_pred = Nx.tensor([[0.8], [1.8], [2.1]])\n iex> Axon.Losses.huber(y_true, y_pred, reduction: :mean)\n #Nx.Tensor","ref":"Axon.Losses.html#huber/3-examples","title":"Examples - Axon.Losses.huber/3","type":"function"},{"doc":"Kullback-Leibler divergence loss function.\n\n$$l_i = \\sum_i^C \\hat{y_i} \\cdot \\log(\\frac{\\hat{y_i}}{y_i})$$","ref":"Axon.Losses.html#kl_divergence/3","title":"Axon.Losses.kl_divergence/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#kl_divergence/3-argument-shapes","title":"Argument Shapes - Axon.Losses.kl_divergence/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#kl_divergence/3-options","title":"Options - Axon.Losses.kl_divergence/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})\n iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])\n iex> Axon.Losses.kl_divergence(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})\n iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])\n iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0, 1], [0, 0]], type: {:u, 8})\n iex> y_pred = Nx.tensor([[0.6, 0.4], [0.4, 0.6]])\n iex> Axon.Losses.kl_divergence(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#kl_divergence/3-examples","title":"Examples - Axon.Losses.kl_divergence/3","type":"function"},{"doc":"Modifies the given loss function to smooth labels prior\nto calculating loss.\n\nSee `apply_label_smoothing/3` for details.","ref":"Axon.Losses.html#label_smoothing/2","title":"Axon.Losses.label_smoothing/2","type":"function"},{"doc":"* `:smoothing` - smoothing factor. Defaults to 0.1","ref":"Axon.Losses.html#label_smoothing/2-options","title":"Options - Axon.Losses.label_smoothing/2","type":"function"},{"doc":"Logarithmic-Hyperbolic Cosine loss function.\n\n$$l_i = \\frac{1}{C} \\sum_i^C \\left[(\\hat{y_i} - y_i) + \\log(1 + e^{-2(\\hat{y_i} - y_i)}) - \\log(2)\\right]$$","ref":"Axon.Losses.html#log_cosh/3","title":"Axon.Losses.log_cosh/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#log_cosh/3-argument-shapes","title":"Argument Shapes - Axon.Losses.log_cosh/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#log_cosh/3-options","title":"Options - Axon.Losses.log_cosh/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])\n iex> Axon.Losses.log_cosh(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])\n iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]])\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]])\n iex> Axon.Losses.log_cosh(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#log_cosh/3-examples","title":"Examples - Axon.Losses.log_cosh/3","type":"function"},{"doc":"Margin ranking loss function.\n\n$$l_i = \\max(0, -\\hat{y_i} * (y^{(1)}_i - y^{(2)}_i) + \\alpha)$$","ref":"Axon.Losses.html#margin_ranking/3","title":"Axon.Losses.margin_ranking/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#margin_ranking/3-options","title":"Options - Axon.Losses.margin_ranking/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})\n iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})\n iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})\n iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2})\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})\n iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})\n iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})\n iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([1.0, 1.0, 1.0], type: {:f, 32})\n iex> y_pred1 = Nx.tensor([0.6934, -0.7239, 1.1954], type: {:f, 32})\n iex> y_pred2 = Nx.tensor([-0.4691, 0.2670, -1.7452], type: {:f, 32})\n iex> Axon.Losses.margin_ranking(y_true, {y_pred1, y_pred2}, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#margin_ranking/3-examples","title":"Examples - Axon.Losses.margin_ranking/3","type":"function"},{"doc":"Mean-absolute error loss function.\n\n$$l_i = \\sum_i |\\hat{y_i} - y_i|$$","ref":"Axon.Losses.html#mean_absolute_error/3","title":"Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#mean_absolute_error/3-argument-shapes","title":"Argument Shapes - Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#mean_absolute_error/3-options","title":"Options - Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_absolute_error(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_absolute_error(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#mean_absolute_error/3-examples","title":"Examples - Axon.Losses.mean_absolute_error/3","type":"function"},{"doc":"Mean-squared error loss function.\n\n$$l_i = \\sum_i (\\hat{y_i} - y_i)^2$$","ref":"Axon.Losses.html#mean_squared_error/3","title":"Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#mean_squared_error/3-argument-shapes","title":"Argument Shapes - Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#mean_squared_error/3-options","title":"Options - Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.mean_squared_error(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#mean_squared_error/3-examples","title":"Examples - Axon.Losses.mean_squared_error/3","type":"function"},{"doc":"Poisson loss function.\n\n$$l_i = \\frac{1}{C} \\sum_i^C y_i - (\\hat{y_i} \\cdot \\log(y_i))$$","ref":"Axon.Losses.html#poisson/3","title":"Axon.Losses.poisson/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Losses.html#poisson/3-argument-shapes","title":"Argument Shapes - Axon.Losses.poisson/3","type":"function"},{"doc":"* `:reduction` - reduction mode. 
One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#poisson/3-options","title":"Options - Axon.Losses.poisson/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.poisson(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.poisson(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> Axon.Losses.poisson(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#poisson/3-examples","title":"Examples - Axon.Losses.poisson/3","type":"function"},{"doc":"Soft margin loss function.\n\n$$l_i = \\sum_i \\frac{\\log(1 + e^{-\\hat{y_i} * y_i})}{N}$$","ref":"Axon.Losses.html#soft_margin/3","title":"Axon.Losses.soft_margin/3","type":"function"},{"doc":"* `:reduction` - reduction mode. One of `:mean`, `:sum`, or `:none`.\n Defaults to `:none`.","ref":"Axon.Losses.html#soft_margin/3-options","title":"Options - Axon.Losses.soft_margin/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})\n iex> Axon.Losses.soft_margin(y_true, y_pred)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})\n iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :mean)\n #Nx.Tensor \n\n iex> y_true = Nx.tensor([[-1.0, 1.0, 1.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[0.2953, -0.1709, 0.9486]], type: {:f, 32})\n iex> Axon.Losses.soft_margin(y_true, y_pred, reduction: :sum)\n #Nx.Tensor","ref":"Axon.Losses.html#soft_margin/3-examples","title":"Examples - Axon.Losses.soft_margin/3","type":"function"},{"doc":"Metric functions.\n\nMetrics are used to measure and compare the\nperformance of models in easy-to-understand terms.\nOftentimes, neural networks use surrogate loss functions such\nas negative log-likelihood to indirectly optimize a certain\nperformance metric. Metrics such as accuracy, also called\nthe 0-1 loss, do not have useful derivatives (i.e. they\nare information sparse), and are often intractable even\nwith low input dimensions.\n\nDespite not being able to train specifically for certain\nmetrics, it's still useful to track these metrics to\nmonitor the performance of a neural network during training.\nMetrics such as accuracy provide useful feedback during\ntraining, whereas loss can sometimes be difficult to interpret.\n\nYou can attach any of these functions as metrics within the\n`Axon.Loop` API using `Axon.Loop.metric/3`.\n\nAll of the functions in this module are implemented as\nnumerical functions and can be JIT or AOT compiled with\nany supported `Nx` compiler.","ref":"Axon.Metrics.html","title":"Axon.Metrics","type":"module"},{"doc":"Computes the accuracy of the given predictions.\n\nIf the size of the last axis is 1, it performs a binary\naccuracy computation with a threshold of 0.5. 
Otherwise,\ncomputes categorical accuracy.","ref":"Axon.Metrics.html#accuracy/3","title":"Axon.Metrics.accuracy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#accuracy/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.accuracy/3","type":"function"},{"doc":"iex> Axon.Metrics.accuracy(Nx.tensor([[1], [0], [0]]), Nx.tensor([[1], [1], [1]]))\n #Nx.Tensor \n\n iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1], [1, 0], [1, 0]]), Nx.tensor([[0, 1], [1, 0], [0, 1]]))\n #Nx.Tensor \n\n iex> Axon.Metrics.accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0, 1, 0], [0, 1, 0]]))\n #Nx.Tensor","ref":"Axon.Metrics.html#accuracy/3-examples","title":"Examples - Axon.Metrics.accuracy/3","type":"function"},{"doc":"","ref":"Axon.Metrics.html#accuracy_transform/4","title":"Axon.Metrics.accuracy_transform/4","type":"function"},{"doc":"Computes the number of false negative predictions with respect\nto given targets.","ref":"Axon.Metrics.html#false_negatives/3","title":"Axon.Metrics.false_negatives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#false_negatives/3-options","title":"Options - Axon.Metrics.false_negatives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.false_negatives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#false_negatives/3-examples","title":"Examples - Axon.Metrics.false_negatives/3","type":"function"},{"doc":"Computes the number of false positive predictions with respect\nto given targets.","ref":"Axon.Metrics.html#false_positives/3","title":"Axon.Metrics.false_positives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#false_positives/3-options","title":"Options - Axon.Metrics.false_positives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.false_positives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#false_positives/3-examples","title":"Examples - Axon.Metrics.false_positives/3","type":"function"},{"doc":"Calculates the mean absolute error of predictions\nwith respect to targets.\n\n$$l_i = \\sum_i |\\hat{y_i} - y_i|$$","ref":"Axon.Metrics.html#mean_absolute_error/2","title":"Axon.Metrics.mean_absolute_error/2","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#mean_absolute_error/2-argument-shapes","title":"Argument Shapes - Axon.Metrics.mean_absolute_error/2","type":"function"},{"doc":"iex> y_true = Nx.tensor([[0.0, 1.0], [0.0, 0.0]], type: {:f, 32})\n iex> y_pred = Nx.tensor([[1.0, 1.0], [1.0, 0.0]], type: {:f, 32})\n iex> Axon.Metrics.mean_absolute_error(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#mean_absolute_error/2-examples","title":"Examples - Axon.Metrics.mean_absolute_error/2","type":"function"},{"doc":"Computes the precision of the given predictions with\nrespect to the given targets.","ref":"Axon.Metrics.html#precision/3","title":"Axon.Metrics.precision/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#precision/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.precision/3","type":"function"},{"doc":"* 
`:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#precision/3-options","title":"Options - Axon.Metrics.precision/3","type":"function"},{"doc":"iex> Axon.Metrics.precision(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#precision/3-examples","title":"Examples - Axon.Metrics.precision/3","type":"function"},{"doc":"Computes the recall of the given predictions with\nrespect to the given targets.","ref":"Axon.Metrics.html#recall/3","title":"Axon.Metrics.recall/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#recall/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.recall/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#recall/3-options","title":"Options - Axon.Metrics.recall/3","type":"function"},{"doc":"iex> Axon.Metrics.recall(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#recall/3-examples","title":"Examples - Axon.Metrics.recall/3","type":"function"},{"doc":"Returns a function which computes a running average given current average,\nnew observation, and current iteration.","ref":"Axon.Metrics.html#running_average/1","title":"Axon.Metrics.running_average/1","type":"function"},{"doc":"iex> cur_avg = 0.5\n iex> iteration = 1\n iex> y_true = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> y_pred = Nx.tensor([[0, 1], [1, 0], [1, 0]])\n iex> avg_acc = Axon.Metrics.running_average(&Axon.Metrics.accuracy/2)\n iex> avg_acc.(cur_avg, [y_true, y_pred], iteration)\n #Nx.Tensor","ref":"Axon.Metrics.html#running_average/1-examples","title":"Examples - Axon.Metrics.running_average/1","type":"function"},{"doc":"Returns a function which computes a running sum given current sum,\nnew observation, and current iteration.","ref":"Axon.Metrics.html#running_sum/1","title":"Axon.Metrics.running_sum/1","type":"function"},{"doc":"iex> cur_sum = 12\n iex> iteration = 2\n iex> y_true = Nx.tensor([0, 1, 0, 1])\n iex> y_pred = Nx.tensor([1, 1, 0, 1])\n iex> fps = Axon.Metrics.running_sum(&Axon.Metrics.false_positives/2)\n iex> fps.(cur_sum, [y_true, y_pred], iteration)\n #Nx.Tensor","ref":"Axon.Metrics.html#running_sum/1-examples","title":"Examples - Axon.Metrics.running_sum/1","type":"function"},{"doc":"Computes the sensitivity of the given predictions\nwith respect to the given targets.","ref":"Axon.Metrics.html#sensitivity/3","title":"Axon.Metrics.sensitivity/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#sensitivity/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.sensitivity/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#sensitivity/3-options","title":"Options - Axon.Metrics.sensitivity/3","type":"function"},{"doc":"iex> Axon.Metrics.sensitivity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#sensitivity/3-examples","title":"Examples - Axon.Metrics.sensitivity/3","type":"function"},{"doc":"Computes the specificity of the given predictions\nwith respect to the given targets.","ref":"Axon.Metrics.html#specificity/3","title":"Axon.Metrics.specificity/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., 
d_n)$","ref":"Axon.Metrics.html#specificity/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.specificity/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of the predictions.\n Defaults to `0.5`","ref":"Axon.Metrics.html#specificity/3-options","title":"Options - Axon.Metrics.specificity/3","type":"function"},{"doc":"iex> Axon.Metrics.specificity(Nx.tensor([0, 1, 1, 1]), Nx.tensor([1, 0, 1, 1]))\n #Nx.Tensor","ref":"Axon.Metrics.html#specificity/3-examples","title":"Examples - Axon.Metrics.specificity/3","type":"function"},{"doc":"Computes the top-k categorical accuracy.","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3","title":"Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"* `k` - The k in \"top-k\". Defaults to 5.\n * `sparse` - If `y_true` is a sparse tensor. Defaults to `false`.","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3-options","title":"Options - Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"* `y_true` - $(d_0, d_1, ..., d_n)$\n * `y_pred` - $(d_0, d_1, ..., d_n)$","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3-argument-shapes","title":"Argument Shapes - Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([0, 1, 0, 0, 0]), Nx.tensor([0.1, 0.4, 0.3, 0.7, 0.1]), k: 2)\n #Nx.Tensor \n\n iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0, 1, 0], [1, 0, 0]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2)\n #Nx.Tensor \n\n iex> Axon.Metrics.top_k_categorical_accuracy(Nx.tensor([[0], [2]]), Nx.tensor([[0.1, 0.4, 0.7], [0.1, 0.4, 0.7]]), k: 2, sparse: true)\n #Nx.Tensor","ref":"Axon.Metrics.html#top_k_categorical_accuracy/3-examples","title":"Examples - Axon.Metrics.top_k_categorical_accuracy/3","type":"function"},{"doc":"Computes the number of true negative predictions with respect\nto given targets.","ref":"Axon.Metrics.html#true_negatives/3","title":"Axon.Metrics.true_negatives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#true_negatives/3-options","title":"Options - Axon.Metrics.true_negatives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.true_negatives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#true_negatives/3-examples","title":"Examples - Axon.Metrics.true_negatives/3","type":"function"},{"doc":"Computes the number of true positive predictions with respect\nto given targets.","ref":"Axon.Metrics.html#true_positives/3","title":"Axon.Metrics.true_positives/3","type":"function"},{"doc":"* `:threshold` - threshold for truth value of predictions.\n Defaults to `0.5`.","ref":"Axon.Metrics.html#true_positives/3-options","title":"Options - Axon.Metrics.true_positives/3","type":"function"},{"doc":"iex> y_true = Nx.tensor([1, 0, 1, 1, 0, 1, 0])\n iex> y_pred = Nx.tensor([0.8, 0.6, 0.4, 0.2, 0.8, 0.2, 0.2])\n iex> Axon.Metrics.true_positives(y_true, y_pred)\n #Nx.Tensor","ref":"Axon.Metrics.html#true_positives/3-examples","title":"Examples - Axon.Metrics.true_positives/3","type":"function"},{"doc":"Abstraction for modeling a reduction of a dataset with an accumulated\nstate for a number of epochs.\n\nInspired heavily by [PyTorch Ignite](https://pytorch.org/ignite/index.html).\n\nThe main abstraction is the `%Axon.Loop{}` struct, which controls a nested\nreduction of the form:\n\n 
Enum.reduce(1..max_epochs, state, fn epoch, state ->\n Enum.reduce(data, state, &batch_step/2)\n end)\n\n`data` is assumed to be an `Enumerable` or `Stream` of input data which is\nhandled by a processing function, `batch_step`. The purpose of the loop\nabstraction is to take away much of the boilerplate code used in solving machine\nlearning tasks. Tasks such as normalizing a dataset, hyperparameter optimization,\nor training machine learning models boil down to writing one function:\n\n defn batch_step(batch, state) do\n # ...do something with batch...\n updated_state\n end\n\nFor tasks such as training a neural network, `state` will encapsulate things\nsuch as model and optimizer state. For supervised learning tasks, `batch_step`\nmight look something like:\n\n defn batch_step({inputs, targets}, state) do\n %{parameters: params, optimizer_state: optim_state} = state\n\n gradients = grad(params, &objective_fn.(&1, inputs, targets))\n {updates, new_optim_state} = optimizer.(optim_state, params, gradients)\n\n new_params = apply_updates(params, updates)\n\n %{parameters: new_params, optimizer_state: new_optim_state}\n end\n\n`batch_step` takes a batch of `{input, target}` pairs and the current state,\nand updates the model parameters based on the gradients received from some arbitrary\nobjective function. This function will run in a nested loop, iterating over the entire\ndataset for `N` epochs before finally returning the trained model state. By defining\na single function, we've created a training loop that works for most machine learning models.\n\nIn actuality, the loop abstraction accumulates a struct, `%Axon.Loop.State{}`, which looks\nlike (assuming `container` is a generic Elixir container of tensors, e.g. map, tuple, etc.):\n\n %Axon.Loop.State{\n epoch: integer(),\n max_epoch: integer(),\n iteration: integer(),\n max_iteration: integer(),\n metrics: map(string(), container()),\n times: map(integer(), integer()),\n step_state: container()\n }\n\n`batch_step` takes in the batch and the step state field and returns a `step_state`,\nwhich is a generic container of state accumulated at each iteration. The rest of the fields\nin the state struct are updated automatically behind the scenes.\n\nThe loop must start from some initial step state, thus most tasks must also provide\nan additional initialization function to supply some starting point for the step\nstate. For machine learning tasks, the initialization function will return things like\ninitial model parameters and optimizer state.\n\nTypically, the final output of the loop is the accumulated final state; however, you\nmay optionally apply an output transform to extract specific values at the end of the\nloop. For example, `Axon.Loop.trainer/4` by default extracts trained model state:\n\n output_transform = fn state ->\n state.step_state[:model_state]\n end","ref":"Axon.Loop.html","title":"Axon.Loop","type":"module"},{"doc":"The core of the Axon loop consists of the init and step functions. The initialization is an\narity-0 function which provides an initial step state:\n\n init = fn ->\n %{params: Axon.init(model)}\n end\n\nWhile the step function is the `batch_step` function mentioned earlier:\n\n step = fn data, state ->\n new_state = # ...do something...\n new_state\n end\n\nNote that any optimization and training anonymous functions that need to be used in the\n`batch_step` function can be passed as extra arguments. 
For example:\n\n step_with_training_arguments = fn data, state, optimizer_update_fn, state_update_fn ->\n # ...do something...\n end\n\n step = &(step_with_training_arguments.(&1, &2, actual_optimizer_update_fn, actual_state_update_fn))","ref":"Axon.Loop.html#module-initialize-and-step","title":"Initialize and Step - Axon.Loop","type":"module"},{"doc":"Oftentimes you want to compute metrics associated with your training iterations.\nTo accomplish this, you can attach metrics to each `Axon.Loop`. Assuming a `batch_step`\nfunction which looks like:\n\n defn batch_step({inputs, targets}, state) do\n %{parameters: params, optimizer_state: optim_state} = state\n\n gradients = grad(params, &objective_fn.(&1, inputs, targets))\n {updates, new_optim_state} = optimizer.(optim_state, params, gradients)\n\n new_params = apply_updates(params, updates)\n\n # Shown for simplicity, you can optimize this by calculating preds\n # along with the gradient calculation\n preds = model_fn.(params, inputs)\n\n %{\n y_true: targets,\n y_pred: preds,\n parameters: new_params,\n optimizer_state: new_optim_state\n }\n end\n\nYou can attach metrics to this by using `Axon.Loop.metric/5`:\n\n Axon.Loop.loop(&batch_step/2)\n |> Axon.Loop.metric(:accuracy, \"Accuracy\", :running_average, fn %{y_true: y_, y_pred: y} -> [y_, y] end)\n |> Axon.Loop.run(data)\n\nBecause metrics work directly on `step_state`, you typically need to provide an output\ntransform to indicate which values should be passed to your metric function. By default,\nAxon assumes a supervised training task with the fields `:y_true` and `:y_pred` present\nin the step state. See `Axon.Loop.metric/5` for more information.\n\nMetrics will be tracked in the loop state using the user-provided key. Metrics integrate\nseamlessly with the supervised metrics defined in `Axon.Metrics`. You can also use metrics\nto keep running averages of some values in the original dataset.","ref":"Axon.Loop.html#module-metrics","title":"Metrics - Axon.Loop","type":"module"},{"doc":"You can instrument several points in the loop using event handlers. By default, several events\nare fired when running a loop:\n\n events = [\n :started, # After loop state initialization\n :epoch_started, # On epoch start\n :iteration_started, # On iteration start\n :iteration_completed, # On iteration complete\n :epoch_completed, # On epoch complete\n :epoch_halted, # On epoch halt, if early halted\n ]\n\nYou can attach event handlers to events using `Axon.Loop.handle_event/4`:\n\n loop\n |> Axon.Loop.handle_event(:iteration_completed, &log_metrics/1, every: 100)\n |> Axon.Loop.run(data)\n\nThe above will trigger `log_metrics/1` every 100 times the `:iteration_completed` event\nis fired. Event handlers must return a tuple `{status, state}`, where `status` is an\natom with one of the following values:\n\n :continue # Continue epoch, continue looping\n :halt_epoch # Halt the epoch, continue looping\n :halt_loop # Halt looping\n\nAnd `state` is an updated `Axon.Loop.State` struct. Handler functions take as input\nthe current loop state.\n\nIt's important to note that event handlers are triggered in the order they are attached\nto the loop. If you have two handlers on the same event, they will trigger in order:\n\n loop\n |> Axon.Loop.handle_event(:epoch_completed, &normalize_state/1) # Runs first\n |> Axon.Loop.handle_event(:epoch_completed, &log_state/1) # Runs second\n\nYou may provide filters to filter when event handlers trigger. 
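For instance (a hedged sketch; `loop`, `data`, and `log_metrics/1` are assumed from the example above), a filter can also be an arbitrary predicate on the loop state, such as one that only fires on even-numbered epochs:

```elixir
# Sketch: trigger a handler only on even-numbered epochs.
even_epochs? = fn %Axon.Loop.State{epoch: epoch} -> rem(epoch, 2) == 0 end

loop
|> Axon.Loop.handle_event(:epoch_completed, &log_metrics/1, even_epochs?)
|> Axon.Loop.run(data)
```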
See `Axon.Loop.handle_event/4`\nfor more details on valid filters.","ref":"Axon.Loop.html#module-events-and-handlers","title":"Events and Handlers - Axon.Loop","type":"module"},{"doc":"Axon loops are typically created from one of the factory functions provided in this\nmodule:\n\n * `Axon.Loop.loop/3` - Creates a loop from a step function and optional initialization\n and output transform functions.\n\n * `Axon.Loop.trainer/3` - Creates a supervised training loop from a model, loss, and\n optimizer.\n\n * `Axon.Loop.evaluator/1` - Creates a supervised evaluator loop from a model.","ref":"Axon.Loop.html#module-factories","title":"Factories - Axon.Loop","type":"module"},{"doc":"In order to execute a loop, you should use `Axon.Loop.run/3`:\n\n Axon.Loop.run(loop, data, epochs: 10)","ref":"Axon.Loop.html#module-running-loops","title":"Running loops - Axon.Loop","type":"module"},{"doc":"At times you may want to resume a loop from some previous state. You can accomplish this\nwith `Axon.Loop.from_state/2`:\n\n loop\n |> Axon.Loop.from_state(state)\n |> Axon.Loop.run(data)","ref":"Axon.Loop.html#module-resuming-loops","title":"Resuming loops - Axon.Loop","type":"module"},{"doc":"Adds a handler function which saves loop checkpoints on a given\nevent, optionally with metric-based criteria.\n\nBy default, loop checkpoints will be saved at the end of every\nepoch in the current working directory under the `checkpoint/`\npath. Checkpoints are serialized representations of loop state\nobtained from `Axon.Loop.serialize_state/2`. Serialization\noptions will be forwarded to `Axon.Loop.serialize_state/2`.\n\nYou can customize checkpoint events by passing `:event` and `:filter`\noptions:\n\n loop\n |> Axon.Loop.checkpoint(event: :iteration_completed, filter: [every: 50])\n\nCheckpoints are saved under the `checkpoint/` directory with a pattern\nof `checkpoint_{epoch}_{iteration}.ckpt`. You can customize the path and pattern\nwith the `:path` and `:file_pattern` options:\n\n my_file_pattern =\n fn %Axon.Loop.State{epoch: epoch, iteration: iter} ->\n \"checkpoint_#{epoch}_#{iter}\"\n end\n\n loop\n |> Axon.Loop.checkpoint(path: \"my_checkpoints\", file_pattern: my_file_pattern)\n\nIf you'd like to only save checkpoints based on some metric criteria,\nyou can specify the `:criteria` option. `:criteria` must be a valid key\nin metrics:\n\n loop\n |> Axon.Loop.checkpoint(criteria: \"validation_loss\")\n\nThe default criteria mode is `:min`, meaning the min score metric will\nbe considered \"best\" when deciding to save on a given event. Valid modes\nare `:min` and `:max`:\n\n loop\n |> Axon.Loop.checkpoint(criteria: \"validation_accuracy\", mode: :max)","ref":"Axon.Loop.html#checkpoint/2","title":"Axon.Loop.checkpoint/2","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:epoch_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.\n\n * `:patience` - number of given events to wait for improvement. Defaults\n to `3`.\n\n * `:mode` - whether given metric is being minimized or maximized. One of\n `:min`, `:max` or an arity-1 function which returns `true` or `false`.\n Defaults to `:min`.\n\n * `:path` - path to directory to save checkpoints. Defaults to `checkpoint`.\n\n * `:file_pattern` - arity-1 function which returns a string file pattern\n based on the current loop state. 
Defaults to saving checkpoints to files\n `checkpoint_#{epoch}_#{iteration}.ckpt`.","ref":"Axon.Loop.html#checkpoint/2-options","title":"Options - Axon.Loop.checkpoint/2","type":"function"},{"doc":"Deserializes loop state from a binary.\n\nIt is the opposite of `Axon.Loop.serialize_state/2`.\n\nBy default, the step state is deserialized using `Nx.deserialize/2`;\nhowever, this behavior can be changed if step state is an application\nspecific container. For example, if you introduce your own data\nstructure into step_state and you customized the serialization logic,\n`Nx.deserialize/2` will not be sufficient for deserialization - you\nmust pass custom logic with `:deserialize_step_state`.","ref":"Axon.Loop.html#deserialize_state/2","title":"Axon.Loop.deserialize_state/2","type":"function"},{"doc":"Adds a handler function which halts a loop if the given\nmetric does not improve between events.\n\nBy default, this will run after each epoch and track the\nimprovement of a given metric.\n\nYou must specify a metric to monitor and the metric must\nbe present in the loop state. Typically, this will be\na validation metric:\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.validate(model, val_data)\n |> Axon.Loop.early_stop(\"validation_accuracy\")\n\nIt's important to remember that handlers are executed in the\norder they are added to the loop. For example, if you'd like\nto checkpoint a loop after every epoch and use early stopping,\nmost likely you want to add the checkpoint handler before\nthe early stopping handler:\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.checkpoint()\n |> Axon.Loop.early_stop(\"accuracy\")\n\nThat ensures the checkpoint handler always fires, even if the loop\nexits early.","ref":"Axon.Loop.html#early_stop/3","title":"Axon.Loop.early_stop/3","type":"function"},{"doc":"Creates a supervised evaluation step from a model and model state.\n\nThis function is intended for more fine-grained control over the loop\ncreation process. It returns a tuple of `{init_fn, step_fn}` where\n`init_fn` returns an initial step state and `step_fn` performs a\nsingle evaluation step.","ref":"Axon.Loop.html#eval_step/1","title":"Axon.Loop.eval_step/1","type":"function"},{"doc":"Creates a supervised evaluator from a model.\n\nAn evaluator can be used for things such as testing and validation of models\nafter or during training. It assumes `model` is an Axon struct, container of\nstructs, or a tuple of `init` / `apply` functions. `model_state` must be a\ncontainer usable from within `model`.\n\nThe evaluator returns a step state of the form:\n\n %{\n y_true: labels,\n y_pred: predictions\n }\n\nSuch that you can attach any number of supervised metrics to the evaluation\nloop:\n\n model\n |> Axon.Loop.evaluator()\n |> Axon.Loop.metric(:accuracy, \"Accuracy\")\n\nYou must pass a compatible trained model state to `Axon.Loop.run/4` when using\nsupervised evaluation loops. 
For example, if you've bound the result of a training\nrun to `trained_model_state`, you can run the trained model through an evaluation\nrun like this:\n\n model\n |> Axon.Loop.evaluator()\n |> Axon.Loop.run(data, trained_model_state, compiler: EXLA)\n\nThis function applies an output transform which returns the map of metrics accumulated\nover the given loop.","ref":"Axon.Loop.html#evaluator/1","title":"Axon.Loop.evaluator/1","type":"function"},{"doc":"Attaches `state` to the given loop in order to resume looping\nfrom a previous state.\n\nIt's important to note that a loop's attached state takes precedence\nover defined initialization functions. Given the initialization function:\n\n defn init_state(), do: %{foo: 1, bar: 2}\n\nAnd an attached state:\n\n state = %State{step_state: %{foo: 2, bar: 3}}\n\n`init_state/0` will never execute, and instead the initial step state\nof `%{foo: 2, bar: 3}` will be used.","ref":"Axon.Loop.html#from_state/2","title":"Axon.Loop.from_state/2","type":"function"},{"doc":"Adds a handler function to the loop which will be triggered on `event`\nwith an optional filter.\n\nEvents take place at different points during loop execution. The default\nevents are:\n\n events = [\n :started, # After loop state initialization\n :epoch_started, # On epoch start\n :iteration_started, # On iteration start\n :iteration_completed, # On iteration complete\n :epoch_completed, # On epoch complete\n :epoch_halted, # On epoch halt, if early halted\n ]\n\nGenerally, event handlers are side-effecting operations which provide some\nsort of inspection into the loop's progress. It's important to note that\nif you define multiple handlers to be triggered on the same event, they\nwill execute in order from when they were attached to the training\nloop:\n\n loop\n |> Axon.Loop.handle_event(:epoch_started, &normalize_step_state/1) # executes first\n |> Axon.Loop.handle_event(:epoch_started, &log_step_state/1) # executes second\n\nThus, if you have separate handlers which alter or depend on loop state,\nyou need to ensure they are ordered correctly, or combined into a single\nevent handler for maximum control over execution.\n\n`event` must be an atom representing the event to trigger `handler` or a\nlist of atoms indicating `handler` should be triggered on multiple events.\n`event` may be `:all` which indicates the handler should be triggered on\nevery event during loop processing.\n\n`handler` must be an arity-1 function which takes as input loop state and\nreturns `{status, state}`, where `status` is an atom with one of the following\nvalues:\n\n :continue # Continue epoch, continue looping\n :halt_epoch # Halt the epoch, continue looping\n :halt_loop # Halt looping\n\n`filter` is an atom representing a valid filter predicate, a keyword of\npredicate-value pairs, or a function which takes loop state and returns\n`true`, indicating the handler should run, or `false`, indicating the\nhandler should not run. 
Valid predicates are:\n\n :always # Always trigger event\n :once # Trigger on first event firing\n\nValid predicate-value pairs are:\n\n every: N # Trigger every `N` events\n only: N # Trigger only on event `N`\n\n**Warning: If you modify the step state in an event handler, it will trigger\npotentially excessive recompilation and result in significant additional overhead\nduring loop execution.**","ref":"Axon.Loop.html#handle_event/4","title":"Axon.Loop.handle_event/4","type":"function"},{"doc":"Adds a handler function which updates a `Kino.VegaLite` plot.\n\nBy default, this will run after every iteration.\n\nYou must specify a plot to push to and a metric to track. The `:x` axis will be the iteration count, labeled `\"step\"`. The metric must match the name given to the `:y` axis in your `VegaLite` plot:\n\n plot =\n Vl.new()\n |> Vl.mark(:line)\n |> Vl.encode_field(:x, \"step\", type: :quantitative)\n |> Vl.encode_field(:y, \"loss\", type: :quantitative)\n |> Kino.VegaLite.new()\n |> Kino.render()\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.kino_vega_lite_plot(plot, \"loss\")","ref":"Axon.Loop.html#kino_vega_lite_plot/4","title":"Axon.Loop.kino_vega_lite_plot/4","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:iteration_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.","ref":"Axon.Loop.html#kino_vega_lite_plot/4-options","title":"Options - Axon.Loop.kino_vega_lite_plot/4","type":"function"},{"doc":"Adds a handler function which logs the given message produced\nby `message_fn` to the given IO device every `event` satisfying\n`filter`.\n\nIn most cases, this is useful for inspecting the contents of\nthe loop state at intermediate stages. For example, the default\n`trainer` loop factory attaches IO logging of epoch, batch, loss\nand metrics.\n\nIt's also possible to log loop state to files by changing the\ngiven IO device. By default, the IO device is `:stdio`.\n\n`message_fn` should take the loop state and return a binary\nrepresenting the message to be written to the IO device.","ref":"Axon.Loop.html#log/3","title":"Axon.Loop.log/3","type":"function"},{"doc":"Creates a loop from `step_fn`, an optional `init_fn`, and an\noptional `output_transform`.\n\n`step_fn` is an arity-2 function which takes a batch and state\nand returns an updated step state:\n\n defn batch_step(batch, step_state) do\n step_state + 1\n end\n\n`init_fn` by default is an identity function which forwards its\ninitial arguments as the step state. You should define a custom\ninitialization function if you require a different behavior:\n\n defn init_step_state(state) do\n Map.merge(%{foo: 1}, state)\n end\n\nYou may use `state` in conjunction with initialization functions in\n`init_fn`. For example, `train_step/3` uses initial state as initial\nmodel parameters to allow initializing models from partial parameterizations.\n\n`batch_step/2` and `init_step_state/1` are typically called from\nwithin `Nx.Defn.jit/3`. 
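Putting the pieces together, a self-contained sketch of a custom counting loop (the function names and dummy data stream here are illustrative, not from the docs) might look like:

```elixir
# A sketch: count batches with a custom loop.
defn counting_step(_batch, count) do
  count + 1
end

data = Stream.repeatedly(fn -> Nx.tensor(1) end)

Axon.Loop.loop(&counting_step/2)
|> Axon.Loop.run(data, 0, iterations: 100)
```

Here the third argument to `Axon.Loop.run/4` seeds the default (identity) `init_fn`, so the loop starts counting from `0`.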
While JIT-compilation will work with anonymous functions,\n`def`, and `defn`, it is recommended that you use the stricter `defn` to define\nboth functions in order to avoid bugs or cryptic errors.\n\n`output_transform/1` applies a transformation on the final accumulated loop state.\nThis is useful for extracting specific fields from a loop and piping them into\nadditional functions.","ref":"Axon.Loop.html#loop/3","title":"Axon.Loop.loop/3","type":"function"},{"doc":"Adds a metric of the given name to the loop.\n\nA metric is a function which tracks or measures some value with respect\nto values in the step state. For example, when training classification\nmodels, it's common to track the model's accuracy during training:\n\n loop\n |> Axon.Loop.metric(:accuracy, \"Accuracy\")\n\nBy default, metrics assume a supervised learning task and extract the fields\n`[:y_true, :y_pred]` from the step state. If you wish to work on a different\nvalue, you can use an output transform. An output transform is a list of keys\nto extract from the output state, or a function which returns a flattened list\nof values to pass to the given metric function. Values received from output\ntransforms are passed to the given metric using:\n\n value = output_transform.(step_state)\n apply(metric, value)\n\nThus, even if you want your metric to work on a container, your output transform\nmust return a list.\n\n`metric` must be an atom which matches the name of a metric in `Axon.Metrics`, or\nan arbitrary function which returns a tensor or container.\n\n`name` must be a string or atom used to store the computed metric in the loop\nstate. If names conflict, the last attached metric will take precedence:\n\n loop\n |> Axon.Loop.metric(:mean_squared_error, \"Error\") # Will be overwritten\n |> Axon.Loop.metric(:mean_absolute_error, \"Error\") # Will be used\n\nBy default, metrics keep a running average of the metric calculation. You can\noverride this behavior by changing `accumulate`:\n\n loop\n |> Axon.Loop.metric(:true_negatives, \"tn\", :running_sum)\n\nThe accumulation function can be one of the accumulation combinators in `Axon.Metrics`\nor an arity-3 function of the form: `accumulate(acc, obs, i) :: new_acc`.","ref":"Axon.Loop.html#metric/5","title":"Axon.Loop.metric/5","type":"function"},{"doc":"Adds a handler function which monitors the given metric\nand fires some action when the given metric meets some\ncriteria.\n\nThis function is a generalization of handlers such as\n`Axon.Loop.reduce_lr_on_plateau/3` and `Axon.Loop.early_stop/3`.\n\nYou must specify a metric to monitor that is present in\nthe state metrics. This handler will then monitor the value\nof the metric at the specified intervals and fire the specified\nfunction if the criterion is met.\n\nYou must also specify a name for the monitor attached to the\ngiven metric. This will be used to store metadata associated\nwith the monitor.\n\nThe common use of a monitor is to track the improvement of metrics\nand take action if metrics haven't improved after a certain number\nof events. However, you can also set a monitor up to trigger if\na metric hits some criteria (such as a threshold) by passing a\ncustom monitoring mode.","ref":"Axon.Loop.html#monitor/5","title":"Axon.Loop.monitor/5","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:epoch_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.\n\n * `:patience` - number of given events to wait for improvement. 
Defaults\n to `3`.\n\n * `:mode` - whether given metric is being minimized or maximized. One of\n `:min`, `:max` or an arity-1 function which returns `true` or `false`.\n Defaults to `:min`.","ref":"Axon.Loop.html#monitor/5-options","title":"Options - Axon.Loop.monitor/5","type":"function"},{"doc":"Adds a handler function which reduces the learning rate by\nthe given factor if the given metric does not improve between\nevents.\n\nBy default, this will run after each epoch and track the\nimprovement of a given metric.\n\nYou must specify a metric to monitor and the metric must\nbe present in the loop state. Typically, this will be\na validation metric:\n\n model\n |> Axon.Loop.trainer(loss, optim)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.validate(model, val_data)\n |> Axon.Loop.reduce_lr_on_plateau(\"validation_accuracy\", mode: :max)","ref":"Axon.Loop.html#reduce_lr_on_plateau/3","title":"Axon.Loop.reduce_lr_on_plateau/3","type":"function"},{"doc":"* `:event` - event to fire handler on. Defaults to `:epoch_completed`.\n\n * `:filter` - event filter to attach to handler. Defaults to `:always`.\n\n * `:patience` - number of given events to wait for improvement. Defaults\n to `3`.\n\n * `:mode` - whether given metric is being minimized or maximized. Defaults\n to `:min`.\n\n * `:factor` - factor to decrease learning rate by. Defaults to `0.1`.","ref":"Axon.Loop.html#reduce_lr_on_plateau/3-options","title":"Options - Axon.Loop.reduce_lr_on_plateau/3","type":"function"},{"doc":"Runs the given loop on data with the given options.\n\n`loop` must be a valid Axon.Loop struct built from one of the\nloop factories provided in this module.\n\n`data` must be an Enumerable or Stream which yields batches of\ndata on each iteration.","ref":"Axon.Loop.html#run/4","title":"Axon.Loop.run/4","type":"function"},{"doc":"* `:epochs` - max epochs to run loop for. Must be a non-negative integer.\n Defaults to `1`.\n\n * `:iterations` - max iterations to run each epoch. Must be a non-negative\n integer. Defaults to `-1` or no max iterations.\n\n * `:jit_compile?` - whether or not to JIT compile initialization and step\n functions. JIT compilation must be used for gradient computations. Defaults\n to true.\n\n * `:garbage_collect` - whether or not to garbage collect after\n each loop iteration. This may prevent OOMs, but it will slow down training.\n\n * `:strict?` - whether or not to compile step functions strictly. If this flag\n is set, the loop will raise on any cache miss during the training loop. Defaults\n to true.\n\n * `:force_garbage_collect?` - whether or not to force garbage collection after each\n iteration. This may help avoid OOMs when training large models, but it will slow\n training down.\n\n * `:debug` - run loop in debug mode to trace loop progress. Defaults to\n false.\n\n Additional options are forwarded to `Nx.Defn.jit` as JIT-options. If no JIT\n options are set, the default options set with `Nx.Defn.default_options` are\n used.","ref":"Axon.Loop.html#run/4-options","title":"Options - Axon.Loop.run/4","type":"function"},{"doc":"Serializes loop state to a binary for saving and loading\na loop from previous states.\n\nYou can consider the serialized state to be a checkpoint of\nall state at a given iteration and epoch.\n\nBy default, the step state is serialized using `Nx.serialize/2`;\nhowever, this behavior can be changed if step state is an application\nspecific container. 
For example, if you introduce your own data\nstructure into step_state, `Nx.serialize/2` will not be sufficient\nfor serialization - you must pass custom serialization as an option\nwith `:serialize_step_state`.\n\nAdditional `opts` controls serialization options such as compression.\nIt is forwarded to `:erlang.term_to_binary/2`.","ref":"Axon.Loop.html#serialize_state/2","title":"Axon.Loop.serialize_state/2","type":"function"},{"doc":"Creates a supervised train step from a model, loss function, and\noptimizer.\n\nThis function is intended for more fine-grained control over the loop\ncreation process. It returns a tuple of `{init_fn, step_fn}` where `init_fn`\nis an initialization function which returns an initial step state and\n`step_fn` is a supervised train step constructed from `model`, `loss`,\nand `optimizer`.\n\n`model` must be an Axon struct, a valid defn container\nof Axon structs, or a `{init_fn, apply_fn}`-tuple where `init_fn` is\nan arity-2 function which initializes the model state and `apply_fn` is\nan arity-2 function which applies the forward pass of the model. The forward\npass of the model must return a map with keys `:prediction` and `:state`\nrepresenting the model's prediction and updated state for layers which\naggregate state during training.\n\n`loss` must be an atom which matches a function in `Axon.Losses`, a list\nof `{loss, weight}` tuples representing a basic weighted loss function\nfor multi-output models, or an arity-2 function representing a custom loss\nfunction.\n\n`optimizer` must be an atom matching the name of a valid optimizer in `Polaris.Optimizers`,\nor a `{init_fn, update_fn}` tuple where `init_fn` is an arity-1 function which\ninitializes the optimizer state from the model parameters and `update_fn` is an\narity-3 function that receives `(gradient, optimizer_state, model_parameters)` and\nscales gradient updates with respect to input parameters, optimizer state, and gradients.\nThe `update_fn` returns `{scaled_updates, optimizer_state}`, which can then be applied to\nthe model through `model_parameters = Polaris.Updates.apply_updates(model_parameters, scaled_updates)`.\nSee `Polaris.Updates` for more information on building optimizers.","ref":"Axon.Loop.html#train_step/4","title":"Axon.Loop.train_step/4","type":"function"},{"doc":"* `:seed` - seed to use when constructing models. Seed controls random initialization\n of model parameters. Defaults to no seed which constructs a random seed for you at\n model build time.\n\n * `:loss_scale` - type of loss-scaling to use, if any. Loss-scaling is necessary when\n doing mixed precision training for numerical stability. Defaults to `:identity` or\n no loss-scaling.\n\n * `:gradient_accumulation_steps` - number of gradient accumulation steps to take during\n training. Gradient accumulation decreases the number of updates by accumulating gradients\n between steps, increasing the effective batch size on smaller devices. Defaults to 1.","ref":"Axon.Loop.html#train_step/4-options","title":"Options - Axon.Loop.train_step/4","type":"function"},{"doc":"Creates a supervised training loop from a model, loss function,\nand optimizer.\n\nThis function is useful for training models on most standard supervised\nlearning tasks. It assumes data consists of tuples of input-target pairs,\ne.g. 
`[{x0, y0}, {x1, y1}, ..., {xN, yN}]` where `x0` and `y0` are batched\ntensors or containers of batched tensors.\n\nIt defines an initialization function which first initializes model state\nusing the given model and then initializes optimizer state using the initial\nmodel state. The step function uses a differentiable objective function\ndefined with respect to the model parameters, input data, and target data\nusing the given loss function. It then updates model parameters using the\ngiven optimizer in order to minimize loss with respect to the model parameters.\n\n`model` must be an Axon struct, a valid defn container\nof Axon structs, or a `{init_fn, apply_fn}`-tuple where `init_fn` is\nan arity-2 function which initializes the model state and `apply_fn` is\nan arity-2 function which applies the forward pass of the model.\n\n`loss` must be an atom which matches a function in `Axon.Losses`, a list\nof `{loss, weight}` tuples representing a basic weighted loss function\nfor multi-output models, or an arity-2 function representing a custom loss\nfunction.\n\n`optimizer` must be an atom matching the name of a valid optimizer in `Polaris.Optimizers`,\nor a `{init_fn, update_fn}` tuple where `init_fn` is an arity-1 function which\ninitializes the optimizer state from attached parameters and `update_fn` is an\narity-3 function which scales gradient updates with respect to input parameters,\noptimizer state, and gradients. See `Polaris.Updates` for more information on building\noptimizers.\n\nThis function creates a step function which outputs a map consisting of the following\nfields for `step_state`:\n\n %{\n y_pred: tensor() | container(tensor()), # Model predictions for use in metrics\n y_true: tensor() | container(tensor()), # True labels for use in metrics\n loss: tensor(), # Running average of loss over epoch\n model_state: container(tensor()), # Model parameters and state\n optimizer_state: container(tensor()) # Optimizer state associated with each parameter\n }","ref":"Axon.Loop.html#trainer/4","title":"Axon.Loop.trainer/4","type":"function"},{"doc":"#","ref":"Axon.Loop.html#trainer/4-examples","title":"Examples - Axon.Loop.trainer/4","type":"function"},{"doc":"data = Stream.zip(input, target)\n\n model = Axon.input(\"input\", shape: {nil, 32}) |> Axon.dense(1, activation: :sigmoid)\n\n model\n |> Axon.Loop.trainer(:binary_cross_entropy, :adam)\n |> Axon.Loop.run(data)\n\n#","ref":"Axon.Loop.html#trainer/4-basic-usage","title":"Basic usage - Axon.Loop.trainer/4","type":"function"},{"doc":"model\n |> Axon.Loop.trainer(:binary_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.05))\n |> Axon.Loop.run(data)\n\n#","ref":"Axon.Loop.html#trainer/4-customizing-optimizer","title":"Customizing Optimizer - Axon.Loop.trainer/4","type":"function"},{"doc":"loss_fn = fn y_true, y_pred -> Nx.mean(Nx.cos(Nx.subtract(y_true, y_pred))) end\n\n model\n |> Axon.Loop.trainer(loss_fn, Polaris.Optimizers.rmsprop(learning_rate: 0.01))\n |> Axon.Loop.run(data)\n\n#","ref":"Axon.Loop.html#trainer/4-custom-loss","title":"Custom loss - Axon.Loop.trainer/4","type":"function"},{"doc":"model = {Axon.input(\"input_0\", shape: {nil, 1}), Axon.input(\"input_1\", shape: {nil, 2})}\n loss_weights = [mean_squared_error: 0.5, mean_absolute_error: 0.5]\n\n model\n |> Axon.Loop.trainer(loss_weights, :sgd)\n |> Axon.Loop.run(data)","ref":"Axon.Loop.html#trainer/4-multiple-objectives-with-multi-output-model","title":"Multiple objectives with multi-output model - Axon.Loop.trainer/4","type":"function"},{"doc":"* `:log` - training loss and 
metric log interval. Set to 0 to silence\n training logs. Defaults to `50`.\n\n * `:seed` - seed to use when constructing models. Seed controls random initialization\n of model parameters. Defaults to no seed which constructs a random seed for you at\n model build time.\n\n * `:loss_scale` - type of loss-scaling to use, if any. Loss-scaling is necessary when\n doing mixed precision training for numerical stability. Defaults to `:identity` or\n no loss-scaling.\n\n * `:gradient_accumulation_steps` - number of gradient accumulation steps to take during\n training. Gradient accumulation decreases the number of updates by accumulating gradients\n between steps, increasing the effective batch size on smaller devices. Defaults to 1.","ref":"Axon.Loop.html#trainer/4-options","title":"Options - Axon.Loop.trainer/4","type":"function"},{"doc":"Adds a handler function which tests the performance of `model`\nagainst the given validation set.\n\nThis handler assumes the loop state matches the state initialized\nin a supervised training loop. Typically, you'd call this immediately\nafter creating a supervised training loop:\n\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.validate(model, validation_data)\n\nPlease note that you must pass the same (or an equivalent) model\ninto this method so it can be used during the validation loop. The\nmetrics which are computed are those which are present BEFORE the\nvalidation handler was added to the loop. For the following loop:\n\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(:mean_absolute_error)\n |> Axon.Loop.validate(model, validation_data)\n |> Axon.Loop.metric(:binary_cross_entropy)\n\nonly `:mean_absolute_error` will be computed at validation time.\n\nThe returned loop state is altered to contain validation\nmetrics for use in later handlers such as early stopping and model\ncheckpoints. Since event handlers are executed in the order they are\ndeclared in the training loop, you MUST call\nthis method before any other handler which expects or may use\nvalidation metrics.\n\nBy default the validation loop runs after every epoch; however, you\ncan customize it by overriding the default event and event filters:\n\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(:mean_absolute_error)\n |> Axon.Loop.validate(model, validation_data, event: :iteration_completed, filter: [every: 10_000])\n |> Axon.Loop.metric(:binary_cross_entropy)","ref":"Axon.Loop.html#validate/4","title":"Axon.Loop.validate/4","type":"function"},{"doc":"Accumulated state in an Axon.Loop.\n\nLoop state is a struct:\n\n %State{\n epoch: integer(),\n max_epoch: integer(),\n iteration: integer(),\n max_iteration: integer(),\n metrics: map(string(), container()),\n times: map(integer(), integer()),\n step_state: container(),\n handler_metadata: container()\n }\n\n`epoch` is the current epoch, starting at 0, of the nested loop.\nDefaults to 0.\n\n`max_epoch` is the maximum number of epochs the loop should run\nfor. Defaults to 1.\n\n`iteration` is the current iteration of the inner loop. In supervised\nsettings, this will be the current batch. Defaults to 0.\n\n`max_iteration` is the maximum number of iterations the loop should\nrun a given epoch for. Defaults to -1 (no max).\n\n`metrics` is a map of `%{\"metric_name\" => value}` which accumulates metrics\nover the course of loop processing. 
Defaults to an empty map.\n\n`times` is a map of `%{epoch_number => value}` which maps a given epoch\nto the processing time. Defaults to an empty map.\n\n`step_state` is the step state as defined by the loop's processing\ninitialization and update functions. `step_state` is a required field.\n\n`handler_metadata` is a metadata field for storing loop handler metadata.\nFor example, loop checkpoints with specific metric criteria can store\nprevious best metrics in the handler meta for use between iterations.\n\n`event_counts` is a metadata field which stores information about the number\nof times each event has been fired. This is useful when creating custom filters.\n\n`status` refers to the loop state status after the loop has executed. You can\nuse this to determine if the loop ran to completion or if it was halted early.","ref":"Axon.Loop.State.html","title":"Axon.Loop.State","type":"module"},{"doc":"","ref":"Axon.CompileError.html","title":"Axon.CompileError","type":"exception"},{"doc":"","ref":"Axon.CompileError.html#message/1","title":"Axon.CompileError.message/1","type":"function"},{"doc":"# Axon Guides\n\nAxon is a library for creating and training neural networks in Elixir. The Axon guides are a collection of Livebooks designed to introduce Axon's APIs and design decisions from the bottom-up. After working through the guides, you will feel comfortable and confident working with Axon and using Axon for your next deep learning problem.","ref":"guides.html","title":"Axon Guides","type":"extras"},{"doc":"* [Your first Axon model](model_creation/your_first_axon_model.livemd)\n* [Sequential models](model_creation/sequential_models.livemd)\n* [Complex models](model_creation/complex_models.livemd)\n* [Multi-input / multi-output models](model_creation/multi_input_multi_output_models.livemd)\n* [Custom layers](model_creation/custom_layers.livemd)\n* [Model hooks](model_creation/model_hooks.livemd)","ref":"guides.html#model-creation","title":"Model Creation - Axon Guides","type":"extras"},{"doc":"* [Accelerating Axon](model_execution/accelerating_axon.livemd)\n* [Training and inference mode](model_execution/training_and_inference_mode.livemd)","ref":"guides.html#model-execution","title":"Model Execution - Axon Guides","type":"extras"},{"doc":"* [Your first training loop](training_and_evaluation/your_first_training_loop.livemd)\n* [Instrumenting loops with metrics](training_and_evaluation/instrumenting_loops_with_metrics.livemd)\n* [Your first evaluation loop](training_and_evaluation/your_first_evaluation_loop.livemd)\n* [Using loop event handlers](training_and_evaluation/using_loop_event_handlers.livemd)\n* [Custom models, loss functions, and optimizers](training_and_evaluation/custom_models_loss_optimizers.livemd)\n* [Writing custom metrics](training_and_evaluation/writing_custom_metrics.livemd)\n* [Writing custom event handlers](training_and_evaluation/writing_custom_event_handlers.livemd)","ref":"guides.html#training-and-evaluation","title":"Training and Evaluation - Axon Guides","type":"extras"},{"doc":"* [Converting ONNX models to Axon](serialization/onnx_to_axon.livemd)","ref":"guides.html#serialization","title":"Serialization - Axon Guides","type":"extras"},{"doc":"# Your first Axon model\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"your_first_axon_model.html","title":"Your first Axon model","type":"extras"},{"doc":"Axon is a library for creating and training neural networks in Elixir. 
Everything in Axon centers around the `%Axon{}` struct which represents an instance of an Axon model.\n\nModels are just graphs which represent the transformation and flow of input data to a desired output. Really, you can think of models as representing a single computation or function. An Axon model, when executed, takes data as input and returns transformed data as output.\n\nAll Axon models start with a declaration of input nodes. These are the root nodes of your computation graph, and correspond to the actual input data you want to send to Axon:\n\n```elixir\ninput = Axon.input(\"data\")\n```\n\n\n\n```\n#Axon \n```\n\nTechnically speaking, `input` is now a valid Axon model which you can inspect, execute, and initialize. You can visualize how data flows through the graph using `Axon.Display.as_graph/2`:\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(input, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n;\n```\n\nNotice the execution flow is just a single node, because your graph only consists of an input node! You pass data in and the model spits the same data back out, without any intermediate transformations.\n\nYou can see this in action by actually executing your model. You can build the `%Axon{}` struct into its `initialization` and `forward` functions by calling `Axon.build/2`. This pattern of \"lowering\" or transforming the `%Axon{}` data structure into other functions or representations is very common in Axon. By simply traversing the data structure, you can create useful functions, execution visualizations, and more!\n\n```elixir\n{init_fn, predict_fn} = Axon.build(input)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\nNotice that `Axon.build/2` returns a tuple of `{init_fn, predict_fn}`. `init_fn` has the signature:\n\n```\ninit_fn.(template :: map(tensor) | tensor, initial_params :: map) :: map(tensor)\n```\n\nwhile `predict_fn` has the signature:\n\n```\npredict_fn.(params :: map(tensor), input :: map(tensor) | tensor)\n```\n\n`init_fn` returns all of your model's trainable parameters and state. You need to pass a template of the expected inputs because the shape of certain model parameters often depends on the shape of model inputs. You also need to pass any initial parameters you want your model to start with. This is useful for things like transfer learning, which you can read about in another guide.\n\n`predict_fn` returns transformed inputs from your model's trainable parameters and the given inputs.\n\n```elixir\nparams = init_fn.(Nx.template({1, 8}, :f32), %{})\n```\n\n\n\n```\n%{}\n```\n\nIn this example, you use `Nx.template/2` to create a *template tensor*, which is a placeholder that does not actually consume any memory. Templates are useful for initialization because you don't actually need to know anything about your inputs other than their shape and type.\n\nNotice `init_fn` returned an empty map because your model does not have any trainable parameters. 
This should make sense because it's just an input layer.\n\nNow you can pass these trainable parameters to `predict_fn` along with some input to actually execute your model:\n\n```elixir\npredict_fn.(params, Nx.iota({1, 8}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAnd your model just returned the given input, as expected!","ref":"your_first_axon_model.html#your-first-model","title":"Your first model - Your first Axon model","type":"extras"},{"doc":"# Sequential models\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"sequential_models.html","title":"Sequential models","type":"extras"},{"doc":"In the [last guide](your_first_axon_model.livemd), you created a simple identity model which just returned the input. Of course, you would never actually use Axon for such purposes. You want to create real neural networks!\n\nIn equivalent frameworks in the Python ecosystem such as Keras and PyTorch, there is a concept of *sequential models*. Sequential models are named after the sequential manner in which data flows through them: they transform the input through a series of successive transformations.\n\nIf you're an experienced Elixir programmer, this paradigm of sequential transformations might sound a lot like what happens when using the pipe (`|>`) operator. In Elixir, it's common to see code blocks like:\n\n\n\n```elixir\nlist\n|> Enum.map(fn x -> x + 1 end)\n|> Enum.filter(&rem(&1, 2) == 0)\n|> Enum.count()\n```\n\nThe snippet above passes `list` through a sequence of transformations. You can apply this same paradigm in Axon to create sequential models. In fact, creating sequential models is so natural with Elixir's pipe operator that Axon does not need a distinct *sequential* construct. To create a sequential model, you just pass Axon models through successive transformations in the Axon API:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(32)\n |> Axon.activation(:relu)\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(1)\n |> Axon.activation(:softmax)\n```\n\n\n\n```\n#Axon \n```\n\nIf you visualize this model, it's easy to see how data flows sequentially through it:\n\n```elixir\ntemplate = Nx.template({2, 16}, :f32)\nAxon.Display.as_graph(model, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 16}\"/];\n4[\"dense_0 (:dense) {2, 32}\"];\n5[\"relu_0 (:relu) {2, 32}\"];\n6[\"dropout_0 (:dropout) {2, 32}\"];\n7[\"dense_1 (:dense) {2, 1}\"];\n8[\"softmax_0 (:softmax) {2, 1}\"];\n7 --> 8;\n6 --> 7;\n5 --> 6;\n4 --> 5;\n3 --> 4;\n```\n\nYour model is more involved and as a result so is the execution graph! Now, using the same constructs from the last section, you can build and run your model:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\n```elixir\nparams = init_fn.(template, %{})\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nWow! Notice that this model actually has trainable parameters. You can see that the parameter map is just a regular Elixir map. Each top-level entry maps to a layer with a key corresponding to that layer's name and a value corresponding to that layer's trainable parameters. 
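Because the parameter map is an ordinary Elixir map, you can pull out any individual tensor with regular map access - a quick sketch, using the layer and parameter names shown in the output above:\n\n```elixir\n# Access the kernel tensor of the first dense layer by key\nparams[\"dense_0\"][\"kernel\"]\n```\n\n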
Each layer's individual trainable parameters are given layer-specific names and map directly to Nx tensors.\n\nNow you can use these `params` with your `predict_fn`:\n\n```elixir\npredict_fn.(params, Nx.iota({2, 16}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAnd voila! You've successfully created and used a sequential model in Axon!","ref":"sequential_models.html#creating-a-sequential-model","title":"Creating a sequential model - Sequential models","type":"extras"},{"doc":"# Complex models\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"complex_models.html","title":"Complex models","type":"extras"},{"doc":"Not all models you'd want to create fit cleanly in the *sequential* paradigm. Some models require a more flexible API. Fortunately, because Axon models are just Elixir data structures, you can manipulate them and decompose architectures as you would any other Elixir program:\n\n```elixir\ninput = Axon.input(\"data\")\n\nx1 = input |> Axon.dense(32)\nx2 = input |> Axon.dense(64) |> Axon.relu() |> Axon.dense(32)\n\nout = Axon.add(x1, x2)\n```\n\n\n\n```\n#Axon \n```\n\nIn the snippet above, your model branches `input` into `x1` and `x2`. Each branch performs a different set of transformations; however, at the end the branches are merged with an `Axon.add/3`. You might sometimes see layers like `Axon.add/3` called *combinators*. Really, they're just layers that operate on multiple Axon models at once - typically to merge some branches together.\n\n`out` represents your final Axon model.\n\nIf you visualize this model, you can see the full effect of the branching:\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n4[\"dense_0 (:dense) {2, 32}\"];\n5[\"dense_1 (:dense) {2, 64}\"];\n6[\"relu_0 (:relu) {2, 64}\"];\n7[\"dense_2 (:dense) {2, 32}\"];\n8[\"container_0 (:container) {{2, 32}, {2, 32}}\"];\n9[\"add_0 (:add) {2, 32}\"];\n8 --> 9;\n7 --> 8;\n4 --> 8;\n6 --> 7;\n5 --> 6;\n3 --> 5;\n3 --> 4;\n```\n\nAnd you can use `Axon.build/2` on `out` as you would any other Axon model:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\n```elixir\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAs your architectures grow in complexity, you might find yourself reaching for better abstractions to organize your model creation code. For example, PyTorch models are often organized into `nn.Module`s. The equivalent of an `nn.Module` in Axon is a regular Elixir function. 
If you're translating models from PyTorch to Axon, it's natural to create one Elixir function per `nn.Module`.\n\nYou should write your models as you would write any other Elixir code - you don't need to worry about any framework-specific constructs:\n\n```elixir\ndefmodule MyModel do\n def model() do\n Axon.input(\"data\")\n |> conv_block()\n |> Axon.flatten()\n |> dense_block()\n |> dense_block()\n |> Axon.dense(1)\n end\n\n defp conv_block(input) do\n residual = input\n\n x = input |> Axon.conv(3, padding: :same) |> Axon.mish()\n\n x\n |> Axon.add(residual)\n |> Axon.max_pool(kernel_size: {2, 2})\n end\n\n defp dense_block(input) do\n input |> Axon.dense(32) |> Axon.relu()\n end\nend\n```\n\n\n\n```\n{:module, MyModel, <<70, 79, 82, 49, 0, 0, 8, ...>>, {:dense_block, 1}}\n```\n\n```elixir\nmodel = MyModel.model()\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\ntemplate = Nx.template({1, 28, 28, 3}, :f32)\nAxon.Display.as_graph(model, template)\n```\n\n\n\n```mermaid\ngraph TD;\n10[/\"data (:input) {1, 28, 28, 3}\"/];\n11[\"conv_0 (:conv) {1, 28, 28, 3}\"];\n12[\"mish_0 (:mish) {1, 28, 28, 3}\"];\n13[\"container_0 (:container) {{1, 28, 28, 3}, {1, 28, 28, 3}}\"];\n14[\"add_0 (:add) {1, 28, 28, 3}\"];\n15[\"max_pool_0 (:max_pool) {1, 14, 14, 3}\"];\n16[\"flatten_0 (:flatten) {1, 588}\"];\n17[\"dense_0 (:dense) {1, 32}\"];\n18[\"relu_0 (:relu) {1, 32}\"];\n19[\"dense_1 (:dense) {1, 32}\"];\n20[\"relu_1 (:relu) {1, 32}\"];\n21[\"dense_2 (:dense) {1, 1}\"];\n20 --> 21;\n19 --> 20;\n18 --> 19;\n17 --> 18;\n16 --> 17;\n15 --> 16;\n14 --> 15;\n13 --> 14;\n10 --> 13;\n12 --> 13;\n11 --> 12;\n10 --> 11;\n```","ref":"complex_models.html#creating-more-complex-models","title":"Creating more complex models - Complex models","type":"extras"},{"doc":"# Multi-input / multi-output models\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"multi_input_multi_output_models.html","title":"Multi-input / multi-output models","type":"extras"},{"doc":"Sometimes your application requires multiple inputs. To use multiple inputs in an Axon model, you just need to declare multiple inputs in your graph:\n\n```elixir\ninput_1 = Axon.input(\"input_1\")\ninput_2 = Axon.input(\"input_2\")\n\nout = Axon.add(input_1, input_2)\n```\n\n\n\n```\n#Axon \n```\n\nNotice when you inspect the model, it tells you what your model's inputs are up front. 
You can also get metadata about your model inputs programmatically with `Axon.get_inputs/1`:\n\n```elixir\nAxon.get_inputs(out)\n```\n\n\n\n```\n%{\"input_1\" => nil, \"input_2\" => nil}\n```\n\nEach input is uniquely named, so you can pass inputs by name to inspection and execution functions with a map:\n\n```elixir\ninputs = %{\n \"input_1\" => Nx.template({2, 8}, :f32),\n \"input_2\" => Nx.template({2, 8}, :f32)\n}\n\nAxon.Display.as_graph(out, inputs)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"input_1 (:input) {2, 8}\"/];\n4[/\"input_2 (:input) {2, 8}\"/];\n5[\"container_0 (:container) {{2, 8}, {2, 8}}\"];\n6[\"add_0 (:add) {2, 8}\"];\n5 --> 6;\n4 --> 5;\n3 --> 5;\n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(inputs, %{})\n```\n\n\n\n```\n%{}\n```\n\n```elixir\ninputs = %{\n \"input_1\" => Nx.iota({2, 8}, type: :f32),\n \"input_2\" => Nx.iota({2, 8}, type: :f32)\n}\n\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nIf you forget a required input, Axon will raise:\n\n```elixir\npredict_fn.(params, %{\"input_1\" => Nx.iota({2, 8}, type: :f32)})\n```","ref":"multi_input_multi_output_models.html#creating-multi-input-models","title":"Creating multi-input models - Multi-input / multi-output models","type":"extras"},{"doc":"Depending on your application, you might also want your model to have multiple outputs. You can achieve this by using `Axon.container/2` to wrap multiple nodes into any supported Nx container:\n\n```elixir\ninp = Axon.input(\"data\")\n\nx1 = inp |> Axon.dense(32) |> Axon.relu()\nx2 = inp |> Axon.dense(64) |> Axon.relu()\n\nout = Axon.container({x1, x2})\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n7[/\"data (:input) {2, 8}\"/];\n8[\"dense_0 (:dense) {2, 32}\"];\n9[\"relu_0 (:relu) {2, 32}\"];\n10[\"dense_1 (:dense) {2, 64}\"];\n11[\"relu_1 (:relu) {2, 64}\"];\n12[\"container_0 (:container) {{2, 32}, {2, 64}}\"];\n11 --> 12;\n9 --> 12;\n10 --> 11;\n7 --> 10;\n8 --> 9;\n7 --> 8;\n```\n\nWhen executed, containers will return a data structure which matches their input structure:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n{#Nx.Tensor ,\n #Nx.Tensor }\n```\n\nYou can output maps as well:\n\n```elixir\nout = Axon.container(%{x1: x1, x2: x2})\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n%{\n x1: #Nx.Tensor ,\n x2: #Nx.Tensor \n}\n```\n\nContainers even support arbitrary nesting:\n\n```elixir\nout = Axon.container({%{x1: {x1, x2}, x2: %{x1: x1, x2: {x2}}}})\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n{%{\n x1: {#Nx.Tensor ,\n #Nx.Tensor },\n x2: %{\n x1: #Nx.Tensor ,\n x2: {#Nx.Tensor }\n }\n }}\n```","ref":"multi_input_multi_output_models.html#creating-multi-output-models","title":"Creating multi-output models - Multi-input / multi-output models","type":"extras"},{"doc":"# Custom layers\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:kino, \">= 0.9.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"custom_layers.html","title":"Custom layers","type":"extras"},{"doc":"While Axon has a plethora of built-in layers, more than likely you'll run into 
a case where you need something not provided by the framework. In these instances, you can use *custom layers*.\n\nTo Axon, layers are really just `defn` implementations with special Axon inputs. Every layer in Axon (including the built-in layers) is implemented with the `Axon.layer/3` function. The API of `Axon.layer/3` intentionally mirrors the API of `Kernel.apply/2`. To declare a custom layer, you need 2 things:\n\n1. A `defn` implementation\n2. Inputs\n\nThe `defn` implementation looks like any other `defn` you'd write; however, it must always account for additional `opts` as an argument:\n\n```elixir\ndefmodule CustomLayers0 do\n import Nx.Defn\n\n defn my_layer(input, opts \\\\ []) do\n opts = keyword!(opts, mode: :train, alpha: 1.0)\n\n input\n |> Nx.sin()\n |> Nx.multiply(opts[:alpha])\n end\nend\n```\n\n\n\n```\n{:module, CustomLayers0, <<70, 79, 82, 49, 0, 0, 10, ...>>, true}\n```\n\nRegardless of the options you configure your layer to accept, the `defn` implementation will always receive a `:mode` option indicating whether the model is running in training or inference mode. You can customize the behavior of your layer depending on the mode.\n\nWith an implementation defined, you need only call `Axon.layer/3` to apply your custom layer to an Axon input:\n\n```elixir\ninput = Axon.input(\"data\")\n\nout = Axon.layer(&CustomLayers0.my_layer/2, [input])\n```\n\n\n\n```\n#Axon \n```\n\nNow you can inspect and execute your model as normal:\n\n```elixir\ntemplate = Nx.template({2, 8}, :f32)\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n4[\"custom_0 (:custom) {2, 8}\"];\n3 --> 4;\n```\n\nNotice that custom layers render with a default operation marked as `:custom`. This can make it difficult to determine which layer is which during inspection. You can control the rendering by passing `:op_name` to `Axon.layer/3`:\n\n```elixir\nout = Axon.layer(&CustomLayers0.my_layer/2, [input], op_name: :my_layer)\n\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n5[\"my_layer_0 (:my_layer) {2, 8}\"];\n3 --> 5;\n```\n\nYou can also control the name of your layer via the `:name` option. All other options are forwarded to the layer implementation function:\n\n```elixir\nout =\n Axon.layer(&CustomLayers0.my_layer/2, [input],\n name: \"layer\",\n op_name: :my_layer,\n alpha: 2.0\n )\n\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n6[\"layer (:my_layer) {2, 8}\"];\n3 --> 6;\n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\n```\n\n\n\n```\n%{}\n```\n\n```elixir\npredict_fn.(params, Nx.iota({2, 8}, type: :f32))\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nNotice that this model does not have any trainable parameters because none of the layers have trainable parameters. You can introduce trainable parameters by passing inputs created with `Axon.param/3` to `Axon.layer/3`. 
For example, you can modify your original custom layer to take an additional trainable parameter:\n\n```elixir\ndefmodule CustomLayers1 do\n import Nx.Defn\n\n defn my_layer(input, alpha, _opts \\\\ []) do\n input\n |> Nx.sin()\n |> Nx.multiply(alpha)\n end\nend\n```\n\n\n\n```\n{:module, CustomLayers1, <<70, 79, 82, 49, 0, 0, 10, ...>>, true}\n```\n\nAnd then construct the layer with a regular Axon input and a trainable parameter:\n\n```elixir\nalpha = Axon.param(\"alpha\", fn _ -> {} end)\n\nout = Axon.layer(&CustomLayers1.my_layer/3, [input, alpha], op_name: :my_layer)\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(out)\nparams = init_fn.(template, %{})\n```\n\n\n\n```\n%{\n \"my_layer_0\" => %{\n \"alpha\" => #Nx.Tensor \n }\n}\n```\n\nNotice how your model now initializes with a trainable parameter `\"alpha\"` for your custom layer. Each parameter requires a unique (per-layer) string name and a function which determines the parameter's shape from the layer's input shapes.\n\n\n\nIf you plan on re-using custom layers in many locations, it's recommended that you wrap them in an Elixir function as an interface:\n\n```elixir\ndefmodule CustomLayers2 do\n import Nx.Defn\n\n def my_layer(%Axon{} = input, opts \\\\ []) do\n opts = Keyword.validate!(opts, [:name])\n alpha = Axon.param(\"alpha\", fn _ -> {} end)\n\n Axon.layer(&my_layer_impl/3, [input, alpha], name: opts[:name], op_name: :my_layer)\n end\n\n defnp my_layer_impl(input, alpha, _opts \\\\ []) do\n input\n |> Nx.sin()\n |> Nx.multiply(alpha)\n end\nend\n```\n\n\n\n```\n{:module, CustomLayers2, <<70, 79, 82, 49, 0, 0, 12, ...>>, true}\n```\n\n```elixir\nout =\n input\n |> CustomLayers2.my_layer()\n |> CustomLayers2.my_layer()\n |> Axon.dense(1)\n```\n\n\n\n```\n#Axon \n```\n\n```elixir\nAxon.Display.as_graph(out, template)\n```\n\n\n\n```mermaid\ngraph TD;\n3[/\"data (:input) {2, 8}\"/];\n8[\"my_layer_0 (:my_layer) {2, 8}\"];\n9[\"my_layer_1 (:my_layer) {2, 8}\"];\n10[\"dense_0 (:dense) {2, 1}\"];\n9 --> 10;\n8 --> 9;\n3 --> 8;\n```","ref":"custom_layers.html#creating-custom-layers","title":"Creating custom layers - Custom layers","type":"extras"},{"doc":"# Model hooks\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"model_hooks.html","title":"Model hooks","type":"extras"},{"doc":"Sometimes it's useful to inspect or visualize the values of intermediate layers in your model during the forward or backward pass. For example, it's common to visualize the gradients of activation functions to ensure your model is learning in a stable manner. Axon supports this functionality via model hooks.\n\nModel hooks are a means of unidirectional communication with an executing model. Hooks are unidirectional in the sense that you can only **receive** information from your model, and not send information back.\n\nHooks are attached per-layer and can execute at 4 different points in model execution: on the pre-forward, forward, or backward pass of the model or during model initialization. You can also configure the same hook to execute on all of these events. 
You can attach hooks to models using `Axon.attach_hook/3`:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_forward) end, on: :forward)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_init) end, on: :initialize)\n |> Axon.relu()\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :relu) end, on: :forward)\n\n{init_fn, predict_fn} = Axon.build(model)\n\ninput = Nx.iota({2, 4}, type: :f32)\nparams = init_fn.(input, %{})\n```\n\n\n\n```\ndense_init: %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n}\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nNotice how during initialization the `:dense_init` hook fired and inspected the layer's parameters. Now when executing, you'll see outputs for `:dense_forward` and `:relu`:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\nrelu: #Nx.Tensor \n```\n\n\n\n```\n#Nx.Tensor \n```\n\nIt's important to note that hooks execute in the order they were attached to a layer. If you attach 2 hooks to the same layer which execute different functions on the same event, they will run in order:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook1) end, on: :forward)\n |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook2) end, on: :forward)\n |> Axon.relu()\n\n{init_fn, predict_fn} = Axon.build(model)\nparams = init_fn.(input, %{})\n\npredict_fn.(params, input)\n```\n\n\n\n```\nhook2: #Nx.Tensor \n```\n\n\n\n```\n#Nx.Tensor \n```\n\nNotice that `:hook1` fires before `:hook2`.\n\nYou can also specify a hook to fire on all events:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(&IO.inspect/1, on: :all)\n |> Axon.relu()\n |> Axon.dense(1)\n\n{init_fn, predict_fn} = Axon.build(model)\n```\n\n\n\n```\n{#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,\n #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}\n```\n\nOn initialization:\n\n```elixir\nparams = init_fn.(input, %{})\n```\n\n\n\n```\n%{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n}\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nOn pre-forward and forward:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\n#Nx.Tensor \n#Nx.Tensor \n#Nx.Tensor \n```\n\n\n\n```\n#Nx.Tensor \n```\n\nAnd on the backward pass:\n\n```elixir\nNx.Defn.grad(fn params -> predict_fn.(params, input) end).(params)\n```\n\n\n\n```\n#Nx.Tensor \n#Nx.Tensor \n#Nx.Tensor \n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nFinally, you can specify hooks to run only when the model is built in a certain mode, such as training or inference mode. 
You can read more about training and inference mode in [Training and inference mode](../model_execution/training_and_inference_mode.livemd):\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)\n |> Axon.relu()\n\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\nparams = init_fn.(input, %{})\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nThe model was built in training mode so the hook will run:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\n\n\n```\n%{\n prediction: #Nx.Tensor ,\n state: %{}\n}\n```\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model, mode: :inference)\nparams = init_fn.(input, %{})\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nThe model was built in inference mode so the hook will not run:\n\n```elixir\npredict_fn.(params, input)\n```\n\n\n\n```\n#Nx.Tensor \n```","ref":"model_hooks.html#creating-models-with-hooks","title":"Creating models with hooks - Model hooks","type":"extras"},{"doc":"# Accelerating Axon\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"},\n {:exla, \">= 0.5.0\"},\n {:torchx, \">= 0.5.0\"},\n {:benchee, \"~> 1.1\"},\n {:kino, \">= 0.9.0\", override: true}\n])\n```\n\n\n\n```\n:ok\n```","ref":"accelerating_axon.html","title":"Accelerating Axon","type":"extras"},{"doc":"Nx provides two mechanisms for accelerating your neural networks: backends and compilers. Before we learn how to effectively use them, first let's create a simple model for benchmarking purposes:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(32)\n |> Axon.relu()\n |> Axon.dense(1)\n |> Axon.softmax()\n```\n\n\n\n```\n#Axon \n```\n\nBackends are where your tensors (your neural network inputs and parameters) are located. By default, Nx and Axon run all computations using the `Nx.BinaryBackend` which is a pure Elixir implementation of various numerical routines. The `Nx.BinaryBackend` is guaranteed to run wherever an Elixir installation runs; however, it is **very** slow. Due to the computational expense of neural networks, you should basically never use the `Nx.BinaryBackend` and instead opt for one of the available accelerated libraries. At the time of writing, Nx officially supports two of them:\n\n1. EXLA - Acceleration via Google's [XLA project](https://www.tensorflow.org/xla)\n2. TorchX - Bindings to [LibTorch](https://pytorch.org/cppdocs/)\n\nAxon will respect the global and process-level Nx backend configuration. Compilers are covered in more depth in the second half of this guide. You can set the default backend using the following APIs:\n\n```elixir\n# Sets the global default backend (for all Elixir processes)\nNx.global_default_backend(Torchx.Backend)\n# OR\nNx.global_default_backend(EXLA.Backend)\n\n# Sets the process-level default backend (current process only)\nNx.default_backend(Torchx.Backend)\n# OR\nNx.default_backend(EXLA.Backend)\n```\n\nNow all tensors and operations on them will run on the configured backend:\n\n```elixir\n{inputs, _next_key} =\n Nx.Random.key(9999)\n |> Nx.Random.uniform(shape: {2, 128})\n\n{init_fn, predict_fn} = Axon.build(model)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n f32[2][1]\n [\n [1.0],\n [1.0]\n ]\n>\n```\n\nAs you swap backends above, you will get tensors allocated on different backends as results. 
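If you do end up with tensors on two different backends, you can move them explicitly with `Nx.backend_transfer/2`. A minimal sketch (assuming both Torchx and EXLA are installed, as in the `Mix.install` above):\n\n```elixir\n# Allocate a tensor on one backend, then explicitly transfer it to another\nt = Nx.iota({2, 2}, type: :f32, backend: Torchx.Backend)\nNx.backend_transfer(t, EXLA.Backend)\n```\n\n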
Be careful when using multiple backends in the same project: mixing tensors between backends may result in subtle performance bugs or errors, because Nx requires you to convert between backends explicitly.\n\nWith most larger models, using a compiler will bring additional performance benefits on top of the backend.","ref":"accelerating_axon.html#using-nx-backends-in-axon","title":"Using Nx Backends in Axon - Accelerating Axon","type":"extras"},{"doc":"Axon is built entirely on top of Nx's numerical definitions `defn`. Functions declared with `defn` tell Nx to use *just-in-time compilation* to compile and execute the given numerical definition with an available Nx compiler. Numerical definitions enable acceleration on CPU/GPU/TPU via pluggable compilers. At the time of this writing, only EXLA supports a compiler in addition to its backend.\n\nWhen you call `Axon.build/2`, Axon can automatically mark your initialization and forward functions as JIT-compiled functions. First let's make sure we are using the EXLA backend:\n\n```elixir\nNx.default_backend(EXLA.Backend)\n```\n\nAnd now let's build another model, this time passing the EXLA compiler as an option:\n\n```elixir\n{inputs, _next_key} =\n Nx.Random.key(9999)\n |> Nx.Random.uniform(shape: {2, 128})\n\n{init_fn, predict_fn} = Axon.build(model, compiler: EXLA)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n\n15:39:26.463 [info] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n\n15:39:26.473 [info] XLA service 0x7f3488329030 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n\n15:39:26.473 [info] StreamExecutor device (0): NVIDIA GeForce RTX 3050 Ti Laptop GPU, Compute Capability 8.6\n\n15:39:26.473 [info] Using BFC allocator.\n\n15:39:26.473 [info] XLA backend allocating 3605004288 bytes on device 0 for BFCAllocator.\n\n15:39:28.272 [info] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.\n\n```\n\n\n\n```\n#Nx.Tensor \n [\n [1.0],\n [1.0]\n ]\n>\n```\n\nYou can also JIT compile functions explicitly via the `Nx.Defn.jit` or compiler-specific JIT APIs. 
This is useful when running benchmarks against various backends:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model)\n\n# These will both JIT compile with EXLA\nexla_init_fn = Nx.Defn.jit(init_fn, compiler: EXLA)\nexla_predict_fn = EXLA.jit(predict_fn)\n```\n\n\n\n```\n#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>\n```\n\n```elixir\nBenchee.run(\n %{\n \"elixir init\" => fn -> init_fn.(inputs, %{}) end,\n \"exla init\" => fn -> exla_init_fn.(inputs, %{}) end\n },\n time: 10,\n memory_time: 5,\n warmup: 2\n)\n```\n\n\n\n```\nWarning: the benchmark elixir init is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nWarning: the benchmark exla init is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nOperating System: Linux\nCPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz\nNumber of Available Cores: 4\nAvailable memory: 24.95 GB\nElixir 1.13.4\nErlang 25.0.4\n\nBenchmark suite executing with the following configuration:\nwarmup: 2 s\ntime: 10 s\nmemory time: 5 s\nreduction time: 0 ns\nparallel: 1\ninputs: none specified\nEstimated total run time: 34 s\n\nBenchmarking elixir init ...\nBenchmarking exla init ...\n\nName ips average deviation median 99th %\nexla init 3.79 K 0.26 ms ±100.40% 0.24 ms 0.97 ms\nelixir init 0.52 K 1.91 ms ±35.03% 1.72 ms 3.72 ms\n\nComparison:\nexla init 3.79 K\nelixir init 0.52 K - 7.25x slower +1.65 ms\n\nMemory usage statistics:\n\nName Memory usage\nexla init 9.80 KB\nelixir init 644.63 KB - 65.80x memory usage +634.83 KB\n\n**All measurements for memory usage were the same**\n```\n\n```elixir\nBenchee.run(\n %{\n \"elixir predict\" => fn -> predict_fn.(params, inputs) end,\n \"exla predict\" => fn -> exla_predict_fn.(params, inputs) end\n },\n time: 10,\n memory_time: 5,\n warmup: 2\n)\n```\n\n\n\n```\nWarning: the benchmark elixir predict is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nWarning: the benchmark exla predict is using an evaluated function.\n Evaluated functions perform slower than compiled functions.\n You can move the Benchee caller to a function in a module and invoke `Mod.fun()` instead.\n Alternatively, you can move the benchmark into a benchmark.exs file and run mix run benchmark.exs\n\nOperating System: Linux\nCPU Information: Intel(R) Core(TM) i7-7600U CPU @ 2.80GHz\nNumber of Available Cores: 4\nAvailable memory: 24.95 GB\nElixir 1.13.4\nErlang 25.0.4\n\nBenchmark suite executing with the following configuration:\nwarmup: 2 s\ntime: 10 s\nmemory time: 5 s\nreduction time: 0 ns\nparallel: 1\ninputs: none specified\nEstimated total run time: 34 s\n\nBenchmarking elixir predict ...\nBenchmarking exla predict ...\n\nName ips average deviation median 99th %\nexla predict 2.32 K 0.43 ms ±147.05% 0.34 ms 1.61 ms\nelixir predict 0.28 K 3.53 ms ±42.21% 3.11 ms 7.26 ms\n\nComparison:\nexla predict 2.32 K\nelixir predict 0.28 K - 
8.20x slower +3.10 ms\n\nMemory usage statistics:\n\nName Memory usage\nexla predict 10.95 KB\nelixir predict 91.09 KB - 8.32x memory usage +80.14 KB\n\n**All measurements for memory usage were the same**\n```\n\nNotice how the calls to the EXLA variants are significantly faster. These speedups become more pronounced with more complex models and workflows.\n\n\n\nIt's important to note that in order to use a given library as an Nx compiler, it must implement the Nx compilation behaviour. For example, you cannot invoke Torchx as an Nx compiler because it does not support JIT compilation at this time.","ref":"accelerating_axon.html#using-nx-compilers-in-axon","title":"Using Nx Compilers in Axon - Accelerating Axon","type":"extras"},{"doc":"While Nx mostly tries to standardize behavior across compilers and backends, some behaviors are backend-specific. For example, the API for choosing an acceleration platform (e.g. CUDA/ROCm/TPU) is backend-specific. You should refer to your chosen compiler or backend's documentation for information on targeting various accelerators. Typically, you only need to change a few configuration options and your code will run as-is on a chosen accelerator.","ref":"accelerating_axon.html#a-note-on-cpus-gpus-tpus","title":"A Note on CPUs/GPUs/TPUs - Accelerating Axon","type":"extras"},{"doc":"# Training and inference mode\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"training_and_inference_mode.html","title":"Training and inference mode","type":"extras"},{"doc":"Some layers have different considerations and behavior when running during model training versus model inference. For example, *dropout layers* are intended only to be used during training as a form of model regularization. Certain stateful layers like *batch normalization* keep a running internal state which changes during training mode but remains fixed during inference mode. Axon supports mode-dependent execution behavior via the `:mode` option passed to all building, compilation, and execution functions. By default, all models build in inference mode. You can see this behavior by adding a dropout layer with a dropout rate of nearly 1. In inference mode, this layer will have no effect:\n\n```elixir\ninputs = Nx.iota({2, 8}, type: :f32)\n\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(4)\n |> Axon.sigmoid()\n |> Axon.dropout(rate: 0.99)\n |> Axon.dense(1)\n\n{init_fn, predict_fn} = Axon.build(model)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nYou can also explicitly specify the mode:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model, mode: :inference)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n#Nx.Tensor \n```\n\nIt's important that you know which mode your models were compiled for, as a model built in `:inference` mode will behave drastically differently from one built in `:train` mode.","ref":"training_and_inference_mode.html#executing-models-in-inference-mode","title":"Executing models in inference mode - Training and inference mode","type":"extras"},{"doc":"By specifying `mode: :train`, you tell your models to execute in training mode. 
You can see the effects of this behavior here:\n\n```elixir\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n%{\n prediction: #Nx.Tensor ,\n state: %{\n \"dropout_0\" => %{\n \"key\" => #Nx.Tensor \n }\n }\n}\n```\n\nFirst, notice that your model now returns a map with keys `:prediction` and `:state`. `:prediction` contains the actual model prediction, while `:state` contains the updated state for any stateful layers such as batch norm. When writing custom training loops, you should extract `:state` and use it in conjunction with the updates API to ensure your stateful layers are updated correctly. If your model has stateful layers, `:state` will look similar to your model's parameter map:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(4)\n |> Axon.sigmoid()\n |> Axon.batch_norm()\n |> Axon.dense(1)\n\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\nparams = init_fn.(inputs, %{})\npredict_fn.(params, inputs)\n```\n\n\n\n```\n%{\n prediction: #Nx.Tensor ,\n state: %{\n \"batch_norm_0\" => %{\n \"mean\" => #Nx.Tensor ,\n \"var\" => #Nx.Tensor \n }\n }\n}\n```","ref":"training_and_inference_mode.html#executing-models-in-training-mode","title":"Executing models in training mode - Training and inference mode","type":"extras"},{"doc":"# Your first training loop\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"your_first_training_loop.html","title":"Your first training loop","type":"extras"},{"doc":"Axon generalizes the concept of training, evaluation, hyperparameter optimization, and more into the `Axon.Loop` API. Axon loops are instrumented reductions over Elixir Streams - that basically means you can accumulate some state over an Elixir `Stream` and control different points in the loop execution.\n\nWith Axon, you'll most commonly implement and work with supervised training loops. Because supervised training loops are so common in deep learning, Axon has a loop factory function which takes care of most of the boilerplate of creating a supervised training loop for you. In the beginning of your deep learning journey, you'll almost exclusively use Axon's loop factories to create and run loops.\n\nAxon's supervised training loop assumes you have an input stream of data with entries that look like:\n\n`{batch_inputs, batch_labels}`\n\nEach entry is a batch of input data with a corresponding batch of labels. You can simulate some real training data by constructing an Elixir stream:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n```\n\n\n\n```\n#Function<51.6935098/2 in Stream.repeatedly/1>\n```\n\nThe most basic supervised training loop in Axon requires 3 things:\n\n1. An Axon model\n2. A loss function\n3. An optimizer\n\nYou can construct an Axon model using the knowledge you've gained from going through the model creation guides:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n```\n\n\n\n```\n#Axon \n```\n\nAxon comes with built-in loss functions and optimizers which you can use directly when constructing your training loop. 
To construct your training loop, you use `Axon.Loop.trainer/3`:\n\n```elixir\nloop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nYou'll notice that `Axon.Loop.trainer/3` returns an `%Axon.Loop{}` data structure. This data structure contains information which Axon uses to control the execution of the loop. In order to run the loop, you need to explicitly pass it to `Axon.Loop.run/4`:\n\n```elixir\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0563023\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\n`Axon.Loop.run/4` expects a loop to execute, some data to loop over, and any initial state you explicitly want your loop to start with. `Axon.Loop.run/4` will then iterate over your data, executing a step function on each batch, and accumulating some generic loop state. In the case of a supervised training loop, this generic loop state actually represents training state including your model's trained parameters.\n\n`Axon.Loop.run/4` also accepts options which control the loop's execution. These include `:iterations`, which controls the number of iterations per epoch a loop should execute for, and `:epochs`, which controls the number of epochs a loop should execute for:\n\n```elixir\nAxon.Loop.run(loop, train_data, %{}, epochs: 3, iterations: 500)\n```\n\n\n\n```\nEpoch: 0, Batch: 450, loss: 0.0935063\nEpoch: 1, Batch: 450, loss: 0.0576384\nEpoch: 2, Batch: 450, loss: 0.0428323\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou may have noticed that by default `Axon.Loop.trainer/3` configures your loop to log information about training progress every 50 iterations. 
You can control this when constructing your supervised training loop with the `:log` option:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd, log: 100)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 900, loss: 0.1492715\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"your_first_training_loop.html#creating-an-axon-training-loop","title":"Creating an Axon training loop - Your first training loop","type":"extras"},{"doc":"# Instrumenting loops with metrics\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"instrumenting_loops_with_metrics.html","title":"Instrumenting loops with metrics","type":"extras"},{"doc":"Oftentimes when executing a loop, you want to keep track of various metrics such as accuracy or precision. For training loops, Axon by default only tracks loss; however, you can instrument the loop with additional built-in metrics. For example, you might want to track mean-absolute error on top of a mean-squared error loss:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(:mean_absolute_error)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},\n \"mean_absolute_error\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n :mean_absolute_error}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nWhen specifying a metric, you can specify an atom which maps to any of the metrics defined in `Axon.Metrics`. You can also define custom metrics. For more information on custom metrics, see [Writing custom metrics](writing_custom_metrics.livemd).\n\nWhen you run a loop with metrics, Axon will aggregate that metric over the course of the loop execution. For training loops, Axon will also report the aggregate metric in the training logs:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0590630 mean_absolute_error: 0.1463431\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nBy default, the metric will have a name which matches the string form of the given metric. 
You can give metrics semantic meaning by providing an explicit name:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.metric(:mean_absolute_error, \"model error\")\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0607362 model error: 0.1516546\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nAxon's default aggregation behavior is to aggregate metrics with a running average; however, you can customize this behavior by specifying an explicit accumulation function. Built-in accumulation functions are `:running_average` and `:running_sum`:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.metric(:mean_absolute_error, \"total error\", :running_sum)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0688004 total error: 151.4876404\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"instrumenting_loops_with_metrics.html#adding-metrics-to-training-loops","title":"Adding metrics to training loops - Instrumenting loops with metrics","type":"extras"},{"doc":"# Your first evaluation loop\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"your_first_evaluation_loop.html","title":"Your first evaluation loop","type":"extras"},{"doc":"Once you have a trained model, it's necessary to test it on some test data. Axon's loop abstraction is general enough to work for both training and evaluating models. Just as Axon implements a canned `Axon.Loop.trainer/3` factory, it also implements a canned `Axon.Loop.evaluator/1` factory.\n\n`Axon.Loop.evaluator/1` creates an evaluation loop which you can instrument with metrics to measure the performance of a trained model on test data. First, you need a trained model:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\ntrain_loop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)\n\ndata =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\ntrained_model_state = Axon.Loop.run(train_loop, data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.1285532\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nRunning loops with `Axon.Loop.trainer/3` returns a trained model state which you can use to evaluate your model. 
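You can also spot-check the trained state directly with `Axon.predict/4` before setting up a full evaluation loop. A quick sketch (the `{8, 1}` input shape mirrors the training data above):\n\n```elixir\n# Run a single forward pass with the trained parameters\nAxon.predict(model, trained_model_state, Nx.iota({8, 1}, type: :f32))\n```\n\n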
To construct an evaluation loop, you just call `Axon.Loop.evaluator/1` with your pre-trained model:\n\n```elixir\ntest_loop = Axon.Loop.evaluator(model)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nNext, you'll need to instrument your test loop with the metrics you'd like to aggregate:\n\n```elixir\ntest_loop = test_loop |> Axon.Loop.metric(:mean_absolute_error)\n```\n\n\n\n```\n#Axon.Loop ,\n :mean_absolute_error}\n },\n handlers: %{\n completed: [],\n epoch_completed: [],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nFinally, you can run your loop on test data. Because you want to test your trained model, you need to provide your model's initial state to the test loop:\n\n```elixir\nAxon.Loop.run(test_loop, data, trained_model_state, iterations: 1000)\n```\n\n\n\n```\nBatch: 999, mean_absolute_error: 0.0856894\n```\n\n\n\n```\n%{\n 0 => %{\n \"mean_absolute_error\" => #Nx.Tensor \n }\n}\n```","ref":"your_first_evaluation_loop.html#creating-an-axon-evaluation-loop","title":"Creating an Axon evaluation loop - Your first evaluation loop","type":"extras"},{"doc":"# Using loop event handlers\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"using_loop_event_handlers.html","title":"Using loop event handlers","type":"extras"},{"doc":"Oftentimes you want more fine-grained control over things that happen during loop execution. For example, you might want to save loop state to a file every 500 iterations, or log some output to `:stdout` at the end of every epoch. Axon loops allow more fine-grained control via events and event handlers.\n\nAxon fires a number of events during loop execution which allow you to instrument various points in the loop execution cycle. You can attach event handlers to any of these events:\n\n\n\n```elixir\nevents = [\n :started, # After loop state initialization\n :epoch_started, # On epoch start\n :iteration_started, # On iteration start\n :iteration_completed, # On iteration complete\n :epoch_completed, # On epoch complete\n :epoch_halted, # On epoch halt, if early halted\n :halted, # On loop halt, if early halted\n :completed # On loop completion\n]\n```\n\nAxon packages a number of common loop event handlers for you out of the box. These handlers should cover most of the common cases you'll encounter in practice. Axon also allows for custom event handlers. See [Writing custom event handlers](writing_custom_event_handlers.livemd) for more information.\n\nAn event handler will take the current loop state at the time of the fired event, and alter or use it in some way before returning control to the main loop execution. You can attach any of Axon's pre-packaged event handlers to a loop by using the function directly. 
For example, if you want to checkpoint loop state at the end of every epoch, you can use `Axon.Loop.checkpoint/2`:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.checkpoint(event: :epoch_completed)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<17.37390314/1 in Axon.Loop.checkpoint/2>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>},\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nNow when you execute your loop, it will save a checkpoint at the end of every epoch:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.5345965\nEpoch: 1, Batch: 50, loss: 0.4578816\nEpoch: 2, Batch: 50, loss: 0.4527244\nEpoch: 3, Batch: 50, loss: 0.4466343\nEpoch: 4, Batch: 50, loss: 0.4401709\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou can also use event handlers for things as simple as implementing custom logging with the pre-packaged `Axon.Loop.log/4` event handler:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.log(fn _state -> \"epoch is over\\n\" end, event: :epoch_completed, device: :stdio)\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.3220241\nepoch is over\nEpoch: 1, Batch: 50, loss: 0.2309804\nepoch is over\nEpoch: 2, Batch: 50, loss: 0.1759415\nepoch is over\nEpoch: 3, Batch: 50, loss: 0.1457551\nepoch is over\nEpoch: 4, Batch: 50, loss: 0.1247821\nepoch is over\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nFor even more fine-grained control over when event handlers fire, you can add filters. 
For example, if you only want to checkpoint loop state every 2 epochs, you can use a filter:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.checkpoint(event: :epoch_completed, filter: [every: 2])\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.3180207\nEpoch: 1, Batch: 50, loss: 0.1975918\nEpoch: 2, Batch: 50, loss: 0.1353940\nEpoch: 3, Batch: 50, loss: 0.1055405\nEpoch: 4, Batch: 50, loss: 0.0890203\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nAxon event handlers support both keyword and function filters. Keyword filters include keywords such as `:every`, `:once`, and `:always`. Function filters are arity-1 functions which accept the current loop state and return a boolean - for example, `fn state -> rem(state.epoch, 2) == 0 end`.","ref":"using_loop_event_handlers.html#adding-event-handlers-to-training-loops","title":"Adding event handlers to training loops - Using loop event handlers","type":"extras"},{"doc":"\n\n# Custom models, loss functions, and optimizers\n\n```elixir\nMix.install([\n {:axon, github: \"elixir-nx/axon\"},\n {:nx, \"~> 0.3.0\", github: \"elixir-nx/nx\", sparse: \"nx\", override: true}\n])\n```\n\n\n\n```\n:ok\n```","ref":"custom_models_loss_optimizers.html","title":"Custom models, loss functions, and optimizers","type":"extras"},{"doc":"In the [Your first training loop](your_first_training_loop.livemd) guide, you learned how to declare a supervised training loop using `Axon.Loop.trainer/3` with a model, loss function, and optimizer. Your overall model and loop declaration looked something like this:\n\n\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)\n```\n\nThis example uses an `%Axon{}` struct to represent your `model` to train, and atoms to represent your loss function and optimizer. Some of your problems will require a bit more flexibility than this example affords. Fortunately, `Axon.Loop.trainer/3` is designed for flexibility.\n\nFor example, if your model cannot be cleanly represented as an `%Axon{}` model, you can opt instead to define custom initialization and forward functions to pass to `Axon.Loop.trainer/3`. Actually, `Axon.Loop.trainer/3` is doing this for you under the hood - the ability to pass an `%Axon{}` struct directly is just a convenience:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nlowered_model = {init_fn, predict_fn} = Axon.build(model)\n\nloop = Axon.Loop.trainer(lowered_model, :mean_squared_error, :sgd)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<5.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<23.20267452/1 in Axon.Loop.log/5>,\n #Function<3.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n metrics: %{\n \"loss\" => {#Function<12.6031754/3 in Axon.Metrics.running_average/1>,\n #Function<6.20267452/2 in Axon.Loop.build_loss_fn/1>}\n },\n ...\n>\n```\n\nNotice that `Axon.Loop.trainer/3` handles the \"lowered\" form of an Axon model without issue. 
When you pass an `%Axon{}` struct, the trainer factory converts it to a lowered representation for you. With this construct, you can build custom models entirely with Nx `defn`, or readily mix your Axon models into custom workflows without worrying about compatibility with the `Axon.Loop` API:\n\n```elixir\ndefmodule CustomModel do\n import Nx.Defn\n\n defn custom_predict_fn(model_predict_fn, params, input) do\n %{prediction: preds} = out = model_predict_fn.(params, input)\n %{out | prediction: Nx.cos(preds)}\n end\nend\n```\n\n\n\n```\n{:module, CustomModel, <<70, 79, 82, 49, 0, 0, 9, ...>>, {:custom_predict_fn, 3}}\n```\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n xs = Nx.random_normal({8, 1})\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\n{init_fn, predict_fn} = Axon.build(model, mode: :train)\ncustom_predict_fn = &CustomModel.custom_predict_fn(predict_fn, &1, &2)\n\nloop = Axon.Loop.trainer({init_fn, custom_predict_fn}, :mean_squared_error, :sgd)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 500)\n```\n\n\n\n```\nEpoch: 0, Batch: 500, loss: 0.3053460\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"custom_models_loss_optimizers.html#using-custom-models-in-training-loops","title":"Using custom models in training loops - Custom models, loss functions, and optimizers","type":"extras"},{"doc":"Just as `Axon.Loop.trainer/3` allows more flexibility with models, it also supports more flexible loss functions. In most cases, you can get away with using one of Axon's built-in loss functions by specifying an atom. Atoms map directly to a loss function defined in `Axon.Losses`. Under the hood, `Axon.Loop.trainer/3` is doing something like:\n\n\n\n```elixir\nloss_fn = &apply(Axon.Losses, loss_atom, [&1, &2])\n```\n\nRather than pass an atom, you can pass your own custom arity-2 function to `Axon.Loop.trainer/3`. This arises most often in cases where you want to control some parameters of the loss function, such as the batch-level reduction:\n\n```elixir\nloss_fn = &Axon.Losses.mean_squared_error(&1, &2, reduction: :sum)\n\nloop = Axon.Loop.trainer(model, loss_fn, :sgd)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<5.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<23.20267452/1 in Axon.Loop.log/5>,\n #Function<3.20267452/1 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n metrics: %{\n \"loss\" => {#Function<12.6031754/3 in Axon.Metrics.running_average/1>,\n #Function<41.3316493/2 in :erl_eval.expr/6>}\n },\n ...\n>\n```\n\nYou can also define your own custom loss functions, so long as they match the following spec:\n\n\n\n```elixir\nloss(\n y_true :: tensor[batch, ...] | container(tensor),\n y_preds :: tensor[batch, ...] | container(tensor)\n ) :: scalar\n```\n\nThis is useful for constructing loss functions when dealing with multi-output scenarios. 
For example, it's very easy to construct a custom loss function which is a weighted average of several loss functions on multiple inputs:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n xs = Nx.random_normal({8, 1})\n y1 = Nx.sin(xs)\n y2 = Nx.cos(xs)\n {xs, {y1, y2}}\n end)\n\nshared =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n\ny1 = Axon.dense(shared, 1)\ny2 = Axon.dense(shared, 1)\n\nmodel = Axon.container({y1, y2})\n\ncustom_loss_fn = fn {y_true1, y_true2}, {y_pred1, y_pred2} ->\n loss1 = Axon.Losses.mean_squared_error(y_true1, y_pred1, reduction: :mean)\n loss2 = Axon.Losses.mean_squared_error(y_true2, y_pred2, reduction: :mean)\n\n loss1\n |> Nx.multiply(0.4)\n |> Nx.add(Nx.multiply(loss2, 0.6))\nend\n\nmodel\n|> Axon.Loop.trainer(custom_loss_fn, :sgd)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 1000, loss: 0.1098235\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_3\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"custom_models_loss_optimizers.html#using-custom-loss-functions-in-training-loops","title":"Using custom loss functions in training loops - Custom models, loss functions, and optimizers","type":"extras"},{"doc":"As you might expect, it's also possible to customize the optimizer passed to `Axon.Loop.trainer/3`. If you read the `Polaris.Updates` documentation, you'll learn that optimizers are actually represented as the tuple `{init_fn, update_fn}` where `init_fn` initializes optimizer state from model state and `update_fn` scales gradients from optimizer state, gradients, and model state.\n\nYou likely won't have to implement a custom optimizer; however, you should know how to construct optimizers with different hyperparameters and how to apply different modifiers to different optimizers to customize the optimization process.\n\nWhen you specify an optimizer as an atom in `Axon.Loop.trainer/3`, it maps directly to an optimizer declared in `Polaris.Optimizers`. You can instead opt to declare your optimizer directly. 
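To make that contract concrete, here is a minimal sketch (assuming the `Polaris.Updates.scale/1` combinator) of plain SGD expressed directly as an `{init_fn, update_fn}` pair:\n\n```elixir\n# Plain SGD just scales every gradient by -learning_rate;\n# scale/1 returns the {init_fn, update_fn} tuple described above\noptimizer = {_init_fn, _update_fn} = Polaris.Updates.scale(-1.0e-2)\n\nloop = Axon.Loop.trainer(model, :mean_squared_error, optimizer)\n```\n\nMore commonly, though, you will construct one of the built-in optimizers with custom options rather than assembling the pair yourself. 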
This is most useful for controlling things like the learning rate and various optimizer hyperparameters:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n xs = Nx.random_normal({8, 1})\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\noptimizer = {_init_optimizer_fn, _update_fn} = Polaris.Optimizers.sgd(learning_rate: 1.0e-3)\n\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, optimizer)\n|> Axon.Loop.run(train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 1000, loss: 0.0992607\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"custom_models_loss_optimizers.html#using-custom-optimizers-in-training-loops","title":"Using custom optimizers in training loops - Custom models, loss functions, and optimizers","type":"extras"},{"doc":"# Writing custom metrics\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"writing_custom_metrics.html","title":"Writing custom metrics","type":"extras"},{"doc":"When passing an atom to `Axon.Loop.metric/5`, Axon dispatches the function to a built-in function in `Axon.Metrics`. If you find you'd like to use a metric that does not exist in `Axon.Metrics`, you can define a custom function:\n\n```elixir\ndefmodule CustomMetric do\n import Nx.Defn\n\n defn my_weird_metric(y_true, y_pred) do\n Nx.atan2(y_true, y_pred) |> Nx.sum()\n end\nend\n```\n\n\n\n```\n{:module, CustomMetric, <<70, 79, 82, 49, 0, 0, 8, ...>>, true}\n```\n\nThen you can pass that directly to `Axon.Loop.metric/5`. 
You must provide a name for your custom metric:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(&CustomMetric.my_weird_metric/2, \"my weird metric\")\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},\n \"my weird metric\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n &CustomMetric.my_weird_metric/2}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nThen when running, Axon will invoke your custom metric function and accumulate it with the given aggregator:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, loss: 0.0681635 my weird metric: -5.2842808\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nWhile the metric defaults are designed with supervised training loops in mind, they can be used for much more flexible purposes. By default, metrics look for the fields `:y_true` and `:y_pred` in the given loop's step state. They then apply the given metric function on those inputs. You can also define metrics which work on other fields. 
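Conceptually, the default behavior corresponds to an output transform of roughly `fn %{y_true: y_true, y_pred: y_pred} -> [y_true, y_pred] end` (a sketch of the behavior described above, not Axon's literal source). 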
For example you can track the running average of a given parameter with a metric just by defining a custom output transform:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\noutput_transform = fn %{model_state: model_state} ->\n [model_state[\"dense_0\"][\"kernel\"]]\nend\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(&Nx.mean/1, \"dense_0_kernel_mean\", :running_average, output_transform)\n |> Axon.Loop.metric(&Nx.variance/1, \"dense_0_kernel_var\", :running_average, output_transform)\n```\n\n\n\n```\n#Axon.Loop ,\n &Nx.mean/1},\n \"dense_0_kernel_var\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n &Nx.variance/1},\n \"loss\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nAxon will apply your custom output transform to the loop's step state and forward the result to your custom metric function:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, dense_0_kernel_mean: -0.1978206 dense_0_kernel_var: 0.2699870 loss: 0.0605523\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou can also define custom accumulation functions. Axon has definitions for computing running averages and running sums; however, you might find you need something like an exponential moving average:\n\n```elixir\ndefmodule CustomAccumulator do\n import Nx.Defn\n\n defn running_ema(acc, obs, _i, opts \\\\ []) do\n opts = keyword!(opts, alpha: 0.9)\n obs * opts[:alpha] + acc * (1 - opts[:alpha])\n end\nend\n```\n\n\n\n```\n{:module, CustomAccumulator, <<70, 79, 82, 49, 0, 0, 11, ...>>, true}\n```\n\nYour accumulator must be an arity-3 function which accepts the current accumulated value, the current observation, and the current iteration and returns the aggregated metric. 
You can pass a function direct as an accumulator in your metric:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\noutput_transform = fn %{model_state: model_state} ->\n [model_state[\"dense_0\"][\"kernel\"]]\nend\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.metric(\n &Nx.mean/1,\n \"dense_0_kernel_ema_mean\",\n &CustomAccumulator.running_ema/3,\n output_transform\n )\n```\n\n\n\n```\n#Axon.Loop ,\n &Nx.mean/1},\n \"loss\" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nThen when you run the loop, Axon will use your custom accumulator:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, iterations: 1000)\n```\n\n\n\n```\nEpoch: 0, Batch: 950, dense_0_kernel_ema_mean: -0.0139760 loss: 0.0682910\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```","ref":"writing_custom_metrics.html#writing-custom-metrics","title":"Writing custom metrics - Writing custom metrics","type":"extras"},{"doc":"# Writing custom event handlers\n\n```elixir\nMix.install([\n {:axon, \">= 0.5.0\"}\n])\n```\n\n\n\n```\n:ok\n```","ref":"writing_custom_event_handlers.html","title":"Writing custom event handlers","type":"extras"},{"doc":"If you require functionality not offered by any of Axon's built-in event handlers, then you'll need to write a custom event handler. Custom event handlers are functions which accept loop state, perform some action, and then defer execution back to the main loop. 
For example, you can write custom loop handlers which visualize model outputs, communicate with an external Kino process, or simply halt the loop based on some criteria.\n\nAll event handlers must accept an `%Axon.Loop.State{}` struct and return a tuple of `{control_term, state}` where `control_term` is one of `:continue`, `:halt_epoch`, or `:halt_loop` and `state` is the updated loop state:\n\n```elixir\ndefmodule CustomEventHandler0 do\n alias Axon.Loop.State\n\n def my_weird_handler(%State{} = state) do\n IO.puts(\"My weird handler: fired\")\n {:continue, state}\n end\nend\n```\n\n\n\n```\n{:module, CustomEventHandler0, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:my_weird_handler, 1}}\n```\n\nTo register event handlers, you use `Axon.Loop.handle_event/4`:\n\n```elixir\nmodel =\n Axon.input(\"data\")\n |> Axon.dense(8)\n |> Axon.relu()\n |> Axon.dense(4)\n |> Axon.relu()\n |> Axon.dense(1)\n\nloop =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :sgd)\n |> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler0.my_weird_handler/1)\n```\n\n\n\n```\n#Axon.Loop ,\n #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}\n },\n handlers: %{\n completed: [],\n epoch_completed: [\n {&CustomEventHandler0.my_weird_handler/1,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>},\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n epoch_halted: [],\n epoch_started: [],\n halted: [],\n iteration_completed: [\n {#Function<27.37390314/1 in Axon.Loop.log/3>,\n #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}\n ],\n iteration_started: [],\n started: []\n },\n ...\n>\n```\n\nAxon will trigger your custom handler to run on the attached event:\n\n```elixir\ntrain_data =\n Stream.repeatedly(fn ->\n {xs, _next_key} =\n :random.uniform(9999)\n |> Nx.Random.key()\n |> Nx.Random.normal(shape: {8, 1})\n\n ys = Nx.sin(xs)\n {xs, ys}\n end)\n\nAxon.Loop.run(loop, train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.0990703\nMy weird handler: fired\nEpoch: 1, Batch: 50, loss: 0.0567622\nMy weird handler: fired\nEpoch: 2, Batch: 50, loss: 0.0492784\nMy weird handler: fired\nEpoch: 3, Batch: 50, loss: 0.0462587\nMy weird handler: fired\nEpoch: 4, Batch: 50, loss: 0.0452806\nMy weird handler: fired\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou can use event handlers to early-stop a loop or loop epoch by returning a `:halt_*` control term. Halt control terms can be one of `:halt_epoch` or `:halt_loop`. `:halt_epoch` halts the current epoch and continues to the next. 
`:halt_loop` halts the loop altogether.\n\n```elixir\ndefmodule CustomEventHandler1 do\n alias Axon.Loop.State\n\n def always_halts(%State{} = state) do\n IO.puts(\"stopping loop\")\n {:halt_loop, state}\n end\nend\n```\n\n\n\n```\n{:module, CustomEventHandler1, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:always_halts, 1}}\n```\n\nThe loop will immediately stop executing and return the current state at the time it was halted:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler1.always_halts/1)\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 50, loss: 0.2201974\nstopping loop\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nNote that halting an epoch will fire a different event than completing an epoch. So if you implement a custom handler to halt the loop when an epoch completes, it will never fire if the epoch always halts prematurely:\n\n```elixir\ndefmodule CustomEventHandler2 do\n alias Axon.Loop.State\n\n def always_halts_epoch(%State{} = state) do\n IO.puts(\"\\nstopping epoch\")\n {:halt_epoch, state}\n end\n\n def always_halts_loop(%State{} = state) do\n IO.puts(\"stopping loop\\n\")\n {:halt_loop, state}\n end\nend\n```\n\n\n\n```\n{:module, CustomEventHandler2, <<70, 79, 82, 49, 0, 0, 8, ...>>, {:always_halts_loop, 1}}\n```\n\nIf you run these handlers in conjunction, the loop will not terminate prematurely:\n\n```elixir\nmodel\n|> Axon.Loop.trainer(:mean_squared_error, :sgd)\n|> Axon.Loop.handle_event(:iteration_completed, &CustomEventHandler2.always_halts_epoch/1)\n|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler2.always_halts_loop/1)\n|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)\n```\n\n\n\n```\nEpoch: 0, Batch: 0, loss: 0.0000000\nstopping epoch\n\nstopping epoch\n\nstopping epoch\n\nstopping epoch\n\nstopping epoch\n```\n\n\n\n```\n%{\n \"dense_0\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_1\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n },\n \"dense_2\" => %{\n \"bias\" => #Nx.Tensor ,\n \"kernel\" => #Nx.Tensor \n }\n}\n```\n\nYou may access and update any portion of the loop state. Keep in mind that event handlers are **not** JIT-compiled, so you should be certain to manually JIT-compile any long-running or expensive operations.","ref":"writing_custom_event_handlers.html#writing-custom-event-handlers","title":"Writing custom event handlers - Writing custom event handlers","type":"extras"},{"doc":"# Converting ONNX models to Axon\n\n```elixir\nMix.install(\n [\n {:axon, \">= 0.5.0\"},\n {:exla, \">= 0.5.0\"},\n {:axon_onnx, \">= 0.4.0\"},\n {:stb_image, \">= 0.6.0\"},\n {:kino, \">= 0.9.0\"},\n {:req, \">= 0.3.8\"}\n ]\n # for Nvidia GPU change to \"cuda111\" for CUDA 11.1+ or \"cuda118\" for CUDA 11.8\n # CUDA 12.x not supported by XLA\n # or you can put this value in ENV variables in Livebook settings\n # XLA_TARGET=cuda111\n # system_env: %{\"XLA_TARGET\" => xla_target}\n)\n```","ref":"onnx_to_axon.html","title":"Converting ONNX models to Axon","type":"extras"},{"doc":"Axon is a new machine learning capability, specific to Elixir. 
We would like to take\nadvantage of the large number of models that have been written in other languages and\nmachine learning frameworks. Let's take a look at how we could use a model developed\nin another language.\n\nConverting models developed by data scientists into a production-capable implementation is a\nchallenge for all languages and frameworks. [ONNX](https://onnx.ai/) is an interchange\nformat that allows models written in one language or framework to be converted into\nanother language and framework.\n\nThe source model must use constructs mapped into ONNX. Also, the destination framework must\nsupport the model's ONNX constructs. From an Elixir perspective, we are interested in ONNX models\nthat [axon_onnx](https://github.com/elixir-nx/axon_onnx) can convert into Axon models.\n\n\n\n#","ref":"onnx_to_axon.html#converting-an-onnx-model-into-axon","title":"Converting an ONNX model into Axon - Converting ONNX models to Axon","type":"extras"},{"doc":"\n\nElixir can get access to thousands of public models, and your organization may have private models\nwritten in other languages and frameworks. Axon will be hard-pressed to quickly repeat the\ncountless person-hours spent on developing models in other languages like TensorFlow and PyTorch.\nHowever, if the model can be converted into ONNX and then into Axon, we can directly run the model\nin Elixir.\n\n\n\n#","ref":"onnx_to_axon.html#why-is-onnx-important-to-axon","title":"Why is ONNX important to Axon? - Converting ONNX models to Axon","type":"extras"},{"doc":"\n\nAxon runs on top of [Nx (Numerical Elixir)](https://hexdocs.pm/nx). Nx has backends for\nboth Google's XLA (via EXLA) and PyTorch (via Torchx). In this guide, we will use EXLA.\nWe'll also convert from an ONNX model into an Axon model using\n[`axon_onnx`](https://github.com/elixir-nx/axon_onnx).\n\nYou can find all dependencies in the installation cell at the top of the notebook.\nIn there, you will also find the `XLA_TARGET` environment variable which you can set\nto \"cuda111\" or \"rocm\" if you have any of those GPUs available. Let's also configure\nNx to store tensors in EXLA by default:\n\n```elixir\n# Nx.default_backend(EXLA.Backend)\n```\n\nWe'll also need local access to ONNX files. For this notebook, the models/onnx folder\ncontains the ONNX model file. This notebook assumes the output file location will be\nin the models/axon folder. Copy your ONNX model files into the models/onnx folder.\n\nThis opinionated module presents a simple API for loading in an ONNX file and saving\nthe converted Axon model in the provided directory. This API will allow us to 
This API will allow us to\nsave multiple models pretty quickly.\n\n```elixir\ndefmodule OnnxToAxon do\n @moduledoc \"\"\"\n Helper module from ONNX to Axon.\n \"\"\"\n\n @doc \"\"\"\n Loads an ONNX model into Axon and saves the model","ref":"onnx_to_axon.html#setting-up-our-environment","title":"Setting up our environment - Converting ONNX models to Axon","type":"extras"},{"doc":"OnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)\n\n \"\"\"\n def onnx_axon(path_to_onnx_file, path_to_axon_dir) do\n axon_name = axon_name_from_onnx_path(path_to_onnx_file)\n path_to_axon = Path.join(path_to_axon_dir, axon_name)\n\n {model, parameters} = AxonOnnx.import(path_to_onnx_file)\n model_bytes = Axon.serialize(model, parameters)\n File.write!(path_to_axon, model_bytes)\n end\n\n defp axon_name_from_onnx_path(onnx_path) do\n model_root = onnx_path |> Path.basename() |> Path.rootname()\n \"#{model_root}.axon\"\n end\nend\n```","ref":"onnx_to_axon.html#examples","title":"Examples - Converting ONNX models to Axon","type":"extras"},{"doc":"For this example, we'll use a couple ONNX models that have been saved in the Huggingface Hub.\n\n\n\nThe ONNX models were trained in Fast.ai (PyTorch) using the following notebooks:\n\n* https://github.com/meanderingstream/fastai_course22/blob/main/saving-a-basic-fastai-model-in-onnx.ipynb\n* https://github.com/meanderingstream/fastai_course22/blob/main/saving-cat-dog-breed-fastai-model-in-onnx.ipynb\n\nTo repeat this notebook, the onnx files for this notebook can be found on huggingface hub. Download the onnx models from:\n\n* https://huggingface.co/ScottMueller/Cats_v_Dogs.ONNX\n* https://huggingface.co/ScottMueller/Cat_Dog_Breeds.ONNX\n\nDownload the files and place them in a directory of your choice. By default, we will assume you downloaded them to the same directory as the notebook:\n\n```elixir\nFile.cd!(__DIR__)\n```\n\nNow let's convert an ONNX model into Axon\n\n```elixir\npath_to_onnx_file = \"cats_v_dogs.onnx\"\npath_to_axon_dir = \".\"\nOnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)\n```\n\n```elixir\npath_to_onnx_file = \"cat_dog_breeds.onnx\"\npath_to_axon_dir = \".\"\nOnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)\n```","ref":"onnx_to_axon.html#onnx-model","title":"ONNX model - Converting ONNX models to Axon","type":"extras"},{"doc":"To run inference on the model, you'll need 10 images focused on cats or dogs. You can download the images used in training the model at:\n\n\"https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet.tgz\"\n\nOr you can find or use your own images. In this notebook, we are going to use the local copies of the Oxford Pets dataset that was used in training the model.\n\n\n\nLet's load the Axon model.\n\n```elixir\ncats_v_dogs = File.read!(\"cats_v_dogs.axon\")\n{cats_v_dogs_model, cats_v_dogs_params} = Axon.deserialize(cats_v_dogs)\n```\n\nWe need a tensor representation of an image. Let's start by looking at samples of\nour data.\n\n```elixir\nFile.read!(\"oxford-iiit-pet/images/havanese_71.jpg\")\n|> Kino.Image.new(:jpeg)\n```\n\nTo manipulate the images, we will use the `StbImage` library:\n\n```elixir\n{:ok, img} = StbImage.read_file(\"oxford-iiit-pet/images/havanese_71.jpg\")\n%StbImage{data: binary, shape: shape, type: type} = StbImage.resize(img, 224, 224)\n```\n\nNow let's work on a batch of images and convert them to tensors. 
Here are the images we will work with:\n\n```elixir\nfile_names = [\n \"havanese_71.jpg\",\n \"yorkshire_terrier_9.jpg\",\n \"Sphynx_206.jpg\",\n \"Siamese_95.jpg\",\n \"Egyptian_Mau_63.jpg\",\n \"keeshond_175.jpg\",\n \"samoyed_88.jpg\",\n \"British_Shorthair_122.jpg\",\n \"Russian_Blue_20.jpg\",\n \"boxer_99.jpg\"\n]\n```\n\nNext, we resize the images:\n\n```elixir\nresized_images =\n Enum.map(file_names, fn file_name ->\n (\"oxford-iiit-pet/images/\" <> file_name)\n |> IO.inspect(label: file_name)\n |> StbImage.read_file!()\n |> StbImage.resize(224, 224)\n end)\n```\n\nAnd finally convert them into tensors by using `StbImage.to_nx/1`. The created tensor will have three axes, named `:height`, `:width`, and `:channels` respectively. Our goal is to stack the tensors, then normalize and transpose their axes to the order expected by the neural network:\n\n```elixir\nimg_tensors =\n resized_images\n |> Enum.map(&StbImage.to_nx/1)\n |> Nx.stack(name: :index)\n |> Nx.divide(255.0)\n |> Nx.transpose(axes: [:index, :channels, :height, :width])\n```\n\nWith our input data, it is finally time to work on predictions. First, let's define a helper module:\n\n```elixir\ndefmodule Predictions do\n @doc \"\"\"\n When provided a Tensor of single label predictions, returns the best vocabulary match for\n each row in the prediction tensor.","ref":"onnx_to_axon.html#inference-on-onnx-derived-models","title":"Inference on ONNX derived models - Converting ONNX models to Axon","type":"extras"},{"doc":"# iex> Predictions.single_label_classification(tensor_of_predictions, dog_cat_vocabulary)\n # [\"dog\", \"cat\", \"dog\"]\n\n \"\"\"\n def single_label_classification(predictions_batch, vocabulary) do\n IO.inspect(Nx.shape(predictions_batch), label: \"predictions batch shape\")\n\n for prediction_tensor <- Nx.to_batched(predictions_batch, 1) do\n {_prediction_value, prediction_label} =\n prediction_tensor\n |> Nx.to_flat_list()\n |> Enum.zip(vocabulary)\n |> Enum.max()\n\n prediction_label\n end\n end\nend\n```\n\nNow we deserialize the model\n\n```elixir\n{cats_v_dogs_model, cats_v_dogs_params} = Axon.deserialize(cats_v_dogs)\n```\n\nrun a prediction using the `EXLA` compiler for performance\n\n```elixir\ntensor_of_predictions =\n Axon.predict(cats_v_dogs_model, cats_v_dogs_params, img_tensors, compiler: EXLA)\n```\n\nand finally retrieve the predicted label\n\n```elixir\ndog_cat_vocabulary = [\n \"dog\",\n \"cat\"\n]\n\nPredictions.single_label_classification(tensor_of_predictions, dog_cat_vocabulary)\n```\n\nLet's repeat the above process for the dog and cat breed model.\n\n```elixir\ncat_dog_vocabulary = [\n \"abyssinian\",\n \"american_bulldog\",\n \"american_pit_bull_terrier\",\n \"basset_hound\",\n \"beagle\",\n \"bengal\",\n \"birman\",\n \"bombay\",\n \"boxer\",\n \"british_shorthair\",\n \"chihuahua\",\n \"egyptian_mau\",\n \"english_cocker_spaniel\",\n \"english_setter\",\n \"german_shorthaired\",\n \"great_pyrenees\",\n \"havanese\",\n \"japanese_chin\",\n \"keeshond\",\n \"leonberger\",\n \"maine_coon\",\n \"miniature_pinscher\",\n \"newfoundland\",\n \"persian\",\n \"pomeranian\",\n \"pug\",\n \"ragdoll\",\n \"russian_blue\",\n \"saint_bernard\",\n \"samoyed\",\n \"scottish_terrier\",\n \"shiba_inu\",\n \"siamese\",\n \"sphynx\",\n \"staffordshire_bull_terrier\",\n \"wheaten_terrier\",\n \"yorkshire_terrier\"\n]\n```\n\n```elixir\ncat_dog_breeds = File.read!(\"cat_dog_breeds.axon\")\n{cat_dog_breeds_model, cat_dog_breeds_params} = 
Axon.deserialize(cat_dog_breeds)\n```\n\n```elixir\nAxon.predict(cat_dog_breeds_model, cat_dog_breeds_params, img_tensors)\n|> Predictions.single_label_classification(cat_dog_vocabulary)\n```\n\nFor cat and dog breeds, the model performed pretty well, but it was not perfect.","ref":"onnx_to_axon.html#examples","title":"Examples - Converting ONNX models to Axon","type":"extras"},{"doc":"# Modeling XOR with a neural network\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:kino_vega_lite, \"~> 0.1.6\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\n\nalias VegaLite, as: Vl\n```","ref":"xor.html","title":"Modeling XOR with a neural network","type":"extras"},{"doc":"In this notebook we create a model and teach it the **logical XOR** operation.\n\nEven though XOR seems like a trivial operation, it cannot be modeled using a single dense layer ([single-layer perceptron](https://en.wikipedia.org/wiki/Feedforward_neural_network#Single-layer_perceptron)). The underlying reason is that the classes in XOR are not linearly separable. We cannot draw a straight line to separate the points $(0,0)$, $(1,1)$ from the points $(0,1)$, $(1,0)$. To model this properly, we need to turn to deep learning methods. Deep learning is capable of learning non-linear relationships like XOR.","ref":"xor.html#introduction","title":"Introduction - Modeling XOR with a neural network","type":"extras"},{"doc":"Let's start with the model. We need two inputs, since XOR has two operands. We then concatenate them into a single input vector with `Axon.concatenate/3`. Then we have one hidden layer and one output layer, both of them dense.\n\nNote: the model is a sequential neural network. In Axon, we can conveniently create such a model by using the pipe operator (`|>`) to add layers one by one.\n\n```elixir\nx1_input = Axon.input(\"x1\", shape: {nil, 1})\nx2_input = Axon.input(\"x2\", shape: {nil, 1})\n\nmodel =\n x1_input\n |> Axon.concatenate(x2_input)\n |> Axon.dense(8, activation: :tanh)\n |> Axon.dense(1, activation: :sigmoid)\n```","ref":"xor.html#the-model","title":"The model - Modeling XOR with a neural network","type":"extras"},{"doc":"The next step is to prepare training data. Since we are modeling a well-defined operation, we can just generate random operands and compute the expected XOR result for them.\n\nThe training works with batches of examples, so we *repeatedly* generate a whole batch of inputs and the expected result.\n\n```elixir\nbatch_size = 32\n\ndata =\n Stream.repeatedly(fn ->\n x1 = Nx.random_uniform({batch_size, 1}, 0, 2)\n x2 = Nx.random_uniform({batch_size, 1}, 0, 2)\n y = Nx.logical_xor(x1, x2)\n\n {%{\"x1\" => x1, \"x2\" => x2}, y}\n end)\n```\n\nHere's how a sample batch looks:\n\n```elixir\nEnum.at(data, 0)\n```","ref":"xor.html#training-data","title":"Training data - Modeling XOR with a neural network","type":"extras"},{"doc":"It's time to train our model. In this case we use *binary cross entropy* for the loss and *stochastic gradient descent* as the optimizer. We use binary cross entropy because we can consider the task of computing XOR the same as a binary classification problem. We want our output to have a binary label `0` or `1`, and binary cross entropy is typically used in these cases. 
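For reference, for binary labels $y_i$ and predicted probabilities $p_i$, binary cross-entropy averages $-(y_i \\log p_i + (1 - y_i) \\log(1 - p_i))$ over the batch. 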
Having defined our training loop, we run it with `Axon.Loop.run/4`.\n\n```elixir\nepochs = 10\n\nparams =\n model\n |> Axon.Loop.trainer(:binary_cross_entropy, :sgd)\n |> Axon.Loop.run(data, %{}, epochs: epochs, iterations: 1000)\n```","ref":"xor.html#training","title":"Training - Modeling XOR with a neural network","type":"extras"},{"doc":"Finally, we can test our model on sample data.\n\n```elixir\nAxon.predict(model, params, %{\n \"x1\" => Nx.tensor([[0]]),\n \"x2\" => Nx.tensor([[1]])\n})\n```\n\nTry other combinations of $x_1$ and $x_2$ and see what the output is. To improve the model performance, you can increase the number of training epochs.","ref":"xor.html#trying-the-model","title":"Trying the model - Modeling XOR with a neural network","type":"extras"},{"doc":"The original XOR we modeled only works with binary values $0$ and $1$, however our model operates in continuous space. This means that we can give it $x_1 = 0.5$, $x_2 = 0.5$ as input and we expect _some_ output. We can use this to visualize the non-linear relationship between inputs $x_1$, $x_2$ and outputs that our model has learned.\n\n```elixir\n# The number of points per axis, determines the resolution\nn = 50\n\n# We generate coordinates of inputs in the (n x n) grid\nx1 = Nx.iota({n, n}, axis: 0) |> Nx.divide(n) |> Nx.reshape({:auto, 1})\nx2 = Nx.iota({n, n}, axis: 1) |> Nx.divide(n) |> Nx.reshape({:auto, 1})\n\n# The output is also a real number, but we round it into one of the two classes\ny = Axon.predict(model, params, %{\"x1\" => x1, \"x2\" => x2}) |> Nx.round()\n\nVl.new(width: 300, height: 300)\n|> Vl.data_from_values(\n x1: Nx.to_flat_list(x1),\n x2: Nx.to_flat_list(x2),\n y: Nx.to_flat_list(y)\n)\n|> Vl.mark(:circle)\n|> Vl.encode_field(:x, \"x1\", type: :quantitative)\n|> Vl.encode_field(:y, \"x2\", type: :quantitative)\n|> Vl.encode_field(:color, \"y\", type: :nominal)\n```\n\nFrom the plot we can clearly see that during training our model learnt two clean boundaries to separate $(0,0)$, $(1,1)$ from $(0,1)$, $(1,0)$.","ref":"xor.html#visualizing-the-model-predictions","title":"Visualizing the model predictions - Modeling XOR with a neural network","type":"extras"},{"doc":"# Classifying handwritten digits\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:req, \"~> 0.3.1\"}\n])\n```","ref":"mnist.html","title":"Classifying handwritten digits","type":"extras"},{"doc":"This livebook will walk you through training a basic neural network using Axon, accelerated by the EXLA compiler. We'll be working on the [MNIST](https://en.wikipedia.org/wiki/MNIST_database) dataset which is a dataset of handwritten digits with corresponding labels. The goal is to train a model that correctly classifies these handwritten digits with a single label [0-9].","ref":"mnist.html#introduction","title":"Introduction - Classifying handwritten digits","type":"extras"},{"doc":"The MNIST dataset is available for free online. Using `Req` we'll download both training images and training labels. Both `train_images` and `train_labels` are compressed binary data. Fortunately, `Req` takes care of the decompression for us.\n\nYou can read more about the format of the ubyte files [here](http://yann.lecun.com/exdb/mnist/). Each file starts with a magic number and some metadata. We can use binary pattern matching to extract the information we want. 
In this case we extract the raw binary images and labels.\n\n```elixir\nbase_url = \"https://storage.googleapis.com/cvdf-datasets/mnist/\"\n%{body: train_images} = Req.get!(base_url <> \"train-images-idx3-ubyte.gz\")\n%{body: train_labels} = Req.get!(base_url <> \"train-labels-idx1-ubyte.gz\")\n\n<<_::32, n_images::32, n_rows::32, n_cols::32, images::binary>> = train_images\n<<_::32, n_labels::32, labels::binary>> = train_labels\n```\n\nWe can easily read that binary data into a tensor using `Nx.from_binary/2`. `Nx.from_binary/2` expects a raw binary and a data type. In this case, both images and labels are stored as unsigned 8-bit integers. We can start by parsing our images:\n\n```elixir\nimages =\n images\n |> Nx.from_binary({:u, 8})\n |> Nx.reshape({n_images, 1, n_rows, n_cols}, names: [:images, :channels, :height, :width])\n |> Nx.divide(255)\n```\n\n`Nx.from_binary/2` returns a flat tensor. Using `Nx.reshape/3` we can manipulate this flat tensor into meaningful dimensions. Notice we also *normalized* the tensor by dividing the input data by 255. This squeezes the data between 0 and 1 which often leads to better behavior when training models. Now, let's see what these images look like:\n\n```elixir\nimages[[images: 0..4]] |> Nx.to_heatmap()\n```\n\nIn the reshape operation above, we give each dimension of the tensor a name. This makes it much easier to do things like slicing, and helps make your code easier to understand. Here we slice the `images` dimension of the images tensor to obtain the first 5 training images. Then, we convert them to a heatmap for easy visualization.\n\nIt's common to train neural networks in batches (actually correctly called minibatches, but you'll see batch and minibatch used interchangeably). We can \"batch\" our images into batches of 32 like this:\n\n```elixir\nimages = Nx.to_batched(images, 32)\n```\n\nNow, we'll need to get our labels into batches as well, but first we need to *one-hot encode* the labels. One-hot encoding converts input data from labels such as `3`, `5`, `7`, etc. into vectors of 0's and a single 1 at the correct label's index. As an example, a label of `3` gets converted to `[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]`.\n\n```elixir\ntargets =\n labels\n |> Nx.from_binary({:u, 8})\n |> Nx.new_axis(-1)\n |> Nx.equal(Nx.tensor(Enum.to_list(0..9)))\n |> Nx.to_batched(32)\n```","ref":"mnist.html#retrieving-and-exploring-the-dataset","title":"Retrieving and exploring the dataset - Classifying handwritten digits","type":"extras"},{"doc":"Let's start by defining a simple model:\n\n```elixir\nmodel =\n Axon.input(\"input\", shape: {nil, 1, 28, 28})\n |> Axon.flatten()\n |> Axon.dense(128, activation: :relu)\n |> Axon.dense(10, activation: :softmax)\n```\n\nAll `Axon` models start with an input layer to tell subsequent layers what shapes to expect. We then use `Axon.flatten/2` which flattens the previous layer by squeezing all dimensions but the first dimension into a single dimension. Our model consists of 2 fully connected layers with 128 and 10 units respectively. The first layer uses `:relu` activation which returns `max(0, input)` element-wise. The final layer uses `:softmax` activation to return a probability distribution over the 10 labels [0 - 9].","ref":"mnist.html#defining-the-model","title":"Defining the model - Classifying handwritten digits","type":"extras"},{"doc":"In Axon we express the task of training using a declarative loop API. First, we need to specify a loss function and optimizer; there are many built-in variants to choose from. 
In this example, we'll use *categorical cross-entropy* and the *Adam* optimizer. We will also keep track of the *accuracy* metric. Finally, we run the training loop, passing our batched images and labels. We'll train for 10 epochs using the `EXLA` compiler.\n\n```elixir\nparams =\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, :adam)\n |> Axon.Loop.metric(:accuracy, \"Accuracy\")\n |> Axon.Loop.run(Stream.zip(images, targets), %{}, epochs: 10, compiler: EXLA)\n```","ref":"mnist.html#training","title":"Training - Classifying handwritten digits","type":"extras"},{"doc":"Now that we have the parameters from the training step, we can use them for predictions.\nFor this, `Axon.predict/4` can be used.\n\n```elixir\nfirst_batch = Enum.at(images, 0)\n\noutput = Axon.predict(model, params, first_batch)\n```\n\nFor each image, the model outputs a probability distribution. This informs us how certain the model is about its prediction. Let's see the most probable digit for each image:\n\n```elixir\nNx.argmax(output, axis: 1)\n```\n\nIf you look at the original images, you will see that the predictions match the data!","ref":"mnist.html#prediction","title":"Prediction - Classifying handwritten digits","type":"extras"},{"doc":"# Classifying horses and humans\n\n```elixir\nMix.install([\n {:axon, \"~> 0.6.0\"},\n {:nx, \"~> 0.6.0\"},\n {:exla, \"~> 0.6.0\"},\n {:stb_image, \"~> 0.6.0\"},\n {:req, \"~> 0.4.5\"},\n {:kino, \"~> 0.11.0\"}\n])\n\nNx.global_default_backend(EXLA.Backend)\nNx.Defn.global_default_options(compiler: EXLA)\n```","ref":"horses_or_humans.html","title":"Classifying horses and humans","type":"extras"},{"doc":"In this notebook, we want to predict whether an image presents a horse or a human. To do this efficiently, we will build a Convolutional Neural Network (CNN) and compare the learning process with and without gradient centralization.","ref":"horses_or_humans.html#introduction","title":"Introduction - Classifying horses and humans","type":"extras"},{"doc":"We will be using the [Horses or Humans Dataset](https://laurencemoroney.com/datasets.html#horses-or-humans-dataset). The dataset is available as a ZIP with image files, which we will download using `req`. Conveniently, `req` will unzip the files for us; we just need to convert the filenames from charlists to strings.\n\n```elixir\n%{body: files} =\n Req.get!(\"https://storage.googleapis.com/learning-datasets/horse-or-human.zip\")\n\nfiles = for {name, binary} <- files, do: {List.to_string(name), binary}\n```\n\n#","ref":"horses_or_humans.html#loading-the-data","title":"Loading the data - Classifying horses and humans","type":"extras"},{"doc":"We need to know how many images to include in a batch. A batch is a group of images to load into the GPU at a time. If the batch size is too big for your GPU, it will run out of memory; in that case, you can reduce the batch size. It is generally optimal to utilize almost all of the GPU memory during training. It will take more time to train with a lower batch size.\n\n```elixir\nbatch_size = 32\nbatches_per_epoch = div(length(files), batch_size)\n```","ref":"horses_or_humans.html#note-on-batching","title":"Note on batching - Classifying horses and humans","type":"extras"},{"doc":"We'll have a really quick look at our data. Let's see what we are dealing with:\n\n```elixir\n{name, binary} = Enum.random(files)\nKino.Markdown.new(name) |> Kino.render()\nKino.Image.new(binary, :png)\n```\n\nReevaluate the cell a couple of times to view different images. 
Note that the file names are either `horse[N]-[M].png` or `human[N]-[M].png`, so we can derive the expected class from that.\n\n\n\nWhile we are at it, look at this beautiful animation:\n\n```elixir\nnames_to_animate = [\"horse01\", \"horse05\", \"human01\", \"human05\"]\n\nimages_to_animate =\n for {name, binary} <- files, Enum.any?(names_to_animate, &String.contains?(name, &1)) do\n Kino.Image.new(binary, :png)\n end\n\nKino.animate(50, images_to_animate, fn\n _i, [image | images] -> {:cont, image, images}\n _i, [] -> :halt\nend)\n```\n\nHow many images are there?\n\n```elixir\nlength(files)\n```\n\nHow many images will not be used for training? The remainder of the integer division will be ignored.\n\n```elixir\nfiles\n|> length()\n|> rem(batch_size)\n```","ref":"horses_or_humans.html#a-look-at-the-data","title":"A look at the data - Classifying horses and humans","type":"extras"},{"doc":"First, we need to preprocess the data for our CNN. At the beginning of the process, we chunk images into batches. Then, we use the `parse_file/1` function to load images and label them accurately. Finally, we \"augment\" the input, which means that we normalize data and flip the images along one of the axes. The last procedure helps a neural network to make predictions regardless of the orientation of the image.\n\n```elixir\ndefmodule HorsesHumans.DataProcessing do\n import Nx.Defn\n\n def data_stream(files, batch_size) do\n files\n |> Enum.shuffle()\n |> Stream.chunk_every(batch_size, batch_size, :discard)\n |> Task.async_stream(\n fn batch ->\n {images, labels} = batch |> Enum.map(&parse_file/1) |> Enum.unzip()\n {Nx.stack(images), Nx.stack(labels)}\n end,\n timeout: :infinity\n )\n |> Stream.map(fn {:ok, {images, labels}} -> {augment(images), labels} end)\n |> Stream.cycle()\n end\n\n defp parse_file({filename, binary}) do\n label =\n if String.starts_with?(filename, \"horses/\"),\n do: Nx.tensor([1, 0], type: {:u, 8}),\n else: Nx.tensor([0, 1], type: {:u, 8})\n\n image = binary |> StbImage.read_binary!() |> StbImage.to_nx()\n\n {image, label}\n end\n\n defnp augment(images) do\n # Normalize\n images = images / 255.0\n\n # Optional vertical/horizontal flip\n { u, _new_key } = Nx.Random.key(1987) |> Nx.Random.uniform()\n\n cond do\n u < 0.25 -> images\n u < 0.5 -> Nx.reverse(images, axes: [2])\n u < 0.75 -> Nx.reverse(images, axes: [3])\n true -> Nx.reverse(images, axes: [2, 3])\n end\n end\nend\n```","ref":"horses_or_humans.html#data-processing","title":"Data processing - Classifying horses and humans","type":"extras"},{"doc":"The next step is creating our model. In this notebook, we choose the classic Convolutional Neural Network architecture. Let's dive in to the core components of a CNN.\n\n\n\n`Axon.conv/3` adds a convolutional layer, which is at the core of a CNN. A convolutional layer applies a filter function throughout the image, sliding a window with shape `:kernel_size`. As opposed to dense layers, a convolutional layer exploits weight sharing to better model data where locality matters. 
This feature is a natural fit for images.\n\n\n\n| ![](https://miroslawmamczur.pl/wp-content/uploads/2021/03/06.gif) |\n| :-------------------------------------------------------------------------------------: |\n| Figure 1: A step-by-step visualization of a convolution layer for `kernel_size: {3, 3}` |\n\n\n\n`Axon.max_pool/2` adds a downscaling operation that takes the maximum value from a subtensor according to `:kernel_size`.\n\n\n\n| ![](https://production-media.paperswithcode.com/methods/MaxpoolSample2.png) |\n| :-------------------------------------------------------------------------: |\n| Figure 2: Max pooling operation for `kernel_size: {2, 2}` |\n\n\n\n`Axon.dropout/2` and `Axon.spatial_dropout/2` add dropout layers which prevent a neural network from overfitting. Standard dropout drops a given rate of randomly chosen neurons during the training process. On the other hand, spatial dropout gets rid of whole feature maps. The graphical difference between dropout and spatial dropout is presented in the picture below.\n\n\n\n| ![](https://miro.medium.com/max/1400/1*KkqxjvXTIV_b365B41ltfg.png) |\n| :-------------------------------------------------------------------: |\n| Figure 3: The difference between standard dropout and spatial dropout |\n\n\n\nKnowing the relevant building blocks, let's build our network! It will have a convolutional part, composed of convolutional and pooling layers; this part should capture the spatial features of an image. Then at the end, we will add a dense layer with 512 neurons fed with all the spatial features, and a final two-neuron layer as our classification output.\n\n```elixir\nmodel =\n Axon.input(\"input\", shape: {nil, 300, 300, 4})\n |> Axon.conv(16, kernel_size: {3, 3}, activation: :relu)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(32, kernel_size: {3, 3}, activation: :relu)\n |> Axon.spatial_dropout(rate: 0.5)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)\n |> Axon.spatial_dropout(rate: 0.5)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)\n |> Axon.max_pool(kernel_size: {2, 2})\n |> Axon.flatten()\n |> Axon.dropout(rate: 0.5)\n |> Axon.dense(512, activation: :relu)\n |> Axon.dense(2, activation: :softmax)\n```","ref":"horses_or_humans.html#building-the-model","title":"Building the model - Classifying horses and humans","type":"extras"},{"doc":"It's time to train our model. We specify the loss, optimizer and choose accuracy as our metric. We also set `log: 1` to frequently update the training progress. We manually specify the number of iterations, such that each epoch goes through all of the batches once.\n\n```elixir\ndata = HorsesHumans.DataProcessing.data_stream(files, batch_size)\n\noptimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-4)\n\nparams =\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, log: 1)\n |> Axon.Loop.metric(:accuracy)\n |> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)\n```\n\n","ref":"horses_or_humans.html#training-the-model","title":"Training the model - Classifying horses and humans","type":"extras"},{"doc":"We can improve the training by applying gradient centralization. It is a technique with a similar purpose to batch normalization. For each loss gradient, we subtract a mean value to have a gradient with mean equal to zero. 
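In symbols, each weight gradient $g$ is replaced by $g - \\mathrm{mean}(g)$ before the optimizer update (a simplified view; the mean is taken per weight tensor). 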
This process prevents gradients from exploding.\n\n```elixir\ncentralized_optimizer = Polaris.Updates.compose(Polaris.Updates.centralize(), optimizer)\n\nmodel\n|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, log: 1)\n|> Axon.Loop.metric(:accuracy)\n|> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)\n```","ref":"horses_or_humans.html#extra-gradient-centralization","title":"Extra: gradient centralization - Classifying horses and humans","type":"extras"},{"doc":"We can now use our trained model; let's try a couple of examples.\n\n```elixir\n{name, binary} = Enum.random(files)\nKino.Markdown.new(name) |> Kino.render()\nKino.Image.new(binary, :png) |> Kino.render()\n\ninput =\n binary\n |> StbImage.read_binary!()\n |> StbImage.to_nx()\n |> Nx.new_axis(0)\n |> Nx.divide(255.0)\n\nAxon.predict(model, params, input)\n```\n\n_Note: the model output refers to the probability that the image presents a horse and a human respectively._\n\n\n\nYou can find a validation set [here](https://storage.googleapis.com/learning-datasets/validation-horse-or-human.zip), in case you want to experiment further!","ref":"horses_or_humans.html#inference","title":"Inference - Classifying horses and humans","type":"extras"},{"doc":"# Generating text with LSTM\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:req, \"~> 0.3.1\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\nNx.global_default_backend(EXLA.Backend)\n```","ref":"lstm_generation.html","title":"Generating text with LSTM","type":"extras"},{"doc":"Recurrent Neural Networks (RNNs) can be used as generative models. This means that in addition to being used for predictive models (making predictions) they can learn the sequences of a problem and then generate entirely new plausible sequences for the problem domain.\n\nGenerative models like this are useful not only to study how well a model has learned a problem, but to learn more about the problem domain itself.\n\nIn this example, we will discover how to create a generative model for text, character-by-character, using Long Short-Term Memory (LSTM) recurrent neural networks in Elixir with Axon.","ref":"lstm_generation.html#introduction","title":"Introduction - Generating text with LSTM","type":"extras"},{"doc":"Using [Project Gutenberg](https://www.gutenberg.org/) we can download text books that are no longer protected under copyright, so we can experiment with them.\n\nThe one that we will use for this experiment is [Alice's Adventures in Wonderland by Lewis Carroll](https://www.gutenberg.org/ebooks/11). You can choose any other text or book that you like for this experiment.\n\n```elixir\n# Change the URL if you'd like to experiment with other books\ndownload_url = \"https://www.gutenberg.org/files/11/11-0.txt\"\n\nbook_text = Req.get!(download_url).body\n```\n\nFirst of all, we need to normalize the content of the book. We are only interested in the sequence of English characters, periods and new lines. Also, we currently don't care about capitalization or things like apostrophes, so we can remove all other characters and downcase everything. We can use a regular expression for that.\n\nWe can also convert the string into a list of characters so they are easier to handle. 
You will see exactly why a bit further on.\n\n```elixir\nnormalized_book_text =\n book_text\n |> String.downcase()\n |> String.replace(~r/[^a-z \\.\\n]/, \"\")\n |> String.to_charlist()\n```\n\nWe converted the text to a list of characters, where each character is a number (specifically, a Unicode code point). Lowercase English characters are represented with numbers between `97 = a` and `122 = z`, a space is `32 = [ ]`, a new line is `10 = \\n` and the period is `46 = .`.\n\nSo we should have 26 + 3 (= 29) characters in total. Let's see if that's true.\n\n```elixir\nnormalized_book_text |> Enum.uniq() |> Enum.count()\n```\n\nSince we want to use these 29 characters as possible values for each input in our neural network, we can re-map them to values between 0 and 28. So each specific neuron will indicate a specific character.\n\n```elixir\n# Extract all the unique characters we have and sort them for clarity\ncharacters = normalized_book_text |> Enum.uniq() |> Enum.sort()\ncharacters_count = Enum.count(characters)\n\n# Create a mapping for every character\nchar_to_idx = characters |> Enum.with_index() |> Map.new()\n# And a reverse mapping to convert back to characters\nidx_to_char = characters |> Enum.with_index(&{&2, &1}) |> Map.new()\n\nIO.puts(\"Total book characters: #{Enum.count(normalized_book_text)}\")\nIO.puts(\"Total unique characters: #{characters_count}\")\n```\n\nNow we need to create our training and testing data sets. But how?\n\nOur goal is to teach the machine what (usually) comes after a sequence of characters. For example, given the following sequence **\"Hello, My name i\"** the computer should be able to guess that the next character is probably **\"s\"**.\n\n\n\n\n\n```mermaid\ngraph LR;\n A[Input: Hello my name i]-->NN[Neural Network]-->B[Output: s];\n```\n\n\n\nLet's choose an arbitrary sequence length and create a data set from the book text. All we need to do is read X characters from the book as the input and then read 1 more as the designated output.\n\nAfter doing all that, we also want to convert every character to its index using the `char_to_idx` mapping that we have created before.\n\nNeural networks work best if you scale your inputs and outputs. In this case we are going to scale everything between 0 and 1 by dividing them by the number of unique characters that we have.\n\nAnd for the final step we will reshape it so we can use the data in our LSTM model.\n\n```elixir\nsequence_length = 100\n\ntrain_data =\n normalized_book_text\n |> Enum.map(&Map.fetch!(char_to_idx, &1))\n |> Enum.chunk_every(sequence_length, 1, :discard)\n # We don't want the last chunk since we don't have a prediction for it.\n |> Enum.drop(-1)\n |> Nx.tensor()\n |> Nx.divide(characters_count)\n |> Nx.reshape({:auto, sequence_length, 1})\n```\n\nFor our train results, we will do the same. Drop the first `sequence_length` characters and then convert them to the mapping. Additionally, we will do **one-hot encoding**.\n\nThe reason we want to use one-hot encoding is that in our model we don't want to only return a character as the output. We want it to return the probability of each character for the output. 
This way we can decide whether a given probability is good enough, choose between multiple possible outputs, or even discard everything if the network is not confident enough.\n\nIn Nx, you can achieve this encoding by using this snippet:\n\n```elixir\nNx.tensor([\n [0],\n [1],\n [2]\n])\n|> Nx.equal(Nx.iota({1, 3}))\n```\n\nTo sum it up, here is how we generate the train results.\n\n```elixir\ntrain_results =\n normalized_book_text\n |> Enum.drop(sequence_length)\n |> Enum.map(&Map.fetch!(char_to_idx, &1))\n |> Nx.tensor()\n |> Nx.reshape({:auto, 1})\n |> Nx.equal(Nx.iota({1, characters_count}))\n```","ref":"lstm_generation.html#preparation","title":"Preparation - Generating text with LSTM","type":"extras"},{"doc":"```elixir\n# As the input, we expect the sequence_length characters\n\nmodel =\n Axon.input(\"input_chars\", shape: {nil, sequence_length, 1})\n # The LSTM layer of our network\n |> Axon.lstm(256)\n # Selecting only the output from the LSTM Layer\n |> then(fn {out, _} -> out end)\n # Since we only want the last sequence in LSTM we will slice it and\n # select the last one\n |> Axon.nx(fn t -> t[[0..-1//1, -1]] end)\n # 20% dropout so we will not become too dependent on specific neurons\n |> Axon.dropout(rate: 0.2)\n # The output layer. One neuron for each character, using softmax\n # as the activation so every node represents a probability\n |> Axon.dense(characters_count, activation: :softmax)\n```","ref":"lstm_generation.html#defining-the-model","title":"Defining the Model - Generating text with LSTM","type":"extras"},{"doc":"To train the network, we will use Axon's Loop API. It is pretty straightforward.\n\nFor the loss function we can use _categorical cross-entropy_ since we are dealing with categories (each character) in our output. For the optimizer we can use _Adam_.\n\nWe will train our network for 20 epochs. Note that we are working with a fair amount of data, so it may take a long time unless you run it on a GPU.\n\n```elixir\nbatch_size = 128\ntrain_batches = Nx.to_batched(train_data, batch_size)\nresult_batches = Nx.to_batched(train_results, batch_size)\n\nIO.puts(\"Total batches: #{Enum.count(train_batches)}\")\n\nparams =\n model\n |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))\n |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 20, compiler: EXLA)\n\n:ok\n```","ref":"lstm_generation.html#training-the-network","title":"Training the network - Generating text with LSTM","type":"extras"},{"doc":"Now we have a trained neural network, so we can start generating text with it! We just need to pass the initial sequence as the input to the network and select the most probable output. 
`Axon.predict/3` will give us the output layer and then using `Nx.argmax/1` we get the most confident neuron index, then simply convert that index back to its Unicode representation.\n\n```elixir\ngenerate_fn = fn model, params, init_seq ->\n # The initial sequence that we want the network to complete for us.\n init_seq =\n init_seq\n |> String.trim()\n |> String.downcase()\n |> String.to_charlist()\n |> Enum.map(&Map.fetch!(char_to_idx, &1))\n\n Enum.reduce(1..100, init_seq, fn _, seq ->\n init_seq =\n seq\n |> Enum.take(-sequence_length)\n |> Nx.tensor()\n |> Nx.divide(characters_count)\n |> Nx.reshape({1, sequence_length, 1})\n\n char =\n Axon.predict(model, params, init_seq)\n |> Nx.argmax()\n |> Nx.to_number()\n\n seq ++ [char]\n end)\n |> Enum.map(&Map.fetch!(idx_to_char, &1))\nend\n\n# The initial sequence that we want the network to complete for us.\ninit_seq = \"\"\"\nnot like to drop the jar for fear\nof killing somebody underneath so managed to put it into one of the\ncupboards as she fell past it.\n\"\"\"\n\ngenerate_fn.(model, params, init_seq) |> IO.puts()\n```","ref":"lstm_generation.html#generating-text","title":"Generating text - Generating text with LSTM","type":"extras"},{"doc":"We can improve our network by stacking multiple LSTM layers together. We just need to change our model and re-train our network.\n\n```elixir\nnew_model =\n Axon.input(\"input_chars\", shape: {nil, sequence_length, 1})\n |> Axon.lstm(256)\n |> then(fn {out, _} -> out end)\n |> Axon.dropout(rate: 0.2)\n # This time we will pass all of the `out` to the next lstm layer.\n # We just need to slice the last one.\n |> Axon.lstm(256)\n |> then(fn {out, _} -> out end)\n |> Axon.nx(fn x -> x[[0..-1//1, -1]] end)\n |> Axon.dropout(rate: 0.2)\n |> Axon.dense(characters_count, activation: :softmax)\n```\n\nThen we can train the network using the exact same code as before\n\n```elixir\n# Using a smaller batch size in this case will give the network more opportunity to learn\nbatch_size = 64\ntrain_batches = Nx.to_batched(train_data, batch_size)\nresult_batches = Nx.to_batched(train_results, batch_size)\n\nIO.puts(\"Total batches: #{Enum.count(train_batches)}\")\n\nnew_params =\n new_model\n |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))\n |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 50, compiler: EXLA)\n\n:ok\n```","ref":"lstm_generation.html#multi-lstm-layers","title":"Multi LSTM layers - Generating text with LSTM","type":"extras"},{"doc":"```elixir\ngenerate_fn.(new_model, new_params, init_seq) |> IO.puts()\n```\n\nAs you may see, it improved a lot with this new model and the extensive training. 
This time it has picked up rules like adding a space after a period.","ref":"lstm_generation.html#generate-text-with-the-new-network","title":"Generate text with the new network - Generating text with LSTM","type":"extras"},{"doc":"The above example was heavily inspired by [this article](https://machinelearningmastery.com/text-generation-lstm-recurrent-neural-networks-python-keras/) by Jason Brownlee.","ref":"lstm_generation.html#references","title":"References - Generating text with LSTM","type":"extras"},{"doc":"# Classifying fraudulent transactions\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:explorer, \"~> 0.3.1\"},\n {:kino, \"~> 0.7.0\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\nNx.global_default_backend(EXLA.Backend)\n\nalias Explorer.{DataFrame, Series}\n```","ref":"credit_card_fraud.html","title":"Classifying fraudulent transactions","type":"extras"},{"doc":"This time we will examine the Credit Card Fraud Dataset. Due to confidentiality, the original data were preprocessed by principal component analysis (PCA), and then 31 principal components were selected for the final data set. The dataset is highly imbalanced. The positive class (frauds) accounts for 0.172% of all transactions. Eventually, we will create a classifier which not only has great accuracy but, more importantly, high _recall_ and _precision_ - two metrics that are much more indicative of performance with imbalanced classification problems.","ref":"credit_card_fraud.html#introduction","title":"Introduction - Classifying fraudulent transactions","type":"extras"},{"doc":"The first step is to prepare the data for training and evaluation. Please download the dataset in the CSV format from https://www.kaggle.com/mlg-ulb/creditcardfraud (this requires a Kaggle account). Once done, put the file path in the input below.\n\n```elixir\ndata_path_input = Kino.Input.text(\"Data path (CSV)\")\n```\n\nNow, let's read the data into an `Explorer.DataFrame`:\n\n```elixir\ndata_path = Kino.Input.read(data_path_input)\n\ndf = DataFrame.from_csv!(data_path, dtypes: [{\"Time\", :float}])\n```\n\nFor further processing, we will need a couple of helper functions. We will group them in a module for convenience.\n\n```elixir\ndefmodule CreditCard.Data do\n import Nx.Defn\n\n def split_train_test(df, portion) do\n num_examples = DataFrame.n_rows(df)\n num_train = ceil(portion * num_examples)\n num_test = num_examples - num_train\n\n train = DataFrame.slice(df, 0, num_train)\n test = DataFrame.slice(df, num_train, num_test)\n {train, test}\n end\n\n def split_features_targets(df) do\n features = DataFrame.select(df, &(&1 == \"Class\"), :drop)\n targets = DataFrame.select(df, &(&1 == \"Class\"), :keep)\n {features, targets}\n end\n\n def df_to_tensor(df) do\n df\n |> DataFrame.names()\n |> Enum.map(&Series.to_tensor(df[&1]))\n |> Nx.stack(axis: 1)\n end\n\n defn normalize_features(tensor) do\n max =\n tensor\n |> Nx.abs()\n |> Nx.reduce_max(axes: [0], keep_axes: true)\n\n tensor / max\n end\nend\n```\n\nWith that, we can start converting the data into the desired format. First, we split the data into training and test sets (80% for training and 20% for testing).\n\n```elixir\n{train_df, test_df} = CreditCard.Data.split_train_test(df, 0.8)\n{DataFrame.n_rows(train_df), DataFrame.n_rows(test_df)}\n```\n\nNext, we separate features from labels and convert both to tensors. 
In the case of the features, we additionally normalize each of them by dividing by the maximum absolute value of that feature.\n\n```elixir\n{train_features, train_targets} = CreditCard.Data.split_features_targets(train_df)\n{test_features, test_targets} = CreditCard.Data.split_features_targets(test_df)\n\ntrain_inputs =\n train_features\n |> CreditCard.Data.df_to_tensor()\n |> CreditCard.Data.normalize_features()\n\ntest_inputs =\n test_features\n |> CreditCard.Data.df_to_tensor()\n |> CreditCard.Data.normalize_features()\n\ntrain_targets = CreditCard.Data.df_to_tensor(train_targets)\ntest_targets = CreditCard.Data.df_to_tensor(test_targets)\n\n:ok\n```","ref":"credit_card_fraud.html#data-processing","title":"Data processing - Classifying fraudulent transactions","type":"extras"},{"doc":"Our model for predicting whether a transaction was fraudulent or not is a dense neural network. It consists of two dense layers with 256 neurons each and ReLU activation functions, one dropout layer, and a dense layer with one neuron (since the problem is a binary prediction) followed by a sigmoid activation function.\n\n```elixir\nmodel =\n Axon.input(\"input\")\n |> Axon.dense(256)\n |> Axon.relu()\n |> Axon.dense(256)\n |> Axon.relu()\n |> Axon.dropout(rate: 0.3)\n |> Axon.dense(1)\n |> Axon.sigmoid()\n```","ref":"credit_card_fraud.html#building-the-model","title":"Building the model - Classifying fraudulent transactions","type":"extras"},{"doc":"Now that we have both the data and the model architecture prepared, it's time to train!\n\nNote the disproportion in the data samples:\n\n```elixir\nfraud = Nx.sum(train_targets) |> Nx.to_number()\nlegit = Nx.size(train_targets) - fraud\n\nbatched_train_inputs = Nx.to_batched(train_inputs, 2048)\nbatched_train_targets = Nx.to_batched(train_targets, 2048)\nbatched_train = Stream.zip(batched_train_inputs, batched_train_targets)\n\nIO.puts(\"# of legit transactions (train): #{legit}\")\nIO.puts(\"# of fraudulent transactions (train): #{fraud}\")\nIO.puts(\"% fraudulent transactions (train): #{100 * (fraud / (legit + fraud))}%\")\n```\n\nAs always, we define our training loop. We are using _binary cross-entropy_ as our loss function and Adam as the optimizer with a learning rate of 0.01. Then we immediately start the training, passing in the training portion of the dataset.\n\n```elixir\nloss =\n &Axon.Losses.binary_cross_entropy(\n &1,\n &2,\n negative_weight: 1 / legit,\n positive_weight: 1 / fraud,\n reduction: :mean\n )\n\noptimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-2)\n\nparams =\n model\n |> Axon.Loop.trainer(loss, optimizer)\n |> Axon.Loop.run(batched_train, %{}, epochs: 30, compiler: EXLA)\n\n:ok\n```","ref":"credit_card_fraud.html#training-our-model","title":"Training our model - Classifying fraudulent transactions","type":"extras"},{"doc":"After the training, there is only one thing left: testing. 
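Since the introduction emphasized _precision_ and _recall_, it's worth recalling how they relate to the confusion-matrix counts we are about to collect. A quick sketch with hypothetical counts (not outputs of this model):\n\n```elixir\n# Hypothetical counts, for illustration only\ntp = 80\nfp = 10\nfalse_neg = 15\n\nprecision = tp / (tp + fp)\nrecall = tp / (tp + false_neg)\n{precision, recall}\n# precision ≈ 0.889, recall ≈ 0.842\n```\n\n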
Here, we will focus not only on the numbers of true positive, true negative, false positive, and false negative predictions, but also on the likelihood of denying legit and fraudulent transactions.\n\n```elixir\nbatched_test_inputs = Nx.to_batched(test_inputs, 2048)\nbatched_test_targets = Nx.to_batched(test_targets, 2048)\nbatched_test = Stream.zip(batched_test_inputs, batched_test_targets)\n\nsummarize = fn %Axon.Loop.State{metrics: metrics} = state ->\n legit_transactions_declined = Nx.to_number(metrics[\"fp\"])\n legit_transactions_accepted = Nx.to_number(metrics[\"tn\"])\n fraud_transactions_accepted = Nx.to_number(metrics[\"fn\"])\n fraud_transactions_declined = Nx.to_number(metrics[\"tp\"])\n total_fraud = fraud_transactions_declined + fraud_transactions_accepted\n total_legit = legit_transactions_declined + legit_transactions_accepted\n\n fraud_denial_percent = 100 * (fraud_transactions_declined / total_fraud)\n legit_denial_percent = 100 * (legit_transactions_declined / total_legit)\n\n IO.write(\"\\n\")\n IO.puts(\"Legit Transactions Declined: #{legit_transactions_declined}\")\n IO.puts(\"Fraudulent Transactions Caught: #{fraud_transactions_declined}\")\n IO.puts(\"Fraudulent Transactions Missed: #{fraud_transactions_accepted}\")\n IO.puts(\"Likelihood of catching fraud: #{fraud_denial_percent}%\")\n IO.puts(\"Likelihood of denying legit transaction: #{legit_denial_percent}%\")\n\n {:continue, state}\nend\n\nmodel\n|> Axon.Loop.evaluator()\n|> Axon.Loop.metric(:true_positives, \"tp\", :running_sum)\n|> Axon.Loop.metric(:true_negatives, \"tn\", :running_sum)\n|> Axon.Loop.metric(:false_positives, \"fp\", :running_sum)\n|> Axon.Loop.metric(:false_negatives, \"fn\", :running_sum)\n|> Axon.Loop.handle(:epoch_completed, summarize)\n|> Axon.Loop.run(batched_test, params, compiler: EXLA)\n\n:ok\n```","ref":"credit_card_fraud.html#model-evaluation","title":"Model evaluation - Classifying fraudulent transactions","type":"extras"},{"doc":"# MNIST Denoising Autoencoder using Kino for visualization\n\n```elixir\nMix.install([\n {:exla, \"~> 0.4.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:axon, \"~> 0.3.0\"},\n {:req, \"~> 0.3.1\"},\n {:kino, \"~> 0.7.0\"},\n {:scidata, \"~> 0.1.9\"},\n {:stb_image, \"~> 0.5.2\"},\n {:table_rex, \"~> 3.1.1\"}\n])\n```","ref":"mnist_autoencoder_using_kino.html","title":"MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"The goal of this notebook is to build a Denoising Autoencoder from scratch using Livebook. This notebook is based on [Training an Autoencoder on Fashion MNIST](fashionmnist_autoencoder.livemd), but includes some tips on using Livebook to train the model and using [Kino](https://hexdocs.pm/kino/Kino.html) (Livebook's interactive widget library) to play with and visualize our results.","ref":"mnist_autoencoder_using_kino.html#introduction","title":"Introduction - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"An autoencoder learns to recreate data it's seen in the dataset. 
For this notebook, we're going to try something simple: generating images of digits using the MNIST digit recognition dataset.\n\n\n\nFollowing along with the [Fashion MNIST Autoencoder example](fashionmnist_autoencoder.livemd), we'll use [Scidata](https://github.com/elixir-nx/scidata) to download the MNIST dataset and then preprocess the data.\n\n```elixir\n# We're not going to use the labels so we'll ignore them\n{train_images, _train_labels} = Scidata.MNIST.download()\n{train_images_binary, type, shape} = train_images\n```\n\nThe `shape` tells us we have 60,000 images with a single channel of size 28x28.\n\nAccording to [the MNIST website](http://yann.lecun.com/exdb/mnist/):\n\n> Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).\n\nLet's preprocess and normalize the data accordingly.\n\n```elixir\ntrain_images =\n train_images_binary\n |> Nx.from_binary(type)\n # Since pixels are organized row-wise, reshape into rows x columns\n |> Nx.reshape(shape, names: [:images, :channels, :height, :width])\n # Normalize the pixel values to be between 0 and 1\n |> Nx.divide(255)\n```\n\n```elixir\n# Make sure they look like numbers\ntrain_images[[images: 0..2]] |> Nx.to_heatmap()\n```\n\nThat looks right! Let's repeat the process for the test set.\n\n```elixir\n{test_images, _test_labels} = Scidata.MNIST.download_test()\n{test_images_binary, type, shape} = test_images\n\ntest_images =\n test_images_binary\n |> Nx.from_binary(type)\n # Since pixels are organized row-wise, reshape into rows x columns\n |> Nx.reshape(shape, names: [:images, :channels, :height, :width])\n # Normalize the pixel values to be between 0 and 1\n |> Nx.divide(255)\n\ntest_images[[images: 0..2]] |> Nx.to_heatmap()\n```","ref":"mnist_autoencoder_using_kino.html#data-loading","title":"Data loading - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"An autoencoder is a network that has the same sized input as output, with a \"bottleneck\" layer in the middle with far fewer parameters than the input. Its goal is to force the output to reconstruct the input. The bottleneck layer forces the network to learn a compressed representation of the input space.\n\nA _denoising_ autoencoder is a small tweak on an autoencoder that takes a corrupted input (often corrupted by adding noise or zeroing out pixels) and reconstructs the original input, removing the noise in the process.\n\nThe part of the autoencoder that takes the input and compresses it into the bottleneck layer is called the _encoder_ and the part that takes the compressed representation and reconstructs the input is called the _decoder_. Usually the decoder mirrors the encoder.\n\nMNIST is a pretty easy dataset, so we're going to try a fairly small autoencoder.\n\nThe input image has size 784 (28 rows * 28 cols * 1 channel). We'll set up the encoder to turn that into 256 features, then 128, 64, and then 10 features for the bottleneck layer. The decoder will do the reverse, taking the 10 features back to 64, 128, 256, and 784. 
I'll use fully-connected (dense) layers.\n\n\n\n#","ref":"mnist_autoencoder_using_kino.html#building-the-model","title":"Building the model - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"```elixir\nmodel =\n Axon.input(\"image\", shape: {nil, 1, 28, 28})\n # This is now 28*28*1 = 784\n |> Axon.flatten()\n # The encoder\n |> Axon.dense(256, activation: :relu)\n |> Axon.dense(128, activation: :relu)\n |> Axon.dense(64, activation: :relu)\n # Bottleneck layer\n |> Axon.dense(10, activation: :relu)\n # The decoder\n |> Axon.dense(64, activation: :relu)\n |> Axon.dense(128, activation: :relu)\n |> Axon.dense(256, activation: :relu)\n |> Axon.dense(784, activation: :sigmoid)\n # Turn it back into a 28x28 single channel image\n |> Axon.reshape({:auto, 1, 28, 28})\n\n# We can use Axon.Display to show us what each of the layers would look like\n# assuming we send in a batch of 4 images\nAxon.Display.as_table(model, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()\n```\n\nChecking our understanding: since the layers are all dense layers, each layer should have `input_features * output_features` parameters for the weights + `output_features` parameters for the biases.\n\nThis should match the `Total Parameters` output from `Axon.Display` (486298 parameters).\n\n```elixir\n# encoder\nencoder_parameters = 784 * 256 + 256 + (256 * 128 + 128) + (128 * 64 + 64) + (64 * 10 + 10)\ndecoder_parameters = 10 * 64 + 64 + (64 * 128 + 128) + (128 * 256 + 256) + (256 * 784 + 784)\ntotal_parameters = encoder_parameters + decoder_parameters\n```\n\n#","ref":"mnist_autoencoder_using_kino.html#the-model","title":"The model - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"With the model set up, we can now train it. We'll use MSE loss to compare our reconstruction with the original.\n\n\n\nWe'll create the training input by turning our image list into batches of size 128 and then using the same image as both the input and the target. However, the input image will have noise added to it that the autoencoder will have to remove.\n\nFor validation data, we'll use the test set and look at how the autoencoder does at reconstructing the test set to make sure we're not overfitting.\n\n\n\nThe function below adds some noise to the image by combining it with Gaussian noise scaled by a noise factor. We then have to make sure the pixel values are still within the 0.0..1.0 range.\n\nWe have to define this function using `defn` so that `Nx` can optimize it. If we don't do this, adding noise will take a really long time, making our training loop very slow. See [Nx.defn](https://hexdocs.pm/nx/Nx.Defn.html) for more details. 
`defn` can only be used in a module so we'll define a little module to contain it.\n\n```elixir\ndefmodule Noiser do\n import Nx.Defn\n\n @noise_factor 0.4\n\n defn add_noise(images) do\n @noise_factor\n |> Nx.multiply(Nx.random_normal(images))\n |> Nx.add(images)\n |> Nx.clip(0.0, 1.0)\n end\nend\n\nadd_noise = Nx.Defn.jit(&Noiser.add_noise/1, compiler: EXLA)\n```\n\n```elixir\nbatch_size = 128\n\n# The original image, which is the target the network will be trying to match\nbatched_train_images =\n train_images\n |> Nx.to_batched(batch_size)\n\nbatched_noisy_train_images =\n train_images\n |> Nx.to_batched(batch_size)\n # goes after to_batched so the noise is different every time\n |> Stream.map(add_noise)\n\n# The noisy image is the input to the network\n# and the original image is the target it's trying to match\ntrain_data = Stream.zip(batched_noisy_train_images, batched_train_images)\n\nbatched_test_images =\n test_images\n |> Nx.to_batched(batch_size)\n\nbatched_noisy_test_images =\n test_images\n |> Nx.to_batched(batch_size)\n |> Stream.map(add_noise)\n\ntest_data = Stream.zip(batched_noisy_test_images, batched_test_images)\n```\n\nLet's see what an element of the input and target looks like:\n\n```elixir\n{input_batch, target_batch} = Enum.at(train_data, 0)\n{Nx.to_heatmap(input_batch[images: 0]), Nx.to_heatmap(target_batch[images: 0])}\n```\n\nLooks right (and tricky). Let's see how the model does.\n\n```elixir\nparams =\n model\n |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))\n |> Axon.Loop.validate(model, test_data)\n |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)\n\n:ok\n```\n\nNow that we have a model that theoretically has learned _something_, we'll see what it's learned by running it on some images from the test set. We'll use Kino to allow us to select the image from the test set to run the model against. To avoid losing the params that took a while to train, we'll create another branch so we can experiment with the params and stop execution when needed without having to retrain.\n\n","ref":"mnist_autoencoder_using_kino.html#training","title":"Training - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"**A note on branching**\n\nBy default, everything in Livebook runs sequentially in a single process. Stopping a running cell aborts that process and consequently all its state is lost. A **branching section** copies everything from its parent and runs in a separate process. Thanks to this **isolation**, when we stop a cell in a branching section, only the state within that section is gone.\n\nSince we just spent a bunch of time training the model and don't want to lose that memory state as we continue to experiment, we create a branching section. This does add some memory overhead, but it's worth it so we can experiment without fear!\n\n\n\nTo use `Kino` to give us an interactive tool to evaluate the model, we'll create a `Kino.Frame` that we can dynamically update. We'll also create a form using `Kino.Control` to allow the user to select which image from the test set they'd like to evaluate the model on. 
Finally, `Kino.Control.stream` enables us to respond to changes in the user's selection when the user clicks the \"Render\" button.\n\nWe can use `Nx.concatenate` to stack the images side by side for a prettier output.\n\n```elixir\nform =\n Kino.Control.form(\n [\n test_image_index: Kino.Input.number(\"Test Image Index\", default: 0)\n ],\n submit: \"Render\"\n )\n\nKino.render(form)\n\nform\n|> Kino.Control.stream()\n|> Kino.animate(fn %{data: %{test_image_index: image_index}} ->\n test_image = test_images[[images: image_index]] |> add_noise.()\n\n reconstructed_image =\n model\n |> Axon.predict(params, test_image)\n # Get rid of the batch dimension\n |> Nx.squeeze(axes: [0])\n\n combined_image = Nx.concatenate([test_image, reconstructed_image], axis: :width)\n Nx.to_heatmap(combined_image)\nend)\n```\n\nThat looks pretty good!\n\nNote we used `Kino.animate/2` which runs asynchronously so we don't block execution of the rest of the notebook.\n\n","ref":"mnist_autoencoder_using_kino.html#evaluation","title":"Evaluation - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"_Note that we branch from the \"Building a model\" section since we only need the model definition for this section and not the previously trained model._\n\n\n\nIt'd be nice to see how the model improves as it trains. In this section (also a branch since I plan to experiment and don't want to lose the execution state) we'll improve the training loop to use `Kino` to show us how it's doing.\n\n[Axon.Loop.handle](https://hexdocs.pm/axon/Axon.Loop.html#handle/4) gives us a hook into various points of the training loop. We can use it with the `:iteration_completed` event to get a copy of the state of the params after some number of completed iterations of the training loop. By using those params to render an image from the test set, we can get a live view of the autoencoder learning to reconstruct its inputs.\n\n```elixir\n# A helper function to display the input and output side by side\ncombined_input_output = fn params, image_index ->\n test_image = test_images[[images: image_index]] |> add_noise.()\n reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])\n Nx.concatenate([test_image, reconstructed_image], axis: :width)\nend\n\nNx.to_heatmap(combined_input_output.(params, 0))\n```\n\nIt'd also be nice to have a prettier version of the output. 
Let's convert the heatmap to a PNG to make that happen.\n\n```elixir\nimage_to_kino = fn image ->\n image\n |> Nx.multiply(255)\n |> Nx.as_type(:u8)\n |> Nx.transpose(axes: [:height, :width, :channels])\n |> StbImage.from_nx()\n |> StbImage.resize(200, 400)\n |> StbImage.to_binary(:png)\n |> Kino.Image.new(:png)\nend\n\nimage_to_kino.(combined_input_output.(params, 0))\n```\n\nMuch nicer!\n\nOnce again we'll use `Kino.Frame` for dynamically updating output:\n\n```elixir\nframe = Kino.Frame.new() |> Kino.render()\n\nrender_example_handler = fn state ->\n Kino.Frame.append(frame, \"Epoch: #{state.epoch}, Iteration: #{state.iteration}\")\n # state.step_state[:model_state] contains the model params when this event is fired\n params = state.step_state[:model_state]\n image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))\n image = combined_input_output.(params, image_index) |> image_to_kino.()\n Kino.Frame.append(frame, image)\n {:continue, state}\nend\n\nparams =\n model\n |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))\n |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)\n |> Axon.Loop.validate(model, test_data)\n |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)\n\n:ok\n```\n\nAwesome! We have a working denoising autoencoder that we can watch getting better over 20 epochs!","ref":"mnist_autoencoder_using_kino.html#a-better-training-loop","title":"A better training loop - MNIST Denoising Autoencoder using Kino for visualization","type":"extras"},{"doc":"# Training an Autoencoder on Fashion MNIST\n\n```elixir\nMix.install([\n {:axon, \"~> 0.3.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:exla, \"~> 0.4.0\"},\n {:scidata, \"~> 0.1.9\"}\n])\n\nNx.Defn.default_options(compiler: EXLA)\n```","ref":"fashionmnist_autoencoder.html","title":"Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"An autoencoder is a deep learning model which consists of two parts: an encoder and a decoder. The encoder compresses high-dimensional data into a low-dimensional representation and feeds it to the decoder. The decoder tries to recreate the original data from the low-dimensional representation.\nAutoencoders can be used for problems such as:\n\n* Dimensionality reduction\n* Noise reduction\n* Generative models\n* Data augmentation\n\nLet's walk through a basic autoencoder implementation in Axon to get a better understanding of how they work in practice.","ref":"fashionmnist_autoencoder.html#introduction","title":"Introduction - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"To train and test our model, we use one of the most popular datasets: [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist). It consists of small black and white images of clothes. 
Loading this data set is very simple with the help of `Scidata`.\n\n```elixir\n{image_data, _label_data} = Scidata.FashionMNIST.download()\n{bin, type, shape} = image_data\n```\n\nWe get the data in a raw format, but this is exactly the information we need to build an Nx tensor.\n\n```elixir\ntrain_images =\n bin\n |> Nx.from_binary(type)\n |> Nx.reshape(shape)\n |> Nx.divide(255.0)\n```\n\nWe also normalize pixel values into the range $[0, 1]$.\n\n\n\nWe can visualize one of the images by looking at the tensor heatmap:\n\n```elixir\nNx.to_heatmap(train_images[1])\n```","ref":"fashionmnist_autoencoder.html#downloading-the-data","title":"Downloading the data - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"First we need to define the encoder and decoder. Both are one-layer neural networks.\n\nIn the encoder, we start by flattening the input, so we go from shape `{batch_size, 1, 28, 28}` to `{batch_size, 784}`, and then we pass the input into a dense layer. Our dense layer has only `latent_dim` neurons. The `latent_dim` (or the latent space) is a compressed representation of data. Remember, we want our encoder to compress the input data into a lower-dimensional representation, so we choose a `latent_dim` which is less than the dimensionality of the input.\n\n```elixir\nencoder = fn x, latent_dim ->\n x\n |> Axon.flatten()\n |> Axon.dense(latent_dim, activation: :relu)\nend\n```\n\nNext, we pass the output of the encoder to the decoder and try to reconstruct the compressed data into its original form. Since our original input had a dimensionality of 784, we use a dense layer with 784 neurons. Because our original data was normalized to have pixel values between 0 and 1, we use a `:sigmoid` activation in our dense layer to squeeze output values between 0 and 1. Our original input shape was 28x28, so we use `Axon.reshape` to convert the flattened representation of the outputs into an image with the correct width and height.\n\n```elixir\ndecoder = fn x ->\n x\n |> Axon.dense(784, activation: :sigmoid)\n |> Axon.reshape({:batch, 1, 28, 28})\nend\n```\n\nIf we just chain the encoder and decoder sequentially, we get the desired model. This was pretty smooth, wasn't it?\n\n```elixir\nmodel =\n Axon.input(\"input\", shape: {nil, 1, 28, 28})\n |> encoder.(64)\n |> decoder.()\n```","ref":"fashionmnist_autoencoder.html#encoder-and-decoder","title":"Encoder and decoder - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"Finally, we can train the model. We'll use the `:adam` optimizer and the `:mean_squared_error` loss with `Axon.Loop.trainer`. Our loss function will measure the aggregate error between pixels of original images and the model's reconstructed images. We'll also track `:mean_absolute_error` using `Axon.Loop.metric`. 
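For reference, over $n$ pixels these two measures are defined as:\n\n$$\n\\mathrm{MSE}(\\hat{y}, y) = \\frac{1}{n}\\sum_{i=1}^{n} (\\hat{y}_i - y_i)^2 \\qquad \\mathrm{MAE}(\\hat{y}, y) = \\frac{1}{n}\\sum_{i=1}^{n} |\\hat{y}_i - y_i|\n$$\n\n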
`Axon.Loop.run` trains the model with the given training data.\n\n```elixir\nbatch_size = 32\nepochs = 5\n\nbatched_images = Nx.to_batched(train_images, batch_size)\ntrain_batches = Stream.zip(batched_images, batched_images)\n\nparams =\n model\n |> Axon.Loop.trainer(:mean_squared_error, :adam)\n |> Axon.Loop.metric(:mean_absolute_error, \"Error\")\n |> Axon.Loop.run(train_batches, %{}, epochs: epochs, compiler: EXLA)\n```","ref":"fashionmnist_autoencoder.html#training-the-model","title":"Training the model - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"To better understand mean absolute error (MAE) and mean squared error (MSE), let's go through an example.\n\n```elixir\n# Error definitions for a single sample\n\nmean_square_error = fn y_pred, y ->\n y_pred\n |> Nx.subtract(y)\n |> Nx.power(2)\n |> Nx.mean()\nend\n\nmean_absolute_error = fn y_pred, y ->\n y_pred\n |> Nx.subtract(y)\n |> Nx.abs()\n |> Nx.mean()\nend\n```\n\nWe will work with a sample image of a shoe, a slightly noised version of that image, and also an entirely different image from the dataset.\n\n```elixir\nshoe_image = train_images[0]\nnoised_shoe_image = Nx.add(shoe_image, Nx.random_normal(shoe_image, 0.0, 0.05))\nother_image = train_images[1]\n:ok\n```\n\nFor the same image, both errors should be 0, because when we have two exact copies, there is no pixel difference.\n\n```elixir\n{\n mean_square_error.(shoe_image, shoe_image),\n mean_absolute_error.(shoe_image, shoe_image)\n}\n```\n\nNow the noised image:\n\n```elixir\n{\n mean_square_error.(shoe_image, noised_shoe_image),\n mean_absolute_error.(shoe_image, noised_shoe_image)\n}\n```\n\nAnd a different image:\n\n```elixir\n{\n mean_square_error.(shoe_image, other_image),\n mean_absolute_error.(shoe_image, other_image)\n}\n```\n\nAs we can see, the noised image has non-zero MSE and MAE values, but they are much smaller than the errors for two completely different pictures. In other words, both of these error types measure the level of similarity between images. A small error implies decent prediction values. On the other hand, a large error value suggests poor prediction quality.\n\nIf you look at our implementation of MAE and MSE, you will notice that they are very similar. MAE and MSE are also called the $L_1$ and $L_2$ losses, after the $L_1$ and $L_2$ norms respectively. The $L_2$ loss (MSE) is typically preferred because it's a smoother function whereas $L_1$ is often difficult to optimize with stochastic gradient descent (SGD).","ref":"fashionmnist_autoencoder.html#extra-losses","title":"Extra: losses - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"Now, let's see how our model is doing! We will compare a sample image before and after compression.\n\n```elixir\nsample_image = train_images[0..0//1]\ncompressed_image = Axon.predict(model, params, sample_image, compiler: EXLA)\n\nsample_image\n|> Nx.to_heatmap()\n|> IO.inspect(label: \"Original\")\n\ncompressed_image\n|> Nx.to_heatmap()\n|> IO.inspect(label: \"Compressed\")\n\n:ok\n```\n\nAs we can see, the generated image is similar to the input image. The only difference between them is the absence of a sign in the middle of the second shoe. 
The model treated the sign as noise and blurred it into the plain shoe.","ref":"fashionmnist_autoencoder.html#inference","title":"Inference - Training an Autoencoder on Fashion MNIST","type":"extras"},{"doc":"# A Variational Autoencoder for MNIST\n\n```elixir\nMix.install([\n {:exla, \"~> 0.4.0\"},\n {:nx, \"~> 0.4.0\", override: true},\n {:axon, \"~> 0.3.0\"},\n {:req, \"~> 0.3.1\"},\n {:kino, \"~> 0.7.0\"},\n {:scidata, \"~> 0.1.9\"},\n {:stb_image, \"~> 0.5.2\"},\n {:kino_vega_lite, \"~> 0.1.6\"},\n {:vega_lite, \"~> 0.1.6\"},\n {:table_rex, \"~> 3.1.1\"}\n])\n\nalias VegaLite, as: Vl\n\n# This speeds up all our `Nx` operations without having to use `defn`\nNx.global_default_backend(EXLA.Backend)\n\n:ok\n```","ref":"fashionmnist_vae.html","title":"A Variational Autoencoder for MNIST","type":"extras"},{"doc":"In this notebook, we'll be building a variational autoencoder (VAE). This will help demonstrate splitting up models, defining custom layers and loss functions, using multiple outputs, and a few additional Kino tricks for training models.\n\nThis notebook builds on the [denoising autoencoder example](mnist_autoencoder_using_kino.livemd) and turns the simple autoencoder into a variational one for the same dataset.","ref":"fashionmnist_vae.html#introduction","title":"Introduction - A Variational Autoencoder for MNIST","type":"extras"},{"doc":"This section will proceed without much explanation as most of it is extracted from the [denoising autoencoder example](mnist_autoencoder_using_kino.livemd). If anything here doesn't make sense, take a look at that notebook for an explanation.\n\n```elixir\ndefmodule Data do\n @moduledoc \"\"\"\n A module to hold useful data processing utilities,\n mostly extracted from the previous notebook\n \"\"\"\n\n @doc \"\"\"\n Converts the given image into a `Kino.Image`.\n\n `image` must be a single channel `Nx` tensor with pixel values between 0 and 1.\n `height` and `width` are the output size in pixels.\n \"\"\"\n def image_to_kino(image, height \\\\ 200, width \\\\ 200) do\n image\n |> Nx.multiply(255)\n |> Nx.as_type(:u8)\n |> Nx.transpose(axes: [:height, :width, :channels])\n |> StbImage.from_nx()\n |> StbImage.resize(height, width)\n |> StbImage.to_binary(:png)\n |> Kino.Image.new(:png)\n end\n\n @doc \"\"\"\n Converts image data from `Scidata.MNIST` into an `Nx` tensor and normalizes it.\n \"\"\"\n def preprocess_data(data) do\n {image_data, _labels} = data\n {images_binary, type, shape} = image_data\n\n images_binary\n |> Nx.from_binary(type)\n # Since pixels are organized row-wise, reshape into rows x columns\n |> Nx.reshape(shape, names: [:images, :channels, :height, :width])\n # Normalize the pixel values to be between 0 and 1\n |> Nx.divide(255)\n end\n\n @doc \"\"\"\n Converts a tensor of images into random batches of paired images for model training\n \"\"\"\n def prepare_training_data(images, batch_size) do\n Stream.flat_map([nil], fn nil ->\n images |> Nx.shuffle(axis: :images) |> Nx.to_batched(batch_size)\n end)\n |> Stream.map(fn batch -> {batch, batch} end)\n end\nend\n```\n\n```elixir\ntrain_images = Data.preprocess_data(Scidata.FashionMNIST.download())\ntest_images = Data.preprocess_data(Scidata.FashionMNIST.download_test())\n\nKino.render(train_images[[images: 0]] |> Data.image_to_kino())\nKino.render(test_images[[images: 0]] |> Data.image_to_kino())\n\n:ok\n```\n\nNow for our simple autoencoder model. 
We won't be using a denoising autoencoder here.\n\nNote that we're giving each of the layers a name - the reason for this will be apparent later.\n\nI'm also using a small custom layer to shift and scale the output of the sigmoid layer slightly so it can hit the 0 and 1 targets. I noticed the gradients tend to explode without this.\n\n```elixir\ndefmodule CustomLayer do\n import Nx.Defn\n\n def scaling_layer(%Axon{} = input, _opts \\\\ []) do\n Axon.layer(&scaling_layer_impl/2, [input])\n end\n\n defnp scaling_layer_impl(x, _opts \\\\ []) do\n x\n |> Nx.subtract(0.05)\n |> Nx.multiply(1.2)\n end\nend\n```\n\n```elixir\nmodel =\n Axon.input(\"image\", shape: {nil, 1, 28, 28})\n # This is now 28*28*1 = 784\n |> Axon.flatten()\n # The encoder\n |> Axon.dense(256, activation: :relu, name: \"encoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"encoder_layer_2\")\n |> Axon.dense(64, activation: :relu, name: \"encoder_layer_3\")\n # Bottleneck layer\n |> Axon.dense(10, activation: :relu, name: \"bottleneck_layer\")\n # The decoder\n |> Axon.dense(64, activation: :relu, name: \"decoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"decoder_layer_2\")\n |> Axon.dense(256, activation: :relu, name: \"decoder_layer_3\")\n |> Axon.dense(784, activation: :sigmoid, name: \"decoder_layer_4\")\n |> CustomLayer.scaling_layer()\n # Turn it back into a 28x28 single channel image\n |> Axon.reshape({:auto, 1, 28, 28})\n\n# We can use Axon.Display to show us what each of the layers would look like\n# assuming we send in a batch of 4 images\nAxon.Display.as_table(model, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()\n```\n\n```elixir\nbatch_size = 128\n\ntrain_data = Data.prepare_training_data(train_images, batch_size)\ntest_data = Data.prepare_training_data(test_images, batch_size)\n\n{input_batch, target_batch} = Enum.at(train_data, 0)\nKino.render(input_batch[[images: 0]] |> Data.image_to_kino())\nKino.render(target_batch[[images: 0]] |> Data.image_to_kino())\n\n:ok\n```\n\nWhen training, it can be useful to stop execution early - either when you see it's failing and you don't want to waste time waiting for the remaining epochs to finish, or if it's good enough and you want to start experimenting with it.\n\nThe `kino_early_stop/1` function below is a handy handler to give us a `Kino.Control.button` that will stop the training loop when clicked.\n\nWe also have a `plot_losses/1` function to visualize our training and validation losses using `VegaLite`.\n\n```elixir\ndefmodule KinoAxon do\n @doc \"\"\"\n Adds a handler function which adds a frame with a \"stop\" button\n to the cell with the training loop.\n\n Clicking \"stop\" will halt the training loop.\n \"\"\"\n def kino_early_stop(loop) do\n frame = Kino.Frame.new() |> Kino.render()\n stop_button = Kino.Control.button(\"stop\")\n Kino.Frame.render(frame, stop_button)\n\n {:ok, button_agent} = Agent.start_link(fn -> nil end)\n\n stop_button\n |> Kino.Control.stream()\n |> Kino.listen(fn _event ->\n Agent.update(button_agent, fn _ -> :stop end)\n end)\n\n handler = fn state ->\n stop_state = Agent.get(button_agent, & &1)\n\n if stop_state == :stop do\n Agent.stop(button_agent)\n Kino.Frame.render(frame, \"stopped\")\n {:halt_loop, state}\n else\n {:continue, state}\n end\n end\n\n Axon.Loop.handle(loop, :iteration_completed, handler)\n end\n\n @doc \"\"\"\n Plots the training and validation losses using Kino and VegaLite.\n\n This *must* come after `Axon.Loop.validate`.\n \"\"\"\n def plot_losses(loop) do\n vl_widget =\n Vl.new(width: 600, height: 
400)\n |> Vl.mark(:point, tooltip: true)\n |> Vl.encode_field(:x, \"epoch\", type: :ordinal)\n |> Vl.encode_field(:y, \"loss\", type: :quantitative)\n |> Vl.encode_field(:color, \"dataset\", type: :nominal)\n |> Kino.VegaLite.new()\n |> Kino.render()\n\n handler = fn state ->\n %Axon.Loop.State{metrics: metrics, epoch: epoch} = state\n loss = metrics[\"loss\"] |> Nx.to_number()\n val_loss = metrics[\"validation_loss\"] |> Nx.to_number()\n\n points = [\n %{epoch: epoch, loss: loss, dataset: \"train\"},\n %{epoch: epoch, loss: val_loss, dataset: \"validation\"}\n ]\n\n Kino.VegaLite.push_many(vl_widget, points)\n {:continue, state}\n end\n\n Axon.Loop.handle(loop, :epoch_completed, handler)\n end\nend\n```\n\n```elixir\n# A helper function to display the input and output side by side\ncombined_input_output = fn params, image_index ->\n test_image = test_images[[images: image_index]]\n reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])\n Nx.concatenate([test_image, reconstructed_image], axis: :width)\nend\n\nframe = Kino.Frame.new() |> Kino.render()\n\nrender_example_handler = fn state ->\n # state.step_state[:model_state] contains the model params when this event is fired\n params = state.step_state[:model_state]\n image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))\n image = combined_input_output.(params, image_index) |> Data.image_to_kino(200, 400)\n Kino.Frame.render(frame, image)\n Kino.Frame.append(frame, \"Epoch: #{state.epoch}, Iteration: #{state.iteration}\")\n {:continue, state}\nend\n\nparams =\n model\n |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))\n |> KinoAxon.kino_early_stop()\n |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)\n |> Axon.Loop.validate(model, test_data)\n |> KinoAxon.plot_losses()\n |> Axon.Loop.run(train_data, %{}, epochs: 40, compiler: EXLA)\n\n:ok\n```\n\n","ref":"fashionmnist_vae.html#training-a-simple-autoencoder","title":"Training a simple autoencoder - A Variational Autoencoder for MNIST","type":"extras"},{"doc":"Cool! We now have the parameters for a trained, simple autoencoder. Our next step is to split up the model so we can use the encoder and decoder separately. By doing that, we'll be able to take an image and _encode_ it to get the model's compressed image representation (the latent vector). 
We can then manipulate the latent vector and run the result through the _decoder_ to get a new image.\n\nLet's start by defining the encoder and decoder separately as two different models.\n\n```elixir\nencoder =\n Axon.input(\"image\", shape: {nil, 1, 28, 28})\n # This is now 28*28*1 = 784\n |> Axon.flatten()\n # The encoder\n |> Axon.dense(256, activation: :relu, name: \"encoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"encoder_layer_2\")\n |> Axon.dense(64, activation: :relu, name: \"encoder_layer_3\")\n # Bottleneck layer\n |> Axon.dense(10, activation: :relu, name: \"bottleneck_layer\")\n\n# The decoder takes the encoder's output (the latent) as its input\ndecoder =\n Axon.input(\"latent\", shape: {nil, 10})\n # The decoder\n |> Axon.dense(64, activation: :relu, name: \"decoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"decoder_layer_2\")\n |> Axon.dense(256, activation: :relu, name: \"decoder_layer_3\")\n |> Axon.dense(784, activation: :sigmoid, name: \"decoder_layer_4\")\n |> CustomLayer.scaling_layer()\n # Turn it back into a 28x28 single channel image\n |> Axon.reshape({:auto, 1, 28, 28})\n\nAxon.Display.as_table(encoder, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()\nAxon.Display.as_table(decoder, Nx.template({4, 10}, :f32)) |> IO.puts()\n```\n\nWe have the two models, but the problem is these are untrained, so we don't have the corresponding set of parameters. We'd like to use the parameters from the autoencoder we just trained and apply them to our split up models.\n\nLet's first take a look at what params actually are:\n\n```elixir\nparams\n```\n\nParams are just a `Map` with the layer name as the key identifying which parameters to use. We can easily match up the layer names with the output from the `Axon.Display.as_table/2` call for the autoencoder model.\n\nSo all we need to do is create a new Map that plucks out the right layers from our autoencoder `params` for each model and use that to run inference on our split up models.\n\nFortunately, since we gave each of the layers names, this requires no work at all - we can use the Map as it is since the layer names match up! Axon will ignore any extra keys so those won't be a problem.\n\nNote that naming the layers wasn't _required_; if the layers didn't have names we would have some renaming to do to get the names to match between the models. But giving them names made it very convenient :)\n\nLet's try encoding an image, printing the latent, and then decoding it using our split up model to make sure it's working.\n\n```elixir\nimage = test_images[[images: 0]]\n\n# Encode the image\nlatent = Axon.predict(encoder, params, image)\nIO.inspect(latent, label: \"Latent\")\n# Decode the image\nreconstructed_image = Axon.predict(decoder, params, latent) |> Nx.squeeze(axes: [0])\n\ncombined_image = Nx.concatenate([image, reconstructed_image], axis: :width)\nData.image_to_kino(combined_image, 200, 400)\n```\n\nPerfect! Seems like the split up models are working as expected. Now let's try to generate some new images using our autoencoder. To do this, we'll manipulate the latent so it's slightly different from what the encoder gave us. 
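One simple manipulation is linear interpolation between two latents $z_0$ and $z_1$:\n\n$$\nz_t = z_0 + \\frac{t}{T}(z_1 - z_0), \\qquad t = 0, 1, \\ldots, T\n$$\n\nIn the code below, $T$ is `num_steps`. 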
Specifically, we'll try to interpolate between two images, showing 100 steps from our starting image to our final image.\n\n```elixir\nnum_steps = 100\n\n# Get our latents, image at index 0 is our starting point\n# index 1 is where we'll end\nlatents = Axon.predict(encoder, params, test_images[[images: 0..1]])\n# Latents is a {2, 10} tensor\n# The step we'll add to our latent to move it towards image[1]\nstep = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)\n# We can make a batch of all our new latents\nnew_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])\n\nreconstructed_images = Axon.predict(decoder, params, new_latents)\n\nreconstructed_images =\n Nx.reshape(\n reconstructed_images,\n Nx.shape(reconstructed_images),\n names: [:images, :channels, :height, :width]\n )\n\nStream.interval(div(5000, num_steps))\n|> Stream.take(num_steps + 1)\n|> Kino.animate(fn i ->\n Data.image_to_kino(reconstructed_images[i])\nend)\n```\n\nCool! We have interpolation! But did you notice that some of the intermediate frames don't look fashionable at all? Autoencoders don't generally return good results for random vectors in their latent space. That's where a VAE can help.\n\n","ref":"fashionmnist_vae.html#splitting-up-the-model","title":"Splitting up the model - A Variational Autoencoder for MNIST","type":"extras"},{"doc":"In a VAE, instead of outputting a latent vector, our encoder will output a distribution. Essentially, this means that instead of 10 outputs we'll have 20: 10 representing the mean and 10 representing the log of the variance of the latent. We'll have to sample from this distribution to get our latent vector. Finally, we'll have to modify our loss function to also compute the KL Divergence between the latent distribution and a standard normal distribution (this acts as a regularizer of the latent space).\n\nWe'll start by defining our model:\n\n```elixir\ndefmodule Vae do\n import Nx.Defn\n\n @latent_features 10\n\n defp sampling_layer(%Axon{} = input, _opts \\\\ []) do\n Axon.layer(&sampling_layer_impl/2, [input], name: \"sampling_layer\", op_name: :sample)\n end\n\n defnp sampling_layer_impl(x, _opts \\\\ []) do\n mu = x[[0..-1//1, 0, 0..-1//1]]\n log_var = x[[0..-1//1, 1, 0..-1//1]]\n std_dev = Nx.exp(0.5 * log_var)\n eps = Nx.random_normal(std_dev)\n sample = mu + std_dev * eps\n Nx.stack([sample, mu, std_dev], axis: 1)\n end\n\n defp encoder_partial() do\n Axon.input(\"image\", shape: {nil, 1, 28, 28})\n # This is now 28*28*1 = 784\n |> Axon.flatten()\n # The encoder\n |> Axon.dense(256, activation: :relu, name: \"encoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"encoder_layer_2\")\n |> Axon.dense(64, activation: :relu, name: \"encoder_layer_3\")\n # Bottleneck layer\n |> Axon.dense(@latent_features * 2, name: \"bottleneck_layer\")\n # Split up the mu and logvar\n |> Axon.reshape({:auto, 2, @latent_features})\n |> sampling_layer()\n end\n\n def encoder() do\n encoder_partial()\n # Grab only the sample (i.e. 
the sampled latent)\n |> Axon.nx(fn x -> x[[0..-1//1, 0]] end)\n end\n\n def decoder(input_latent) do\n input_latent\n |> Axon.dense(64, activation: :relu, name: \"decoder_layer_1\")\n |> Axon.dense(128, activation: :relu, name: \"decoder_layer_2\")\n |> Axon.dense(256, activation: :relu, name: \"decoder_layer_3\")\n |> Axon.dense(784, activation: :sigmoid, name: \"decoder_layer_4\")\n |> CustomLayer.scaling_layer()\n # Turn it back into a 28x28 single channel image\n |> Axon.reshape({:auto, 1, 28, 28})\n end\n\n def autoencoder() do\n encoder_partial = encoder_partial()\n encoder = encoder()\n autoencoder = decoder(encoder)\n Axon.container(%{mu_sigma: encoder_partial, reconstruction: autoencoder})\n end\nend\n```\n\nThere are a few interesting things going on here. First, since our model has become more complex, we've used a module to keep it organized. We also built a custom layer to do the sampling and output the sampled latent vector as well as the distribution parameters (mu and sigma).\n\nFinally, we need the distribution itself so we can calculate the KL Divergence in our loss function. To make the model output the distribution parameters (mu and sigma), we use `Axon.container/1` to produce two outputs from our model instead of one. Now, instead of getting a tensor as an output, we'll get a map with the two tensors we need for our loss function.\n\nOur loss function also has to be modified to be the sum of the KL divergence and the MSE. Here's our custom loss function:\n\n```elixir\ndefmodule CustomLoss do\n import Nx.Defn\n\n defn loss(y_true, %{reconstruction: reconstruction, mu_sigma: mu_sigma}) do\n mu = mu_sigma[[0..-1//1, 1, 0..-1//1]]\n sigma = mu_sigma[[0..-1//1, 2, 0..-1//1]]\n kld = Nx.sum(-Nx.log(sigma) - 0.5 + Nx.multiply(sigma, sigma) + Nx.multiply(mu, mu))\n kld * 0.1 + Axon.Losses.mean_squared_error(y_true, reconstruction, reduction: :sum)\n end\nend\n```\n\nWith all our pieces ready, we can pretty much use the same training loop as we did earlier. 
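As an aside, two standard VAE identities underlie the code above: the sampling layer implements the reparameterization trick, and the `kld` term corresponds to the closed-form KL divergence between $\\mathcal{N}(\\mu, \\sigma^2)$ and $\\mathcal{N}(0, 1)$ per latent dimension:\n\n$$\nz = \\mu + \\sigma \\odot \\epsilon, \\quad \\epsilon \\sim \\mathcal{N}(0, I) \\qquad D_{KL} = -\\log\\sigma - \\frac{1}{2} + \\frac{\\sigma^2 + \\mu^2}{2}\n$$\n\nNote that the implementation above drops the $\\frac{1}{2}$ factors on $\\sigma^2$ and $\\mu^2$, which simply weights the regularizer a bit more heavily.\n\n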
The only modifications needed are accounting for the fact that the model outputs a map with two values instead of a single tensor, and telling the trainer to use our custom loss.\n\n```elixir\nmodel = Vae.autoencoder()\n\n# A helper function to display the input and output side by side\ncombined_input_output = fn params, image_index ->\n test_image = test_images[[images: image_index]]\n %{reconstruction: reconstructed_image} = Axon.predict(model, params, test_image)\n reconstructed_image = reconstructed_image |> Nx.squeeze(axes: [0])\n Nx.concatenate([test_image, reconstructed_image], axis: :width)\nend\n\nframe = Kino.Frame.new() |> Kino.render()\n\nrender_example_handler = fn state ->\n # state.step_state[:model_state] contains the model params when this event is fired\n params = state.step_state[:model_state]\n image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))\n image = combined_input_output.(params, image_index) |> Data.image_to_kino(200, 400)\n Kino.Frame.render(frame, image)\n Kino.Frame.append(frame, \"Epoch: #{state.epoch}, Iteration: #{state.iteration}\")\n {:continue, state}\nend\n\nparams =\n model\n |> Axon.Loop.trainer(&CustomLoss.loss/2, Polaris.Optimizers.adam(learning_rate: 0.001))\n |> KinoAxon.kino_early_stop()\n |> Axon.Loop.handle(:epoch_completed, render_example_handler)\n |> Axon.Loop.validate(model, test_data)\n |> KinoAxon.plot_losses()\n |> Axon.Loop.run(train_data, %{}, epochs: 40, compiler: EXLA)\n\n:ok\n```\n\nFinally, we can try our interpolation again:\n\n```elixir\nnum_steps = 100\n\n# Get our latents, image at index 0 is our starting point\n# index 1 is where we'll end\nlatents = Axon.predict(Vae.encoder(), params, test_images[[images: 0..1]])\n# Latents is a {2, 10} tensor\n# The step we'll add to our latent to move it towards image[1]\nstep = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)\n# We can make a batch of all our new latents\nnew_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])\n\ndecoder = Axon.input(\"latent\", shape: {nil, 10}) |> Vae.decoder()\n\nreconstructed_images = Axon.predict(decoder, params, new_latents)\n\nreconstructed_images =\n Nx.reshape(\n reconstructed_images,\n Nx.shape(reconstructed_images),\n names: [:images, :channels, :height, :width]\n )\n\nStream.interval(div(5000, num_steps))\n|> Stream.take(num_steps + 1)\n|> Kino.animate(fn i ->\n Data.image_to_kino(reconstructed_images[i])\nend)\n```\n\nDid you notice the difference? Every step in our interpolation looks similar to items in our dataset! This is the benefit of the VAE: we can generate new items by using random latents. 
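For example, since the KL term pulls the latent space towards a standard normal, we can sample latents directly and decode them. A sketch reusing the `decoder` model and trained `params` from above (the batch size of 16 is arbitrary):\n\n```elixir\n# Sample a batch of 16 latents from a standard normal and decode them\nrandom_latents = Nx.random_normal({16, 10})\n\ngenerated =\n Axon.predict(decoder, params, random_latents)\n |> Nx.reshape({16, 1, 28, 28}, names: [:images, :channels, :height, :width])\n\n# Render the first generated image\nData.image_to_kino(generated[[images: 0]])\n```\n\n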
In contrast, in the simple autoencoder, for the most part only latents we got from our encoder were likely to produce sensible outputs.","ref":"fashionmnist_vae.html#making-it-variational","title":"Making it variational - A Variational Autoencoder for MNIST","type":"extras"}]} \ No newline at end of file diff --git a/dist/sidebar_items-B66D7C0E.js b/dist/sidebar_items-B66D7C0E.js deleted file mode 100644 index b287669c..00000000 --- a/dist/sidebar_items-B66D7C0E.js +++ /dev/null @@ -1 +0,0 @@ -sidebarNodes={"extras":[{"group":"","headers":[{"anchor":"modules","id":"Modules"}],"id":"api-reference","title":"API Reference"},{"group":"","headers":[{"anchor":"model-creation","id":"Model Creation"},{"anchor":"model-execution","id":"Model Execution"},{"anchor":"training-and-evaluation","id":"Training and Evaluation"},{"anchor":"serialization","id":"Serialization"}],"id":"guides","title":"Axon Guides"},{"group":"Guides: Model Creation","headers":[{"anchor":"your-first-model","id":"Your first model"}],"id":"your_first_axon_model","title":"Your first Axon model"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-a-sequential-model","id":"Creating a sequential model"}],"id":"sequential_models","title":"Sequential models"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-more-complex-models","id":"Creating more complex models"}],"id":"complex_models","title":"Complex models"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-multi-input-models","id":"Creating multi-input models"},{"anchor":"creating-multi-output-models","id":"Creating multi-output models"}],"id":"multi_input_multi_output_models","title":"Multi-input / multi-output models"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-custom-layers","id":"Creating custom layers"}],"id":"custom_layers","title":"Custom layers"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-models-with-hooks","id":"Creating models with hooks"}],"id":"model_hooks","title":"Model hooks"},{"group":"Guides: Model Execution","headers":[{"anchor":"using-nx-backends-in-axon","id":"Using Nx Backends in Axon"},{"anchor":"using-nx-compilers-in-axon","id":"Using Nx Compilers in Axon"},{"anchor":"a-note-on-cpus-gpus-tpus","id":"A Note on CPUs/GPUs/TPUs"}],"id":"accelerating_axon","title":"Accelerating Axon"},{"group":"Guides: Model Execution","headers":[{"anchor":"executing-models-in-inference-mode","id":"Executing models in inference mode"},{"anchor":"executing-models-in-training-mode","id":"Executing models in training mode"}],"id":"training_and_inference_mode","title":"Training and inference mode"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"creating-an-axon-training-loop","id":"Creating an Axon training loop"}],"id":"your_first_training_loop","title":"Your first training loop"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"adding-metrics-to-training-loops","id":"Adding metrics to training loops"}],"id":"instrumenting_loops_with_metrics","title":"Instrumenting loops with metrics"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"creating-an-axon-evaluation-loop","id":"Creating an Axon evaluation loop"}],"id":"your_first_evaluation_loop","title":"Your first evaluation loop"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"adding-event-handlers-to-training-loops","id":"Adding event handlers to training loops"}],"id":"using_loop_event_handlers","title":"Using loop event handlers"},{"group":"Guides: Training and 
Evaluation","headers":[{"anchor":"using-custom-models-in-training-loops","id":"Using custom models in training loops"},{"anchor":"using-custom-loss-functions-in-training-loops","id":"Using custom loss functions in training loops"},{"anchor":"using-custom-optimizers-in-training-loops","id":"Using custom optimizers in training loops"}],"id":"custom_models_loss_optimizers","title":"Custom models, loss functions, and optimizers"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"writing-custom-metrics","id":"Writing custom metrics"}],"id":"writing_custom_metrics","title":"Writing custom metrics"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"writing-custom-event-handlers","id":"Writing custom event handlers"}],"id":"writing_custom_event_handlers","title":"Writing custom event handlers"},{"group":"Guides: Serialization","headers":[{"anchor":"converting-an-onnx-model-into-axon","id":"Converting an ONNX model into Axon"},{"anchor":"onnx-model","id":"ONNX model"},{"anchor":"inference-on-onnx-derived-models","id":"Inference on ONNX derived models"}],"id":"onnx_to_axon","title":"Converting ONNX models to Axon"},{"group":"Examples: Basics","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"the-model","id":"The model"},{"anchor":"training-data","id":"Training data"},{"anchor":"training","id":"Training"},{"anchor":"trying-the-model","id":"Trying the model"},{"anchor":"visualizing-the-model-predictions","id":"Visualizing the model predictions"}],"id":"xor","title":"Modeling XOR with a neural network"},{"group":"Examples: Vision","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"retrieving-and-exploring-the-dataset","id":"Retrieving and exploring the dataset"},{"anchor":"defining-the-model","id":"Defining the model"},{"anchor":"training","id":"Training"},{"anchor":"prediction","id":"Prediction"}],"id":"mnist","title":"Classifying handwritten digits"},{"group":"Examples: Vision","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"loading-the-data","id":"Loading the data"},{"anchor":"a-look-at-the-data","id":"A look at the data"},{"anchor":"data-processing","id":"Data processing"},{"anchor":"building-the-model","id":"Building the model"},{"anchor":"training-the-model","id":"Training the model"},{"anchor":"extra-gradient-centralization","id":"Extra: gradient centralization"},{"anchor":"inference","id":"Inference"}],"id":"horses_or_humans","title":"Classifying horses and humans"},{"group":"Examples: Text","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"preparation","id":"Preparation"},{"anchor":"defining-the-model","id":"Defining the Model"},{"anchor":"training-the-network","id":"Training the network"},{"anchor":"generating-text","id":"Generating text"},{"anchor":"multi-lstm-layers","id":"Multi LSTM layers"},{"anchor":"generate-text-with-the-new-network","id":"Generate text with the new network"},{"anchor":"references","id":"References"}],"id":"lstm_generation","title":"Generating text with LSTM"},{"group":"Examples: Structured","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"data-processing","id":"Data processing"},{"anchor":"building-the-model","id":"Building the model"},{"anchor":"training-our-model","id":"Training our model"},{"anchor":"model-evaluation","id":"Model evaluation"}],"id":"credit_card_fraud","title":"Classifying fraudulent transactions"},{"group":"Examples: Generative","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"data-loading","id":"Data 
loading"},{"anchor":"building-the-model","id":"Building the model"},{"anchor":"evaluation","id":"Evaluation"},{"anchor":"a-better-training-loop","id":"A better training loop"}],"id":"mnist_autoencoder_using_kino","title":"MNIST Denoising Autoencoder using Kino for visualization"},{"group":"Examples: Generative","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"downloading-the-data","id":"Downloading the data"},{"anchor":"encoder-and-decoder","id":"Encoder and decoder"},{"anchor":"training-the-model","id":"Training the model"},{"anchor":"extra-losses","id":"Extra: losses"},{"anchor":"inference","id":"Inference"}],"id":"fashionmnist_autoencoder","title":"Training an Autoencoder on Fashion MNIST"},{"group":"Examples: Generative","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"training-a-simple-autoencoder","id":"Training a simple autoencoder"},{"anchor":"splitting-up-the-model","id":"Splitting up the model"},{"anchor":"making-it-variational","id":"Making it variational"}],"id":"fashionmnist_vae","title":"A Variational Autoencoder for MNIST"}],"modules":[{"deprecated":false,"group":"Model","id":"Axon","nodeGroups":[{"key":"layers-special","name":"Layers: Special","nodes":[{"anchor":"block/2","deprecated":false,"id":"block/2","title":"block(fun, opts \\\\ [])"},{"anchor":"constant/2","deprecated":false,"id":"constant/2","title":"constant(tensor, opts \\\\ [])"},{"anchor":"container/2","deprecated":false,"id":"container/2","title":"container(container, opts \\\\ [])"},{"anchor":"input/2","deprecated":false,"id":"input/2","title":"input(name, opts \\\\ [])"},{"anchor":"layer/3","deprecated":false,"id":"layer/3","title":"layer(op, inputs, opts \\\\ [])"},{"anchor":"namespace/2","deprecated":false,"id":"namespace/2","title":"namespace(axon, name)"},{"anchor":"nx/3","deprecated":false,"id":"nx/3","title":"nx(input, fun, opts \\\\ [])"},{"anchor":"optional/2","deprecated":false,"id":"optional/2","title":"optional(x, opts \\\\ [])"},{"anchor":"param/3","deprecated":false,"id":"param/3","title":"param(name, shape, opts \\\\ [])"},{"anchor":"stack_columns/2","deprecated":false,"id":"stack_columns/2","title":"stack_columns(x, opts \\\\ [])"}]},{"key":"layers-activation","name":"Layers: Activation","nodes":[{"anchor":"activation/3","deprecated":false,"id":"activation/3","title":"activation(x, activation, opts \\\\ [])"},{"anchor":"celu/2","deprecated":false,"id":"celu/2","title":"celu(x, opts \\\\ [])"},{"anchor":"elu/2","deprecated":false,"id":"elu/2","title":"elu(x, opts \\\\ [])"},{"anchor":"exp/2","deprecated":false,"id":"exp/2","title":"exp(x, opts \\\\ [])"},{"anchor":"gelu/2","deprecated":false,"id":"gelu/2","title":"gelu(x, opts \\\\ [])"},{"anchor":"hard_sigmoid/2","deprecated":false,"id":"hard_sigmoid/2","title":"hard_sigmoid(x, opts \\\\ [])"},{"anchor":"hard_silu/2","deprecated":false,"id":"hard_silu/2","title":"hard_silu(x, opts \\\\ [])"},{"anchor":"hard_tanh/2","deprecated":false,"id":"hard_tanh/2","title":"hard_tanh(x, opts \\\\ [])"},{"anchor":"leaky_relu/2","deprecated":false,"id":"leaky_relu/2","title":"leaky_relu(x, opts \\\\ [])"},{"anchor":"linear/2","deprecated":false,"id":"linear/2","title":"linear(x, opts \\\\ [])"},{"anchor":"log_sigmoid/2","deprecated":false,"id":"log_sigmoid/2","title":"log_sigmoid(x, opts \\\\ [])"},{"anchor":"log_softmax/2","deprecated":false,"id":"log_softmax/2","title":"log_softmax(x, opts \\\\ [])"},{"anchor":"log_sumexp/2","deprecated":false,"id":"log_sumexp/2","title":"log_sumexp(x, opts \\\\ 
[])"},{"anchor":"mish/2","deprecated":false,"id":"mish/2","title":"mish(x, opts \\\\ [])"},{"anchor":"relu6/2","deprecated":false,"id":"relu6/2","title":"relu6(x, opts \\\\ [])"},{"anchor":"relu/2","deprecated":false,"id":"relu/2","title":"relu(x, opts \\\\ [])"},{"anchor":"selu/2","deprecated":false,"id":"selu/2","title":"selu(x, opts \\\\ [])"},{"anchor":"sigmoid/2","deprecated":false,"id":"sigmoid/2","title":"sigmoid(x, opts \\\\ [])"},{"anchor":"silu/2","deprecated":false,"id":"silu/2","title":"silu(x, opts \\\\ [])"},{"anchor":"softmax/2","deprecated":false,"id":"softmax/2","title":"softmax(x, opts \\\\ [])"},{"anchor":"softplus/2","deprecated":false,"id":"softplus/2","title":"softplus(x, opts \\\\ [])"},{"anchor":"softsign/2","deprecated":false,"id":"softsign/2","title":"softsign(x, opts \\\\ [])"},{"anchor":"tanh/2","deprecated":false,"id":"tanh/2","title":"tanh(x, opts \\\\ [])"}]},{"key":"layers-linear","name":"Layers: Linear","nodes":[{"anchor":"bias/2","deprecated":false,"id":"bias/2","title":"bias(x, opts \\\\ [])"},{"anchor":"bilinear/4","deprecated":false,"id":"bilinear/4","title":"bilinear(input1, input2, units, opts \\\\ [])"},{"anchor":"dense/3","deprecated":false,"id":"dense/3","title":"dense(x, units, opts \\\\ [])"},{"anchor":"embedding/4","deprecated":false,"id":"embedding/4","title":"embedding(x, vocab_size, embedding_size, opts \\\\ [])"}]},{"key":"layers-convolution","name":"Layers: Convolution","nodes":[{"anchor":"conv/3","deprecated":false,"id":"conv/3","title":"conv(x, units, opts \\\\ [])"},{"anchor":"conv_transpose/3","deprecated":false,"id":"conv_transpose/3","title":"conv_transpose(x, units, opts \\\\ [])"},{"anchor":"depthwise_conv/3","deprecated":false,"id":"depthwise_conv/3","title":"depthwise_conv(x, channel_multiplier, opts \\\\ [])"},{"anchor":"separable_conv2d/3","deprecated":false,"id":"separable_conv2d/3","title":"separable_conv2d(x, channel_multiplier, opts \\\\ [])"},{"anchor":"separable_conv3d/3","deprecated":false,"id":"separable_conv3d/3","title":"separable_conv3d(x, channel_multiplier, opts \\\\ [])"}]},{"key":"layers-dropout","name":"Layers: Dropout","nodes":[{"anchor":"alpha_dropout/2","deprecated":false,"id":"alpha_dropout/2","title":"alpha_dropout(x, opts \\\\ [])"},{"anchor":"dropout/2","deprecated":false,"id":"dropout/2","title":"dropout(x, opts \\\\ [])"},{"anchor":"feature_alpha_dropout/2","deprecated":false,"id":"feature_alpha_dropout/2","title":"feature_alpha_dropout(x, opts \\\\ [])"},{"anchor":"spatial_dropout/2","deprecated":false,"id":"spatial_dropout/2","title":"spatial_dropout(x, opts \\\\ [])"}]},{"key":"layers-pooling","name":"Layers: Pooling","nodes":[{"anchor":"adaptive_avg_pool/2","deprecated":false,"id":"adaptive_avg_pool/2","title":"adaptive_avg_pool(x, opts \\\\ [])"},{"anchor":"adaptive_lp_pool/2","deprecated":false,"id":"adaptive_lp_pool/2","title":"adaptive_lp_pool(x, opts \\\\ [])"},{"anchor":"adaptive_max_pool/2","deprecated":false,"id":"adaptive_max_pool/2","title":"adaptive_max_pool(x, opts \\\\ [])"},{"anchor":"avg_pool/2","deprecated":false,"id":"avg_pool/2","title":"avg_pool(x, opts \\\\ [])"},{"anchor":"global_avg_pool/2","deprecated":false,"id":"global_avg_pool/2","title":"global_avg_pool(x, opts \\\\ [])"},{"anchor":"global_lp_pool/2","deprecated":false,"id":"global_lp_pool/2","title":"global_lp_pool(x, opts \\\\ [])"},{"anchor":"global_max_pool/2","deprecated":false,"id":"global_max_pool/2","title":"global_max_pool(x, opts \\\\ 
[])"},{"anchor":"lp_pool/2","deprecated":false,"id":"lp_pool/2","title":"lp_pool(x, opts \\\\ [])"},{"anchor":"max_pool/2","deprecated":false,"id":"max_pool/2","title":"max_pool(x, opts \\\\ [])"}]},{"key":"layers-normalization","name":"Layers: Normalization","nodes":[{"anchor":"batch_norm/2","deprecated":false,"id":"batch_norm/2","title":"batch_norm(x, opts \\\\ [])"},{"anchor":"group_norm/3","deprecated":false,"id":"group_norm/3","title":"group_norm(x, num_groups, opts \\\\ [])"},{"anchor":"instance_norm/2","deprecated":false,"id":"instance_norm/2","title":"instance_norm(x, opts \\\\ [])"},{"anchor":"layer_norm/2","deprecated":false,"id":"layer_norm/2","title":"layer_norm(x, opts \\\\ [])"}]},{"key":"layers-recurrent","name":"Layers: Recurrent","nodes":[{"anchor":"conv_lstm/2","deprecated":false,"id":"conv_lstm/2","title":"conv_lstm(x, units)"},{"anchor":"conv_lstm/3","deprecated":false,"id":"conv_lstm/3","title":"conv_lstm(x, units, opts)"},{"anchor":"conv_lstm/4","deprecated":false,"id":"conv_lstm/4","title":"conv_lstm(x, hidden_state, units, opts)"},{"anchor":"gru/2","deprecated":false,"id":"gru/2","title":"gru(x, units)"},{"anchor":"gru/3","deprecated":false,"id":"gru/3","title":"gru(x, units, opts)"},{"anchor":"gru/4","deprecated":false,"id":"gru/4","title":"gru(x, hidden_state, units, opts)"},{"anchor":"lstm/2","deprecated":false,"id":"lstm/2","title":"lstm(x, units)"},{"anchor":"lstm/3","deprecated":false,"id":"lstm/3","title":"lstm(x, units, opts)"},{"anchor":"lstm/4","deprecated":false,"id":"lstm/4","title":"lstm(x, hidden_state, units, opts \\\\ [])"},{"anchor":"mask/3","deprecated":false,"id":"mask/3","title":"mask(input, eos_token, opts \\\\ [])"}]},{"key":"layers-combinators","name":"Layers: Combinators","nodes":[{"anchor":"add/3","deprecated":false,"id":"add/3","title":"add(x, y, opts)"},{"anchor":"concatenate/3","deprecated":false,"id":"concatenate/3","title":"concatenate(x, y, opts)"},{"anchor":"cond/5","deprecated":false,"id":"cond/5","title":"cond(parent, cond_fn, true_graph, false_graph, opts \\\\ [])"},{"anchor":"multiply/3","deprecated":false,"id":"multiply/3","title":"multiply(x, y, opts)"},{"anchor":"split/3","deprecated":false,"id":"split/3","title":"split(parent, splits, opts \\\\ [])"},{"anchor":"subtract/3","deprecated":false,"id":"subtract/3","title":"subtract(x, y, opts)"}]},{"key":"layers-shape","name":"Layers: Shape","nodes":[{"anchor":"flatten/2","deprecated":false,"id":"flatten/2","title":"flatten(x, opts \\\\ [])"},{"anchor":"pad/4","deprecated":false,"id":"pad/4","title":"pad(x, config, value \\\\ 0.0, opts \\\\ [])"},{"anchor":"reshape/3","deprecated":false,"id":"reshape/3","title":"reshape(x, new_shape, opts \\\\ [])"},{"anchor":"resize/3","deprecated":false,"id":"resize/3","title":"resize(x, resize_shape, opts \\\\ [])"},{"anchor":"transpose/3","deprecated":false,"id":"transpose/3","title":"transpose(x, permutation \\\\ nil, opts \\\\ [])"}]},{"key":"model","name":"Model","nodes":[{"anchor":"build/2","deprecated":false,"id":"build/2","title":"build(model, opts \\\\ [])"},{"anchor":"compile/4","deprecated":false,"id":"compile/4","title":"compile(model, template, init_params \\\\ %{}, opts \\\\ [])"},{"anchor":"deserialize/2","deprecated":false,"id":"deserialize/2","title":"deserialize(serialized, opts \\\\ [])"},{"anchor":"freeze/2","deprecated":false,"id":"freeze/2","title":"freeze(model, fun_or_predicate \\\\ :all)"},{"anchor":"predict/4","deprecated":false,"id":"predict/4","title":"predict(model, params, input, opts \\\\ 
[])"},{"anchor":"serialize/3","deprecated":false,"id":"serialize/3","title":"serialize(axon, params, opts \\\\ [])"},{"anchor":"unfreeze/2","deprecated":false,"id":"unfreeze/2","title":"unfreeze(model, fun_or_predicate \\\\ :all)"}]},{"key":"model-manipulation","name":"Model: Manipulation","nodes":[{"anchor":"get_inputs/1","deprecated":false,"id":"get_inputs/1","title":"get_inputs(axon)"},{"anchor":"get_op_counts/1","deprecated":false,"id":"get_op_counts/1","title":"get_op_counts(axon)"},{"anchor":"get_options/1","deprecated":false,"id":"get_options/1","title":"get_options(axon)"},{"anchor":"get_output_shape/3","deprecated":false,"id":"get_output_shape/3","title":"get_output_shape(axon, inputs, opts \\\\ [])"},{"anchor":"get_parameters/1","deprecated":false,"id":"get_parameters/1","title":"get_parameters(axon)"},{"anchor":"map_nodes/2","deprecated":false,"id":"map_nodes/2","title":"map_nodes(axon, fun)"},{"anchor":"pop_node/1","deprecated":false,"id":"pop_node/1","title":"pop_node(axon)"},{"anchor":"reduce_nodes/3","deprecated":false,"id":"reduce_nodes/3","title":"reduce_nodes(axon, acc, fun)"},{"anchor":"set_options/2","deprecated":false,"id":"set_options/2","title":"set_options(axon, new_opts)"},{"anchor":"set_parameters/2","deprecated":false,"id":"set_parameters/2","title":"set_parameters(axon, new_params)"}]},{"key":"model-debugging","name":"Model: Debugging","nodes":[{"anchor":"attach_hook/3","deprecated":false,"id":"attach_hook/3","title":"attach_hook(x, fun, opts \\\\ [])"},{"anchor":"trace_backward/5","deprecated":false,"id":"trace_backward/5","title":"trace_backward(model, inputs, params, loss, opts \\\\ [])"},{"anchor":"trace_forward/4","deprecated":false,"id":"trace_forward/4","title":"trace_forward(model, inputs, params, opts \\\\ [])"},{"anchor":"trace_init/4","deprecated":false,"id":"trace_init/4","title":"trace_init(model, template, params \\\\ %{}, opts \\\\ [])"}]},{"key":"types","name":"Types","nodes":[{"anchor":"t:t/0","deprecated":false,"id":"t/0","title":"t()"}]},{"key":"functions","name":"Functions","nodes":[{"anchor":"bidirectional/4","deprecated":false,"id":"bidirectional/4","title":"bidirectional(input, forward_fun, merge_fun, opts \\\\ [])"},{"anchor":"blur_pool/2","deprecated":false,"id":"blur_pool/2","title":"blur_pool(x, opts \\\\ [])"}]}],"sections":[{"anchor":"module-model-creation","id":"Model Creation"},{"anchor":"module-model-execution","id":"Model Execution"},{"anchor":"module-model-training","id":"Model Training"},{"anchor":"module-using-with-nx-serving","id":"Using with Nx.Serving"}],"title":"Axon"},{"deprecated":false,"group":"Model","id":"Axon.Initializers","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"full/1","deprecated":false,"id":"full/1","title":"full(value)"},{"anchor":"glorot_normal/1","deprecated":false,"id":"glorot_normal/1","title":"glorot_normal(opts \\\\ [])"},{"anchor":"glorot_uniform/1","deprecated":false,"id":"glorot_uniform/1","title":"glorot_uniform(opts \\\\ [])"},{"anchor":"he_normal/1","deprecated":false,"id":"he_normal/1","title":"he_normal(opts \\\\ [])"},{"anchor":"he_uniform/1","deprecated":false,"id":"he_uniform/1","title":"he_uniform(opts \\\\ [])"},{"anchor":"identity/0","deprecated":false,"id":"identity/0","title":"identity()"},{"anchor":"lecun_normal/1","deprecated":false,"id":"lecun_normal/1","title":"lecun_normal(opts \\\\ [])"},{"anchor":"lecun_uniform/1","deprecated":false,"id":"lecun_uniform/1","title":"lecun_uniform(opts \\\\ 
[])"},{"anchor":"normal/1","deprecated":false,"id":"normal/1","title":"normal(opts \\\\ [])"},{"anchor":"ones/0","deprecated":false,"id":"ones/0","title":"ones()"},{"anchor":"orthogonal/1","deprecated":false,"id":"orthogonal/1","title":"orthogonal(opts \\\\ [])"},{"anchor":"uniform/1","deprecated":false,"id":"uniform/1","title":"uniform(opts \\\\ [])"},{"anchor":"variance_scaling/1","deprecated":false,"id":"variance_scaling/1","title":"variance_scaling(opts \\\\ [])"},{"anchor":"zeros/0","deprecated":false,"id":"zeros/0","title":"zeros()"}]}],"sections":[],"title":"Axon.Initializers"},{"deprecated":false,"group":"Model","id":"Axon.MixedPrecision","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"cast/3","deprecated":false,"id":"cast/3","title":"cast(policy, tensor_or_container, variable_type)"},{"anchor":"create_policy/1","deprecated":false,"id":"create_policy/1","title":"create_policy(opts \\\\ [])"}]}],"sections":[],"title":"Axon.MixedPrecision"},{"deprecated":false,"group":"Model","id":"Axon.None","sections":[],"title":"Axon.None"},{"deprecated":false,"group":"Model","id":"Axon.StatefulOutput","sections":[],"title":"Axon.StatefulOutput"},{"deprecated":false,"group":"Summary","id":"Axon.Display","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"as_graph/3","deprecated":false,"id":"as_graph/3","title":"as_graph(axon, input_templates, opts \\\\ [])"},{"anchor":"as_table/2","deprecated":false,"id":"as_table/2","title":"as_table(axon, input_templates)"}]}],"sections":[],"title":"Axon.Display"},{"deprecated":false,"group":"Functional","id":"Axon.Activations","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"celu/2","deprecated":false,"id":"celu/2","title":"celu(x, opts \\\\ [])"},{"anchor":"elu/2","deprecated":false,"id":"elu/2","title":"elu(x, opts \\\\ [])"},{"anchor":"exp/1","deprecated":false,"id":"exp/1","title":"exp(x)"},{"anchor":"gelu/1","deprecated":false,"id":"gelu/1","title":"gelu(x)"},{"anchor":"hard_sigmoid/2","deprecated":false,"id":"hard_sigmoid/2","title":"hard_sigmoid(x, opts \\\\ [])"},{"anchor":"hard_silu/2","deprecated":false,"id":"hard_silu/2","title":"hard_silu(x, opts \\\\ [])"},{"anchor":"hard_tanh/1","deprecated":false,"id":"hard_tanh/1","title":"hard_tanh(x)"},{"anchor":"leaky_relu/2","deprecated":false,"id":"leaky_relu/2","title":"leaky_relu(x, opts \\\\ [])"},{"anchor":"linear/1","deprecated":false,"id":"linear/1","title":"linear(x)"},{"anchor":"log_sigmoid/1","deprecated":false,"id":"log_sigmoid/1","title":"log_sigmoid(x)"},{"anchor":"log_softmax/2","deprecated":false,"id":"log_softmax/2","title":"log_softmax(x, opts \\\\ [])"},{"anchor":"log_sumexp/2","deprecated":false,"id":"log_sumexp/2","title":"log_sumexp(x, opts \\\\ [])"},{"anchor":"mish/1","deprecated":false,"id":"mish/1","title":"mish(x)"},{"anchor":"relu6/1","deprecated":false,"id":"relu6/1","title":"relu6(x)"},{"anchor":"relu/1","deprecated":false,"id":"relu/1","title":"relu(x)"},{"anchor":"selu/2","deprecated":false,"id":"selu/2","title":"selu(x, opts \\\\ [])"},{"anchor":"sigmoid/1","deprecated":false,"id":"sigmoid/1","title":"sigmoid(x)"},{"anchor":"silu/1","deprecated":false,"id":"silu/1","title":"silu(x)"},{"anchor":"softmax/2","deprecated":false,"id":"softmax/2","title":"softmax(x, opts \\\\ 
[])"},{"anchor":"softplus/1","deprecated":false,"id":"softplus/1","title":"softplus(x)"},{"anchor":"softsign/1","deprecated":false,"id":"softsign/1","title":"softsign(x)"},{"anchor":"tanh/1","deprecated":false,"id":"tanh/1","title":"tanh(x)"}]}],"sections":[],"title":"Axon.Activations"},{"deprecated":false,"group":"Functional","id":"Axon.Layers","nodeGroups":[{"key":"layers-linear","name":"Layers: Linear","nodes":[{"anchor":"bilinear/5","deprecated":false,"id":"bilinear/5","title":"bilinear(input1, input2, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"dense/4","deprecated":false,"id":"dense/4","title":"dense(input, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"embedding/3","deprecated":false,"id":"embedding/3","title":"embedding(input, kernel, arg3 \\\\ [])"}]},{"key":"layers-dropout","name":"Layers: Dropout","nodes":[{"anchor":"alpha_dropout/3","deprecated":false,"id":"alpha_dropout/3","title":"alpha_dropout(input, key, opts \\\\ [])"},{"anchor":"dropout/3","deprecated":false,"id":"dropout/3","title":"dropout(input, key, opts \\\\ [])"},{"anchor":"feature_alpha_dropout/3","deprecated":false,"id":"feature_alpha_dropout/3","title":"feature_alpha_dropout(input, key, opts \\\\ [])"},{"anchor":"spatial_dropout/3","deprecated":false,"id":"spatial_dropout/3","title":"spatial_dropout(input, key, opts \\\\ [])"}]},{"key":"layers-pooling","name":"Layers: Pooling","nodes":[{"anchor":"adaptive_avg_pool/2","deprecated":false,"id":"adaptive_avg_pool/2","title":"adaptive_avg_pool(input, opts \\\\ [])"},{"anchor":"adaptive_lp_pool/2","deprecated":false,"id":"adaptive_lp_pool/2","title":"adaptive_lp_pool(input, opts \\\\ [])"},{"anchor":"adaptive_max_pool/2","deprecated":false,"id":"adaptive_max_pool/2","title":"adaptive_max_pool(input, opts \\\\ [])"},{"anchor":"avg_pool/2","deprecated":false,"id":"avg_pool/2","title":"avg_pool(input, opts \\\\ [])"},{"anchor":"blur_pool/2","deprecated":false,"id":"blur_pool/2","title":"blur_pool(input, opts \\\\ [])"},{"anchor":"global_avg_pool/2","deprecated":false,"id":"global_avg_pool/2","title":"global_avg_pool(input, opts \\\\ [])"},{"anchor":"global_lp_pool/2","deprecated":false,"id":"global_lp_pool/2","title":"global_lp_pool(input, opts \\\\ [])"},{"anchor":"global_max_pool/2","deprecated":false,"id":"global_max_pool/2","title":"global_max_pool(input, opts \\\\ [])"},{"anchor":"lp_pool/2","deprecated":false,"id":"lp_pool/2","title":"lp_pool(input, opts \\\\ [])"},{"anchor":"max_pool/2","deprecated":false,"id":"max_pool/2","title":"max_pool(input, opts \\\\ [])"}]},{"key":"layers-normalization","name":"Layers: Normalization","nodes":[{"anchor":"batch_norm/6","deprecated":false,"id":"batch_norm/6","title":"batch_norm(input, gamma, beta, ra_mean, ra_var, opts \\\\ [])"},{"anchor":"group_norm/4","deprecated":false,"id":"group_norm/4","title":"group_norm(input, gamma, beta, opts \\\\ [])"},{"anchor":"instance_norm/6","deprecated":false,"id":"instance_norm/6","title":"instance_norm(input, gamma, beta, ra_mean, ra_var, opts \\\\ [])"},{"anchor":"layer_norm/4","deprecated":false,"id":"layer_norm/4","title":"layer_norm(input, gamma, beta, opts \\\\ [])"}]},{"key":"layers-shape","name":"Layers: Shape","nodes":[{"anchor":"flatten/2","deprecated":false,"id":"flatten/2","title":"flatten(input, opts \\\\ [])"},{"anchor":"resize/2","deprecated":false,"id":"resize/2","title":"resize(input, opts \\\\ [])"}]},{"key":"functions-convolutional","name":"Functions: Convolutional","nodes":[{"anchor":"conv/4","deprecated":false,"id":"conv/4","title":"conv(input, kernel, bias \\\\ 0, 
opts \\\\ [])"},{"anchor":"conv_transpose/4","deprecated":false,"id":"conv_transpose/4","title":"conv_transpose(input, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"depthwise_conv/4","deprecated":false,"id":"depthwise_conv/4","title":"depthwise_conv(inputs, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"separable_conv2d/6","deprecated":false,"id":"separable_conv2d/6","title":"separable_conv2d(input, k1, b1, k2, b2, opts \\\\ [])"},{"anchor":"separable_conv3d/8","deprecated":false,"id":"separable_conv3d/8","title":"separable_conv3d(input, k1, b1, k2, b2, k3, b3, opts \\\\ [])"}]},{"key":"functions","name":"Functions","nodes":[{"anchor":"celu/2","deprecated":false,"id":"celu/2","title":"celu(input, opts \\\\ [])"},{"anchor":"conv_lstm/7","deprecated":false,"id":"conv_lstm/7","title":"conv_lstm(input, hidden_state, mask, input_kernel, hidden_kernel, bias \\\\ [], opts \\\\ [])"},{"anchor":"conv_lstm_cell/7","deprecated":false,"id":"conv_lstm_cell/7","title":"conv_lstm_cell(input, carry, arg3, ih, hh, bi, opts \\\\ [])"},{"anchor":"dynamic_unroll/7","deprecated":false,"id":"dynamic_unroll/7","title":"dynamic_unroll(cell_fn, input_sequence, carry, mask, input_kernel, recurrent_kernel, bias)"},{"anchor":"elu/2","deprecated":false,"id":"elu/2","title":"elu(input, opts \\\\ [])"},{"anchor":"gru/7","deprecated":false,"id":"gru/7","title":"gru(input, hidden_state, mask, input_kernel, hidden_kernel, bias \\\\ [], opts \\\\ [])"},{"anchor":"gru_cell/8","deprecated":false,"id":"gru_cell/8","title":"gru_cell(input, carry, mask, arg4, arg5, arg6, gate_fn \\\\ &Axon.Activations.sigmoid/1, activation_fn \\\\ &Axon.Activations.tanh/1)"},{"anchor":"hard_sigmoid/2","deprecated":false,"id":"hard_sigmoid/2","title":"hard_sigmoid(input, opts \\\\ [])"},{"anchor":"hard_silu/2","deprecated":false,"id":"hard_silu/2","title":"hard_silu(input, opts \\\\ [])"},{"anchor":"leaky_relu/2","deprecated":false,"id":"leaky_relu/2","title":"leaky_relu(input, opts \\\\ [])"},{"anchor":"log_softmax/2","deprecated":false,"id":"log_softmax/2","title":"log_softmax(input, opts \\\\ [])"},{"anchor":"log_sumexp/2","deprecated":false,"id":"log_sumexp/2","title":"log_sumexp(input, opts \\\\ [])"},{"anchor":"lstm/7","deprecated":false,"id":"lstm/7","title":"lstm(input, hidden_state, mask, input_kernel, hidden_kernel, bias \\\\ [], opts \\\\ [])"},{"anchor":"lstm_cell/8","deprecated":false,"id":"lstm_cell/8","title":"lstm_cell(input, carry, mask, arg4, arg5, arg6, gate_fn \\\\ &Axon.Activations.sigmoid/1, activation_fn \\\\ &Axon.Activations.tanh/1)"},{"anchor":"multiply/2","deprecated":false,"id":"multiply/2","title":"multiply(inputs, opts \\\\ [])"},{"anchor":"padding_config_transform/2","deprecated":false,"id":"padding_config_transform/2","title":"padding_config_transform(config, channels)"},{"anchor":"selu/2","deprecated":false,"id":"selu/2","title":"selu(input, opts \\\\ [])"},{"anchor":"softmax/2","deprecated":false,"id":"softmax/2","title":"softmax(input, opts \\\\ [])"},{"anchor":"static_unroll/7","deprecated":false,"id":"static_unroll/7","title":"static_unroll(cell_fn, input_sequence, carry, mask, input_kernel, recurrent_kernel, bias)"},{"anchor":"subtract/2","deprecated":false,"id":"subtract/2","title":"subtract(inputs, opts \\\\ [])"}]}],"sections":[],"title":"Axon.Layers"},{"deprecated":false,"group":"Functional","id":"Axon.LossScale","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"dynamic/1","deprecated":false,"id":"dynamic/1","title":"dynamic(opts \\\\ 
[])"},{"anchor":"identity/1","deprecated":false,"id":"identity/1","title":"identity(opts \\\\ [])"},{"anchor":"static/1","deprecated":false,"id":"static/1","title":"static(opts \\\\ [])"}]}],"sections":[],"title":"Axon.LossScale"},{"deprecated":false,"group":"Functional","id":"Axon.Losses","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"apply_label_smoothing/3","deprecated":false,"id":"apply_label_smoothing/3","title":"apply_label_smoothing(y_true, y_pred, opts \\\\ [])"},{"anchor":"binary_cross_entropy/3","deprecated":false,"id":"binary_cross_entropy/3","title":"binary_cross_entropy(y_true, y_pred, opts \\\\ [])"},{"anchor":"categorical_cross_entropy/3","deprecated":false,"id":"categorical_cross_entropy/3","title":"categorical_cross_entropy(y_true, y_pred, opts \\\\ [])"},{"anchor":"categorical_hinge/3","deprecated":false,"id":"categorical_hinge/3","title":"categorical_hinge(y_true, y_pred, opts \\\\ [])"},{"anchor":"connectionist_temporal_classification/3","deprecated":false,"id":"connectionist_temporal_classification/3","title":"connectionist_temporal_classification(arg1, y_pred, opts \\\\ [])"},{"anchor":"cosine_similarity/3","deprecated":false,"id":"cosine_similarity/3","title":"cosine_similarity(y_true, y_pred, opts \\\\ [])"},{"anchor":"hinge/3","deprecated":false,"id":"hinge/3","title":"hinge(y_true, y_pred, opts \\\\ [])"},{"anchor":"huber/3","deprecated":false,"id":"huber/3","title":"huber(y_true, y_pred, opts \\\\ [])"},{"anchor":"kl_divergence/3","deprecated":false,"id":"kl_divergence/3","title":"kl_divergence(y_true, y_pred, opts \\\\ [])"},{"anchor":"label_smoothing/2","deprecated":false,"id":"label_smoothing/2","title":"label_smoothing(loss_fun, opts \\\\ [])"},{"anchor":"log_cosh/3","deprecated":false,"id":"log_cosh/3","title":"log_cosh(y_true, y_pred, opts \\\\ [])"},{"anchor":"margin_ranking/3","deprecated":false,"id":"margin_ranking/3","title":"margin_ranking(y_true, arg2, opts \\\\ [])"},{"anchor":"mean_absolute_error/3","deprecated":false,"id":"mean_absolute_error/3","title":"mean_absolute_error(y_true, y_pred, opts \\\\ [])"},{"anchor":"mean_squared_error/3","deprecated":false,"id":"mean_squared_error/3","title":"mean_squared_error(y_true, y_pred, opts \\\\ [])"},{"anchor":"poisson/3","deprecated":false,"id":"poisson/3","title":"poisson(y_true, y_pred, opts \\\\ [])"},{"anchor":"soft_margin/3","deprecated":false,"id":"soft_margin/3","title":"soft_margin(y_true, y_pred, opts \\\\ [])"}]}],"sections":[],"title":"Axon.Losses"},{"deprecated":false,"group":"Functional","id":"Axon.Metrics","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"accuracy/3","deprecated":false,"id":"accuracy/3","title":"accuracy(y_true, y_pred, opts \\\\ [])"},{"anchor":"accuracy_transform/4","deprecated":false,"id":"accuracy_transform/4","title":"accuracy_transform(y_true, y_pred, from_logits, sparse)"},{"anchor":"false_negatives/3","deprecated":false,"id":"false_negatives/3","title":"false_negatives(y_true, y_pred, opts \\\\ [])"},{"anchor":"false_positives/3","deprecated":false,"id":"false_positives/3","title":"false_positives(y_true, y_pred, opts \\\\ [])"},{"anchor":"mean_absolute_error/2","deprecated":false,"id":"mean_absolute_error/2","title":"mean_absolute_error(y_true, y_pred)"},{"anchor":"precision/3","deprecated":false,"id":"precision/3","title":"precision(y_true, y_pred, opts \\\\ [])"},{"anchor":"recall/3","deprecated":false,"id":"recall/3","title":"recall(y_true, y_pred, opts \\\\ 
[])"},{"anchor":"running_average/1","deprecated":false,"id":"running_average/1","title":"running_average(metric)"},{"anchor":"running_sum/1","deprecated":false,"id":"running_sum/1","title":"running_sum(metric)"},{"anchor":"sensitivity/3","deprecated":false,"id":"sensitivity/3","title":"sensitivity(y_true, y_pred, opts \\\\ [])"},{"anchor":"specificity/3","deprecated":false,"id":"specificity/3","title":"specificity(y_true, y_pred, opts \\\\ [])"},{"anchor":"top_k_categorical_accuracy/3","deprecated":false,"id":"top_k_categorical_accuracy/3","title":"top_k_categorical_accuracy(y_true, y_pred, opts \\\\ [])"},{"anchor":"true_negatives/3","deprecated":false,"id":"true_negatives/3","title":"true_negatives(y_true, y_pred, opts \\\\ [])"},{"anchor":"true_positives/3","deprecated":false,"id":"true_positives/3","title":"true_positives(y_true, y_pred, opts \\\\ [])"}]}],"sections":[],"title":"Axon.Metrics"},{"deprecated":false,"group":"Loop","id":"Axon.Loop","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"checkpoint/2","deprecated":false,"id":"checkpoint/2","title":"checkpoint(loop, opts \\\\ [])"},{"anchor":"deserialize_state/2","deprecated":false,"id":"deserialize_state/2","title":"deserialize_state(serialized, opts \\\\ [])"},{"anchor":"early_stop/3","deprecated":false,"id":"early_stop/3","title":"early_stop(loop, monitor, opts \\\\ [])"},{"anchor":"eval_step/1","deprecated":false,"id":"eval_step/1","title":"eval_step(model)"},{"anchor":"evaluator/1","deprecated":false,"id":"evaluator/1","title":"evaluator(model)"},{"anchor":"from_state/2","deprecated":false,"id":"from_state/2","title":"from_state(loop, state)"},{"anchor":"handle_event/4","deprecated":false,"id":"handle_event/4","title":"handle_event(loop, event, handler, filter \\\\ :always)"},{"anchor":"kino_vega_lite_plot/4","deprecated":false,"id":"kino_vega_lite_plot/4","title":"kino_vega_lite_plot(loop, plot, metric, opts \\\\ [])"},{"anchor":"log/3","deprecated":false,"id":"log/3","title":"log(loop, message_fn, opts \\\\ [])"},{"anchor":"loop/3","deprecated":false,"id":"loop/3","title":"loop(step_fn, init_fn \\\\ &default_init/2, output_transform \\\\ & &1)"},{"anchor":"metric/5","deprecated":false,"id":"metric/5","title":"metric(loop, metric, name \\\\ nil, accumulate \\\\ :running_average, transform_or_fields \\\\ [:y_true, :y_pred])"},{"anchor":"monitor/5","deprecated":false,"id":"monitor/5","title":"monitor(loop, metric, fun, name, opts \\\\ [])"},{"anchor":"reduce_lr_on_plateau/3","deprecated":false,"id":"reduce_lr_on_plateau/3","title":"reduce_lr_on_plateau(loop, monitor, opts \\\\ [])"},{"anchor":"run/4","deprecated":false,"id":"run/4","title":"run(loop, data, init_state \\\\ %{}, opts \\\\ [])"},{"anchor":"serialize_state/2","deprecated":false,"id":"serialize_state/2","title":"serialize_state(state, opts \\\\ [])"},{"anchor":"train_step/4","deprecated":false,"id":"train_step/4","title":"train_step(model, loss, optimizer, opts \\\\ [])"},{"anchor":"trainer/4","deprecated":false,"id":"trainer/4","title":"trainer(model, loss, optimizer, opts \\\\ [])"},{"anchor":"validate/4","deprecated":false,"id":"validate/4","title":"validate(loop, model, validation_data, opts \\\\ [])"}]}],"sections":[{"anchor":"module-initialize-and-step","id":"Initialize and Step"},{"anchor":"module-metrics","id":"Metrics"},{"anchor":"module-events-and-handlers","id":"Events and Handlers"},{"anchor":"module-factories","id":"Factories"},{"anchor":"module-running-loops","id":"Running loops"},{"anchor":"module-resuming-loops","id":"Resuming 
loops"}],"title":"Axon.Loop"},{"deprecated":false,"group":"Loop","id":"Axon.Loop.State","sections":[],"title":"Axon.Loop.State"},{"deprecated":false,"group":"Exceptions","id":"Axon.CompileError","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"message/1","deprecated":false,"id":"message/1","title":"message(exception)"}]}],"sections":[],"title":"Axon.CompileError"}],"tasks":[]} \ No newline at end of file diff --git a/dist/sidebar_items-D4AB84D3.js b/dist/sidebar_items-D4AB84D3.js new file mode 100644 index 00000000..7d122b53 --- /dev/null +++ b/dist/sidebar_items-D4AB84D3.js @@ -0,0 +1 @@ +sidebarNodes={"extras":[{"group":"","headers":[{"anchor":"modules","id":"Modules"}],"id":"api-reference","title":"API Reference"},{"group":"","headers":[{"anchor":"model-creation","id":"Model Creation"},{"anchor":"model-execution","id":"Model Execution"},{"anchor":"training-and-evaluation","id":"Training and Evaluation"},{"anchor":"serialization","id":"Serialization"}],"id":"guides","title":"Axon Guides"},{"group":"Guides: Model Creation","headers":[{"anchor":"your-first-model","id":"Your first model"}],"id":"your_first_axon_model","title":"Your first Axon model"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-a-sequential-model","id":"Creating a sequential model"}],"id":"sequential_models","title":"Sequential models"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-more-complex-models","id":"Creating more complex models"}],"id":"complex_models","title":"Complex models"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-multi-input-models","id":"Creating multi-input models"},{"anchor":"creating-multi-output-models","id":"Creating multi-output models"}],"id":"multi_input_multi_output_models","title":"Multi-input / multi-output models"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-custom-layers","id":"Creating custom layers"}],"id":"custom_layers","title":"Custom layers"},{"group":"Guides: Model Creation","headers":[{"anchor":"creating-models-with-hooks","id":"Creating models with hooks"}],"id":"model_hooks","title":"Model hooks"},{"group":"Guides: Model Execution","headers":[{"anchor":"using-nx-backends-in-axon","id":"Using Nx Backends in Axon"},{"anchor":"using-nx-compilers-in-axon","id":"Using Nx Compilers in Axon"},{"anchor":"a-note-on-cpus-gpus-tpus","id":"A Note on CPUs/GPUs/TPUs"}],"id":"accelerating_axon","title":"Accelerating Axon"},{"group":"Guides: Model Execution","headers":[{"anchor":"executing-models-in-inference-mode","id":"Executing models in inference mode"},{"anchor":"executing-models-in-training-mode","id":"Executing models in training mode"}],"id":"training_and_inference_mode","title":"Training and inference mode"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"creating-an-axon-training-loop","id":"Creating an Axon training loop"}],"id":"your_first_training_loop","title":"Your first training loop"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"adding-metrics-to-training-loops","id":"Adding metrics to training loops"}],"id":"instrumenting_loops_with_metrics","title":"Instrumenting loops with metrics"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"creating-an-axon-evaluation-loop","id":"Creating an Axon evaluation loop"}],"id":"your_first_evaluation_loop","title":"Your first evaluation loop"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"adding-event-handlers-to-training-loops","id":"Adding event handlers to training 
loops"}],"id":"using_loop_event_handlers","title":"Using loop event handlers"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"using-custom-models-in-training-loops","id":"Using custom models in training loops"},{"anchor":"using-custom-loss-functions-in-training-loops","id":"Using custom loss functions in training loops"},{"anchor":"using-custom-optimizers-in-training-loops","id":"Using custom optimizers in training loops"}],"id":"custom_models_loss_optimizers","title":"Custom models, loss functions, and optimizers"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"writing-custom-metrics","id":"Writing custom metrics"}],"id":"writing_custom_metrics","title":"Writing custom metrics"},{"group":"Guides: Training and Evaluation","headers":[{"anchor":"writing-custom-event-handlers","id":"Writing custom event handlers"}],"id":"writing_custom_event_handlers","title":"Writing custom event handlers"},{"group":"Guides: Serialization","headers":[{"anchor":"converting-an-onnx-model-into-axon","id":"Converting an ONNX model into Axon"},{"anchor":"onnx-model","id":"ONNX model"},{"anchor":"inference-on-onnx-derived-models","id":"Inference on ONNX derived models"}],"id":"onnx_to_axon","title":"Converting ONNX models to Axon"},{"group":"Examples: Basics","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"the-model","id":"The model"},{"anchor":"training-data","id":"Training data"},{"anchor":"training","id":"Training"},{"anchor":"trying-the-model","id":"Trying the model"},{"anchor":"visualizing-the-model-predictions","id":"Visualizing the model predictions"}],"id":"xor","title":"Modeling XOR with a neural network"},{"group":"Examples: Vision","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"retrieving-and-exploring-the-dataset","id":"Retrieving and exploring the dataset"},{"anchor":"defining-the-model","id":"Defining the model"},{"anchor":"training","id":"Training"},{"anchor":"prediction","id":"Prediction"}],"id":"mnist","title":"Classifying handwritten digits"},{"group":"Examples: Vision","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"loading-the-data","id":"Loading the data"},{"anchor":"a-look-at-the-data","id":"A look at the data"},{"anchor":"data-processing","id":"Data processing"},{"anchor":"building-the-model","id":"Building the model"},{"anchor":"training-the-model","id":"Training the model"},{"anchor":"extra-gradient-centralization","id":"Extra: gradient centralization"},{"anchor":"inference","id":"Inference"}],"id":"horses_or_humans","title":"Classifying horses and humans"},{"group":"Examples: Text","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"preparation","id":"Preparation"},{"anchor":"defining-the-model","id":"Defining the Model"},{"anchor":"training-the-network","id":"Training the network"},{"anchor":"generating-text","id":"Generating text"},{"anchor":"multi-lstm-layers","id":"Multi LSTM layers"},{"anchor":"generate-text-with-the-new-network","id":"Generate text with the new network"},{"anchor":"references","id":"References"}],"id":"lstm_generation","title":"Generating text with LSTM"},{"group":"Examples: Structured","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"data-processing","id":"Data processing"},{"anchor":"building-the-model","id":"Building the model"},{"anchor":"training-our-model","id":"Training our model"},{"anchor":"model-evaluation","id":"Model evaluation"}],"id":"credit_card_fraud","title":"Classifying fraudulent transactions"},{"group":"Examples: 
Generative","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"data-loading","id":"Data loading"},{"anchor":"building-the-model","id":"Building the model"},{"anchor":"evaluation","id":"Evaluation"},{"anchor":"a-better-training-loop","id":"A better training loop"}],"id":"mnist_autoencoder_using_kino","title":"MNIST Denoising Autoencoder using Kino for visualization"},{"group":"Examples: Generative","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"downloading-the-data","id":"Downloading the data"},{"anchor":"encoder-and-decoder","id":"Encoder and decoder"},{"anchor":"training-the-model","id":"Training the model"},{"anchor":"extra-losses","id":"Extra: losses"},{"anchor":"inference","id":"Inference"}],"id":"fashionmnist_autoencoder","title":"Training an Autoencoder on Fashion MNIST"},{"group":"Examples: Generative","headers":[{"anchor":"introduction","id":"Introduction"},{"anchor":"training-a-simple-autoencoder","id":"Training a simple autoencoder"},{"anchor":"splitting-up-the-model","id":"Splitting up the model"},{"anchor":"making-it-variational","id":"Making it variational"}],"id":"fashionmnist_vae","title":"A Variational Autoencoder for MNIST"}],"modules":[{"deprecated":false,"group":"","id":"Axon.ModelState","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"empty/0","deprecated":false,"id":"empty/0","title":"empty()"},{"anchor":"freeze/2","deprecated":false,"id":"freeze/2","title":"freeze(model_state, mask \\\\ fn _ -> true end)"},{"anchor":"frozen_parameters/1","deprecated":false,"id":"frozen_parameters/1","title":"frozen_parameters(model_state)"},{"anchor":"frozen_state/1","deprecated":false,"id":"frozen_state/1","title":"frozen_state(model_state)"},{"anchor":"new/1","deprecated":false,"id":"new/1","title":"new(data)"},{"anchor":"trainable_parameters/1","deprecated":false,"id":"trainable_parameters/1","title":"trainable_parameters(model_state)"},{"anchor":"trainable_state/1","deprecated":false,"id":"trainable_state/1","title":"trainable_state(model_state)"},{"anchor":"unfreeze/2","deprecated":false,"id":"unfreeze/2","title":"unfreeze(model_state, mask \\\\ fn _ -> true end)"},{"anchor":"update/3","deprecated":false,"id":"update/3","title":"update(model_state, updated_parameters, updated_state \\\\ %{})"}]}],"sections":[],"title":"Axon.ModelState"},{"deprecated":false,"group":"Model","id":"Axon","nodeGroups":[{"key":"layers-special","name":"Layers: Special","nodes":[{"anchor":"block/2","deprecated":false,"id":"block/2","title":"block(fun, opts \\\\ [])"},{"anchor":"constant/2","deprecated":false,"id":"constant/2","title":"constant(tensor, opts \\\\ [])"},{"anchor":"container/2","deprecated":false,"id":"container/2","title":"container(container, opts \\\\ [])"},{"anchor":"input/2","deprecated":false,"id":"input/2","title":"input(name, opts \\\\ [])"},{"anchor":"layer/3","deprecated":false,"id":"layer/3","title":"layer(op, inputs, opts \\\\ [])"},{"anchor":"nx/3","deprecated":false,"id":"nx/3","title":"nx(input, fun, opts \\\\ [])"},{"anchor":"optional/2","deprecated":false,"id":"optional/2","title":"optional(x, opts \\\\ [])"},{"anchor":"param/3","deprecated":false,"id":"param/3","title":"param(name, shape, opts \\\\ [])"},{"anchor":"stack_columns/2","deprecated":false,"id":"stack_columns/2","title":"stack_columns(x, opts \\\\ [])"}]},{"key":"layers-activation","name":"Layers: Activation","nodes":[{"anchor":"activation/3","deprecated":false,"id":"activation/3","title":"activation(x, activation, opts \\\\ 
[])"},{"anchor":"celu/2","deprecated":false,"id":"celu/2","title":"celu(x, opts \\\\ [])"},{"anchor":"elu/2","deprecated":false,"id":"elu/2","title":"elu(x, opts \\\\ [])"},{"anchor":"exp/2","deprecated":false,"id":"exp/2","title":"exp(x, opts \\\\ [])"},{"anchor":"gelu/2","deprecated":false,"id":"gelu/2","title":"gelu(x, opts \\\\ [])"},{"anchor":"hard_sigmoid/2","deprecated":false,"id":"hard_sigmoid/2","title":"hard_sigmoid(x, opts \\\\ [])"},{"anchor":"hard_silu/2","deprecated":false,"id":"hard_silu/2","title":"hard_silu(x, opts \\\\ [])"},{"anchor":"hard_tanh/2","deprecated":false,"id":"hard_tanh/2","title":"hard_tanh(x, opts \\\\ [])"},{"anchor":"leaky_relu/2","deprecated":false,"id":"leaky_relu/2","title":"leaky_relu(x, opts \\\\ [])"},{"anchor":"linear/2","deprecated":false,"id":"linear/2","title":"linear(x, opts \\\\ [])"},{"anchor":"log_sigmoid/2","deprecated":false,"id":"log_sigmoid/2","title":"log_sigmoid(x, opts \\\\ [])"},{"anchor":"log_softmax/2","deprecated":false,"id":"log_softmax/2","title":"log_softmax(x, opts \\\\ [])"},{"anchor":"log_sumexp/2","deprecated":false,"id":"log_sumexp/2","title":"log_sumexp(x, opts \\\\ [])"},{"anchor":"mish/2","deprecated":false,"id":"mish/2","title":"mish(x, opts \\\\ [])"},{"anchor":"relu6/2","deprecated":false,"id":"relu6/2","title":"relu6(x, opts \\\\ [])"},{"anchor":"relu/2","deprecated":false,"id":"relu/2","title":"relu(x, opts \\\\ [])"},{"anchor":"selu/2","deprecated":false,"id":"selu/2","title":"selu(x, opts \\\\ [])"},{"anchor":"sigmoid/2","deprecated":false,"id":"sigmoid/2","title":"sigmoid(x, opts \\\\ [])"},{"anchor":"silu/2","deprecated":false,"id":"silu/2","title":"silu(x, opts \\\\ [])"},{"anchor":"softmax/2","deprecated":false,"id":"softmax/2","title":"softmax(x, opts \\\\ [])"},{"anchor":"softplus/2","deprecated":false,"id":"softplus/2","title":"softplus(x, opts \\\\ [])"},{"anchor":"softsign/2","deprecated":false,"id":"softsign/2","title":"softsign(x, opts \\\\ [])"},{"anchor":"tanh/2","deprecated":false,"id":"tanh/2","title":"tanh(x, opts \\\\ [])"}]},{"key":"layers-linear","name":"Layers: Linear","nodes":[{"anchor":"bias/2","deprecated":false,"id":"bias/2","title":"bias(x, opts \\\\ [])"},{"anchor":"bilinear/4","deprecated":false,"id":"bilinear/4","title":"bilinear(input1, input2, units, opts \\\\ [])"},{"anchor":"dense/3","deprecated":false,"id":"dense/3","title":"dense(x, units, opts \\\\ [])"},{"anchor":"embedding/4","deprecated":false,"id":"embedding/4","title":"embedding(x, vocab_size, embedding_size, opts \\\\ [])"}]},{"key":"layers-convolution","name":"Layers: Convolution","nodes":[{"anchor":"conv/3","deprecated":false,"id":"conv/3","title":"conv(x, units, opts \\\\ [])"},{"anchor":"conv_transpose/3","deprecated":false,"id":"conv_transpose/3","title":"conv_transpose(x, units, opts \\\\ [])"},{"anchor":"depthwise_conv/3","deprecated":false,"id":"depthwise_conv/3","title":"depthwise_conv(x, channel_multiplier, opts \\\\ [])"},{"anchor":"separable_conv2d/3","deprecated":false,"id":"separable_conv2d/3","title":"separable_conv2d(x, channel_multiplier, opts \\\\ [])"},{"anchor":"separable_conv3d/3","deprecated":false,"id":"separable_conv3d/3","title":"separable_conv3d(x, channel_multiplier, opts \\\\ [])"}]},{"key":"layers-dropout","name":"Layers: Dropout","nodes":[{"anchor":"alpha_dropout/2","deprecated":false,"id":"alpha_dropout/2","title":"alpha_dropout(x, opts \\\\ [])"},{"anchor":"dropout/2","deprecated":false,"id":"dropout/2","title":"dropout(x, opts \\\\ 
[])"},{"anchor":"feature_alpha_dropout/2","deprecated":false,"id":"feature_alpha_dropout/2","title":"feature_alpha_dropout(x, opts \\\\ [])"},{"anchor":"spatial_dropout/2","deprecated":false,"id":"spatial_dropout/2","title":"spatial_dropout(x, opts \\\\ [])"}]},{"key":"layers-pooling","name":"Layers: Pooling","nodes":[{"anchor":"adaptive_avg_pool/2","deprecated":false,"id":"adaptive_avg_pool/2","title":"adaptive_avg_pool(x, opts \\\\ [])"},{"anchor":"adaptive_lp_pool/2","deprecated":false,"id":"adaptive_lp_pool/2","title":"adaptive_lp_pool(x, opts \\\\ [])"},{"anchor":"adaptive_max_pool/2","deprecated":false,"id":"adaptive_max_pool/2","title":"adaptive_max_pool(x, opts \\\\ [])"},{"anchor":"avg_pool/2","deprecated":false,"id":"avg_pool/2","title":"avg_pool(x, opts \\\\ [])"},{"anchor":"global_avg_pool/2","deprecated":false,"id":"global_avg_pool/2","title":"global_avg_pool(x, opts \\\\ [])"},{"anchor":"global_lp_pool/2","deprecated":false,"id":"global_lp_pool/2","title":"global_lp_pool(x, opts \\\\ [])"},{"anchor":"global_max_pool/2","deprecated":false,"id":"global_max_pool/2","title":"global_max_pool(x, opts \\\\ [])"},{"anchor":"lp_pool/2","deprecated":false,"id":"lp_pool/2","title":"lp_pool(x, opts \\\\ [])"},{"anchor":"max_pool/2","deprecated":false,"id":"max_pool/2","title":"max_pool(x, opts \\\\ [])"}]},{"key":"layers-normalization","name":"Layers: Normalization","nodes":[{"anchor":"batch_norm/2","deprecated":false,"id":"batch_norm/2","title":"batch_norm(x, opts \\\\ [])"},{"anchor":"group_norm/3","deprecated":false,"id":"group_norm/3","title":"group_norm(x, num_groups, opts \\\\ [])"},{"anchor":"instance_norm/2","deprecated":false,"id":"instance_norm/2","title":"instance_norm(x, opts \\\\ [])"},{"anchor":"layer_norm/2","deprecated":false,"id":"layer_norm/2","title":"layer_norm(x, opts \\\\ [])"}]},{"key":"layers-recurrent","name":"Layers: Recurrent","nodes":[{"anchor":"conv_lstm/2","deprecated":false,"id":"conv_lstm/2","title":"conv_lstm(x, units)"},{"anchor":"conv_lstm/3","deprecated":false,"id":"conv_lstm/3","title":"conv_lstm(x, units, opts)"},{"anchor":"conv_lstm/4","deprecated":false,"id":"conv_lstm/4","title":"conv_lstm(x, hidden_state, units, opts)"},{"anchor":"gru/2","deprecated":false,"id":"gru/2","title":"gru(x, units)"},{"anchor":"gru/3","deprecated":false,"id":"gru/3","title":"gru(x, units, opts)"},{"anchor":"gru/4","deprecated":false,"id":"gru/4","title":"gru(x, hidden_state, units, opts)"},{"anchor":"lstm/2","deprecated":false,"id":"lstm/2","title":"lstm(x, units)"},{"anchor":"lstm/3","deprecated":false,"id":"lstm/3","title":"lstm(x, units, opts)"},{"anchor":"lstm/4","deprecated":false,"id":"lstm/4","title":"lstm(x, hidden_state, units, opts \\\\ [])"},{"anchor":"mask/3","deprecated":false,"id":"mask/3","title":"mask(input, eos_token, opts \\\\ [])"}]},{"key":"layers-combinators","name":"Layers: Combinators","nodes":[{"anchor":"add/3","deprecated":false,"id":"add/3","title":"add(x, y, opts)"},{"anchor":"concatenate/3","deprecated":false,"id":"concatenate/3","title":"concatenate(x, y, opts)"},{"anchor":"cond/5","deprecated":false,"id":"cond/5","title":"cond(parent, cond_fn, true_graph, false_graph, opts \\\\ [])"},{"anchor":"multiply/3","deprecated":false,"id":"multiply/3","title":"multiply(x, y, opts)"},{"anchor":"split/3","deprecated":false,"id":"split/3","title":"split(parent, splits, opts \\\\ [])"},{"anchor":"subtract/3","deprecated":false,"id":"subtract/3","title":"subtract(x, y, opts)"}]},{"key":"layers-shape","name":"Layers: 
Shape","nodes":[{"anchor":"flatten/2","deprecated":false,"id":"flatten/2","title":"flatten(x, opts \\\\ [])"},{"anchor":"pad/4","deprecated":false,"id":"pad/4","title":"pad(x, config, value \\\\ 0.0, opts \\\\ [])"},{"anchor":"reshape/3","deprecated":false,"id":"reshape/3","title":"reshape(x, new_shape, opts \\\\ [])"},{"anchor":"resize/3","deprecated":false,"id":"resize/3","title":"resize(x, resize_shape, opts \\\\ [])"},{"anchor":"transpose/3","deprecated":false,"id":"transpose/3","title":"transpose(x, permutation \\\\ nil, opts \\\\ [])"}]},{"key":"model","name":"Model","nodes":[{"anchor":"build/2","deprecated":false,"id":"build/2","title":"build(model, opts \\\\ [])"},{"anchor":"compile/4","deprecated":false,"id":"compile/4","title":"compile(model, template, init_params \\\\ %{}, opts \\\\ [])"},{"anchor":"freeze/2","deprecated":true,"id":"freeze/2","title":"freeze(model, fun_or_predicate \\\\ :all)"},{"anchor":"predict/4","deprecated":false,"id":"predict/4","title":"predict(model, params, input, opts \\\\ [])"},{"anchor":"unfreeze/2","deprecated":true,"id":"unfreeze/2","title":"unfreeze(model, fun_or_predicate \\\\ :all)"}]},{"key":"model-manipulation","name":"Model: Manipulation","nodes":[{"anchor":"get_inputs/1","deprecated":false,"id":"get_inputs/1","title":"get_inputs(axon)"},{"anchor":"get_op_counts/1","deprecated":false,"id":"get_op_counts/1","title":"get_op_counts(axon)"},{"anchor":"get_options/1","deprecated":false,"id":"get_options/1","title":"get_options(axon)"},{"anchor":"get_output_shape/3","deprecated":false,"id":"get_output_shape/3","title":"get_output_shape(axon, inputs, opts \\\\ [])"},{"anchor":"get_parameters/1","deprecated":false,"id":"get_parameters/1","title":"get_parameters(axon)"},{"anchor":"map_nodes/2","deprecated":false,"id":"map_nodes/2","title":"map_nodes(axon, fun)"},{"anchor":"pop_node/1","deprecated":false,"id":"pop_node/1","title":"pop_node(axon)"},{"anchor":"reduce_nodes/3","deprecated":false,"id":"reduce_nodes/3","title":"reduce_nodes(axon, acc, fun)"},{"anchor":"set_options/2","deprecated":false,"id":"set_options/2","title":"set_options(axon, new_opts)"},{"anchor":"set_parameters/2","deprecated":false,"id":"set_parameters/2","title":"set_parameters(axon, new_params)"}]},{"key":"model-debugging","name":"Model: Debugging","nodes":[{"anchor":"attach_hook/3","deprecated":false,"id":"attach_hook/3","title":"attach_hook(x, fun, opts \\\\ [])"},{"anchor":"trace_backward/5","deprecated":false,"id":"trace_backward/5","title":"trace_backward(model, inputs, params, loss, opts \\\\ [])"},{"anchor":"trace_forward/4","deprecated":false,"id":"trace_forward/4","title":"trace_forward(model, inputs, params, opts \\\\ [])"},{"anchor":"trace_init/4","deprecated":false,"id":"trace_init/4","title":"trace_init(model, template, params \\\\ %{}, opts \\\\ [])"}]},{"key":"types","name":"Types","nodes":[{"anchor":"t:t/0","deprecated":false,"id":"t/0","title":"t()"}]},{"key":"functions","name":"Functions","nodes":[{"anchor":"bidirectional/4","deprecated":false,"id":"bidirectional/4","title":"bidirectional(input, forward_fun, merge_fun, opts \\\\ [])"},{"anchor":"blur_pool/2","deprecated":false,"id":"blur_pool/2","title":"blur_pool(x, opts \\\\ [])"}]}],"sections":[{"anchor":"module-model-creation","id":"Model Creation"},{"anchor":"module-model-execution","id":"Model Execution"},{"anchor":"module-model-training","id":"Model Training"},{"anchor":"module-using-with-nx-serving","id":"Using with 
Nx.Serving"}],"title":"Axon"},{"deprecated":false,"group":"Model","id":"Axon.Initializers","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"full/1","deprecated":false,"id":"full/1","title":"full(value)"},{"anchor":"glorot_normal/1","deprecated":false,"id":"glorot_normal/1","title":"glorot_normal(opts \\\\ [])"},{"anchor":"glorot_uniform/1","deprecated":false,"id":"glorot_uniform/1","title":"glorot_uniform(opts \\\\ [])"},{"anchor":"he_normal/1","deprecated":false,"id":"he_normal/1","title":"he_normal(opts \\\\ [])"},{"anchor":"he_uniform/1","deprecated":false,"id":"he_uniform/1","title":"he_uniform(opts \\\\ [])"},{"anchor":"identity/0","deprecated":false,"id":"identity/0","title":"identity()"},{"anchor":"lecun_normal/1","deprecated":false,"id":"lecun_normal/1","title":"lecun_normal(opts \\\\ [])"},{"anchor":"lecun_uniform/1","deprecated":false,"id":"lecun_uniform/1","title":"lecun_uniform(opts \\\\ [])"},{"anchor":"normal/1","deprecated":false,"id":"normal/1","title":"normal(opts \\\\ [])"},{"anchor":"ones/0","deprecated":false,"id":"ones/0","title":"ones()"},{"anchor":"orthogonal/1","deprecated":false,"id":"orthogonal/1","title":"orthogonal(opts \\\\ [])"},{"anchor":"uniform/1","deprecated":false,"id":"uniform/1","title":"uniform(opts \\\\ [])"},{"anchor":"variance_scaling/1","deprecated":false,"id":"variance_scaling/1","title":"variance_scaling(opts \\\\ [])"},{"anchor":"zeros/0","deprecated":false,"id":"zeros/0","title":"zeros()"}]}],"sections":[],"title":"Axon.Initializers"},{"deprecated":false,"group":"Model","id":"Axon.MixedPrecision","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"cast/3","deprecated":false,"id":"cast/3","title":"cast(policy, tensor_or_container, variable_type)"},{"anchor":"create_policy/1","deprecated":false,"id":"create_policy/1","title":"create_policy(opts \\\\ [])"}]}],"sections":[],"title":"Axon.MixedPrecision"},{"deprecated":false,"group":"Model","id":"Axon.None","sections":[],"title":"Axon.None"},{"deprecated":false,"group":"Model","id":"Axon.StatefulOutput","sections":[],"title":"Axon.StatefulOutput"},{"deprecated":false,"group":"Summary","id":"Axon.Display","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"as_graph/3","deprecated":false,"id":"as_graph/3","title":"as_graph(axon, input_templates, opts \\\\ [])"},{"anchor":"as_table/2","deprecated":false,"id":"as_table/2","title":"as_table(axon, input_templates)"}]}],"sections":[],"title":"Axon.Display"},{"deprecated":false,"group":"Functional","id":"Axon.Activations","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"celu/2","deprecated":false,"id":"celu/2","title":"celu(x, opts \\\\ [])"},{"anchor":"elu/2","deprecated":false,"id":"elu/2","title":"elu(x, opts \\\\ [])"},{"anchor":"exp/1","deprecated":false,"id":"exp/1","title":"exp(x)"},{"anchor":"gelu/1","deprecated":false,"id":"gelu/1","title":"gelu(x)"},{"anchor":"hard_sigmoid/2","deprecated":false,"id":"hard_sigmoid/2","title":"hard_sigmoid(x, opts \\\\ [])"},{"anchor":"hard_silu/2","deprecated":false,"id":"hard_silu/2","title":"hard_silu(x, opts \\\\ [])"},{"anchor":"hard_tanh/1","deprecated":false,"id":"hard_tanh/1","title":"hard_tanh(x)"},{"anchor":"leaky_relu/2","deprecated":false,"id":"leaky_relu/2","title":"leaky_relu(x, opts \\\\ 
[])"},{"anchor":"linear/1","deprecated":false,"id":"linear/1","title":"linear(x)"},{"anchor":"log_sigmoid/1","deprecated":false,"id":"log_sigmoid/1","title":"log_sigmoid(x)"},{"anchor":"log_softmax/2","deprecated":false,"id":"log_softmax/2","title":"log_softmax(x, opts \\\\ [])"},{"anchor":"log_sumexp/2","deprecated":false,"id":"log_sumexp/2","title":"log_sumexp(x, opts \\\\ [])"},{"anchor":"mish/1","deprecated":false,"id":"mish/1","title":"mish(x)"},{"anchor":"relu6/1","deprecated":false,"id":"relu6/1","title":"relu6(x)"},{"anchor":"relu/1","deprecated":false,"id":"relu/1","title":"relu(x)"},{"anchor":"selu/2","deprecated":false,"id":"selu/2","title":"selu(x, opts \\\\ [])"},{"anchor":"sigmoid/1","deprecated":false,"id":"sigmoid/1","title":"sigmoid(x)"},{"anchor":"silu/1","deprecated":false,"id":"silu/1","title":"silu(x)"},{"anchor":"softmax/2","deprecated":false,"id":"softmax/2","title":"softmax(x, opts \\\\ [])"},{"anchor":"softplus/1","deprecated":false,"id":"softplus/1","title":"softplus(x)"},{"anchor":"softsign/1","deprecated":false,"id":"softsign/1","title":"softsign(x)"},{"anchor":"tanh/1","deprecated":false,"id":"tanh/1","title":"tanh(x)"}]}],"sections":[],"title":"Axon.Activations"},{"deprecated":false,"group":"Functional","id":"Axon.Layers","nodeGroups":[{"key":"layers-linear","name":"Layers: Linear","nodes":[{"anchor":"bilinear/5","deprecated":false,"id":"bilinear/5","title":"bilinear(input1, input2, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"dense/4","deprecated":false,"id":"dense/4","title":"dense(input, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"embedding/3","deprecated":false,"id":"embedding/3","title":"embedding(input, kernel, arg3 \\\\ [])"}]},{"key":"layers-dropout","name":"Layers: Dropout","nodes":[{"anchor":"alpha_dropout/3","deprecated":false,"id":"alpha_dropout/3","title":"alpha_dropout(input, key, opts \\\\ [])"},{"anchor":"dropout/3","deprecated":false,"id":"dropout/3","title":"dropout(input, key, opts \\\\ [])"},{"anchor":"feature_alpha_dropout/3","deprecated":false,"id":"feature_alpha_dropout/3","title":"feature_alpha_dropout(input, key, opts \\\\ [])"},{"anchor":"spatial_dropout/3","deprecated":false,"id":"spatial_dropout/3","title":"spatial_dropout(input, key, opts \\\\ [])"}]},{"key":"layers-pooling","name":"Layers: Pooling","nodes":[{"anchor":"adaptive_avg_pool/2","deprecated":false,"id":"adaptive_avg_pool/2","title":"adaptive_avg_pool(input, opts \\\\ [])"},{"anchor":"adaptive_lp_pool/2","deprecated":false,"id":"adaptive_lp_pool/2","title":"adaptive_lp_pool(input, opts \\\\ [])"},{"anchor":"adaptive_max_pool/2","deprecated":false,"id":"adaptive_max_pool/2","title":"adaptive_max_pool(input, opts \\\\ [])"},{"anchor":"avg_pool/2","deprecated":false,"id":"avg_pool/2","title":"avg_pool(input, opts \\\\ [])"},{"anchor":"blur_pool/2","deprecated":false,"id":"blur_pool/2","title":"blur_pool(input, opts \\\\ [])"},{"anchor":"global_avg_pool/2","deprecated":false,"id":"global_avg_pool/2","title":"global_avg_pool(input, opts \\\\ [])"},{"anchor":"global_lp_pool/2","deprecated":false,"id":"global_lp_pool/2","title":"global_lp_pool(input, opts \\\\ [])"},{"anchor":"global_max_pool/2","deprecated":false,"id":"global_max_pool/2","title":"global_max_pool(input, opts \\\\ [])"},{"anchor":"lp_pool/2","deprecated":false,"id":"lp_pool/2","title":"lp_pool(input, opts \\\\ [])"},{"anchor":"max_pool/2","deprecated":false,"id":"max_pool/2","title":"max_pool(input, opts \\\\ [])"}]},{"key":"layers-normalization","name":"Layers: 
Normalization","nodes":[{"anchor":"batch_norm/6","deprecated":false,"id":"batch_norm/6","title":"batch_norm(input, gamma, beta, ra_mean, ra_var, opts \\\\ [])"},{"anchor":"group_norm/4","deprecated":false,"id":"group_norm/4","title":"group_norm(input, gamma, beta, opts \\\\ [])"},{"anchor":"instance_norm/6","deprecated":false,"id":"instance_norm/6","title":"instance_norm(input, gamma, beta, ra_mean, ra_var, opts \\\\ [])"},{"anchor":"layer_norm/4","deprecated":false,"id":"layer_norm/4","title":"layer_norm(input, gamma, beta, opts \\\\ [])"}]},{"key":"layers-shape","name":"Layers: Shape","nodes":[{"anchor":"flatten/2","deprecated":false,"id":"flatten/2","title":"flatten(input, opts \\\\ [])"},{"anchor":"resize/2","deprecated":false,"id":"resize/2","title":"resize(input, opts \\\\ [])"}]},{"key":"functions-convolutional","name":"Functions: Convolutional","nodes":[{"anchor":"conv/4","deprecated":false,"id":"conv/4","title":"conv(input, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"conv_transpose/4","deprecated":false,"id":"conv_transpose/4","title":"conv_transpose(input, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"depthwise_conv/4","deprecated":false,"id":"depthwise_conv/4","title":"depthwise_conv(inputs, kernel, bias \\\\ 0, opts \\\\ [])"},{"anchor":"separable_conv2d/6","deprecated":false,"id":"separable_conv2d/6","title":"separable_conv2d(input, k1, b1, k2, b2, opts \\\\ [])"},{"anchor":"separable_conv3d/8","deprecated":false,"id":"separable_conv3d/8","title":"separable_conv3d(input, k1, b1, k2, b2, k3, b3, opts \\\\ [])"}]},{"key":"functions","name":"Functions","nodes":[{"anchor":"celu/2","deprecated":false,"id":"celu/2","title":"celu(input, opts \\\\ [])"},{"anchor":"conv_lstm/7","deprecated":false,"id":"conv_lstm/7","title":"conv_lstm(input, hidden_state, mask, input_kernel, hidden_kernel, bias \\\\ [], opts \\\\ [])"},{"anchor":"conv_lstm_cell/7","deprecated":false,"id":"conv_lstm_cell/7","title":"conv_lstm_cell(input, carry, arg3, ih, hh, bi, opts \\\\ [])"},{"anchor":"dynamic_unroll/7","deprecated":false,"id":"dynamic_unroll/7","title":"dynamic_unroll(cell_fn, input_sequence, carry, mask, input_kernel, recurrent_kernel, bias)"},{"anchor":"elu/2","deprecated":false,"id":"elu/2","title":"elu(input, opts \\\\ [])"},{"anchor":"gru/7","deprecated":false,"id":"gru/7","title":"gru(input, hidden_state, mask, input_kernel, hidden_kernel, bias \\\\ [], opts \\\\ [])"},{"anchor":"gru_cell/8","deprecated":false,"id":"gru_cell/8","title":"gru_cell(input, carry, mask, arg4, arg5, arg6, gate_fn \\\\ &Axon.Activations.sigmoid/1, activation_fn \\\\ &Axon.Activations.tanh/1)"},{"anchor":"hard_sigmoid/2","deprecated":false,"id":"hard_sigmoid/2","title":"hard_sigmoid(input, opts \\\\ [])"},{"anchor":"hard_silu/2","deprecated":false,"id":"hard_silu/2","title":"hard_silu(input, opts \\\\ [])"},{"anchor":"leaky_relu/2","deprecated":false,"id":"leaky_relu/2","title":"leaky_relu(input, opts \\\\ [])"},{"anchor":"log_softmax/2","deprecated":false,"id":"log_softmax/2","title":"log_softmax(input, opts \\\\ [])"},{"anchor":"log_sumexp/2","deprecated":false,"id":"log_sumexp/2","title":"log_sumexp(input, opts \\\\ [])"},{"anchor":"lstm/7","deprecated":false,"id":"lstm/7","title":"lstm(input, hidden_state, mask, input_kernel, hidden_kernel, bias \\\\ [], opts \\\\ [])"},{"anchor":"lstm_cell/8","deprecated":false,"id":"lstm_cell/8","title":"lstm_cell(input, carry, mask, arg4, arg5, arg6, gate_fn \\\\ &Axon.Activations.sigmoid/1, activation_fn \\\\ 
&Axon.Activations.tanh/1)"},{"anchor":"multiply/2","deprecated":false,"id":"multiply/2","title":"multiply(inputs, opts \\\\ [])"},{"anchor":"padding_config_transform/2","deprecated":false,"id":"padding_config_transform/2","title":"padding_config_transform(config, channels)"},{"anchor":"selu/2","deprecated":false,"id":"selu/2","title":"selu(input, opts \\\\ [])"},{"anchor":"softmax/2","deprecated":false,"id":"softmax/2","title":"softmax(input, opts \\\\ [])"},{"anchor":"static_unroll/7","deprecated":false,"id":"static_unroll/7","title":"static_unroll(cell_fn, input_sequence, carry, mask, input_kernel, recurrent_kernel, bias)"},{"anchor":"subtract/2","deprecated":false,"id":"subtract/2","title":"subtract(inputs, opts \\\\ [])"}]}],"sections":[],"title":"Axon.Layers"},{"deprecated":false,"group":"Functional","id":"Axon.LossScale","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"dynamic/1","deprecated":false,"id":"dynamic/1","title":"dynamic(opts \\\\ [])"},{"anchor":"identity/1","deprecated":false,"id":"identity/1","title":"identity(opts \\\\ [])"},{"anchor":"static/1","deprecated":false,"id":"static/1","title":"static(opts \\\\ [])"}]}],"sections":[],"title":"Axon.LossScale"},{"deprecated":false,"group":"Functional","id":"Axon.Losses","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"apply_label_smoothing/3","deprecated":false,"id":"apply_label_smoothing/3","title":"apply_label_smoothing(y_true, y_pred, opts \\\\ [])"},{"anchor":"binary_cross_entropy/3","deprecated":false,"id":"binary_cross_entropy/3","title":"binary_cross_entropy(y_true, y_pred, opts \\\\ [])"},{"anchor":"categorical_cross_entropy/3","deprecated":false,"id":"categorical_cross_entropy/3","title":"categorical_cross_entropy(y_true, y_pred, opts \\\\ [])"},{"anchor":"categorical_hinge/3","deprecated":false,"id":"categorical_hinge/3","title":"categorical_hinge(y_true, y_pred, opts \\\\ [])"},{"anchor":"connectionist_temporal_classification/3","deprecated":false,"id":"connectionist_temporal_classification/3","title":"connectionist_temporal_classification(arg1, y_pred, opts \\\\ [])"},{"anchor":"cosine_similarity/3","deprecated":false,"id":"cosine_similarity/3","title":"cosine_similarity(y_true, y_pred, opts \\\\ [])"},{"anchor":"hinge/3","deprecated":false,"id":"hinge/3","title":"hinge(y_true, y_pred, opts \\\\ [])"},{"anchor":"huber/3","deprecated":false,"id":"huber/3","title":"huber(y_true, y_pred, opts \\\\ [])"},{"anchor":"kl_divergence/3","deprecated":false,"id":"kl_divergence/3","title":"kl_divergence(y_true, y_pred, opts \\\\ [])"},{"anchor":"label_smoothing/2","deprecated":false,"id":"label_smoothing/2","title":"label_smoothing(loss_fun, opts \\\\ [])"},{"anchor":"log_cosh/3","deprecated":false,"id":"log_cosh/3","title":"log_cosh(y_true, y_pred, opts \\\\ [])"},{"anchor":"margin_ranking/3","deprecated":false,"id":"margin_ranking/3","title":"margin_ranking(y_true, arg2, opts \\\\ [])"},{"anchor":"mean_absolute_error/3","deprecated":false,"id":"mean_absolute_error/3","title":"mean_absolute_error(y_true, y_pred, opts \\\\ [])"},{"anchor":"mean_squared_error/3","deprecated":false,"id":"mean_squared_error/3","title":"mean_squared_error(y_true, y_pred, opts \\\\ [])"},{"anchor":"poisson/3","deprecated":false,"id":"poisson/3","title":"poisson(y_true, y_pred, opts \\\\ [])"},{"anchor":"soft_margin/3","deprecated":false,"id":"soft_margin/3","title":"soft_margin(y_true, y_pred, opts \\\\ 
[])"}]}],"sections":[],"title":"Axon.Losses"},{"deprecated":false,"group":"Functional","id":"Axon.Metrics","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"accuracy/3","deprecated":false,"id":"accuracy/3","title":"accuracy(y_true, y_pred, opts \\\\ [])"},{"anchor":"accuracy_transform/4","deprecated":false,"id":"accuracy_transform/4","title":"accuracy_transform(y_true, y_pred, from_logits, sparse)"},{"anchor":"false_negatives/3","deprecated":false,"id":"false_negatives/3","title":"false_negatives(y_true, y_pred, opts \\\\ [])"},{"anchor":"false_positives/3","deprecated":false,"id":"false_positives/3","title":"false_positives(y_true, y_pred, opts \\\\ [])"},{"anchor":"mean_absolute_error/2","deprecated":false,"id":"mean_absolute_error/2","title":"mean_absolute_error(y_true, y_pred)"},{"anchor":"precision/3","deprecated":false,"id":"precision/3","title":"precision(y_true, y_pred, opts \\\\ [])"},{"anchor":"recall/3","deprecated":false,"id":"recall/3","title":"recall(y_true, y_pred, opts \\\\ [])"},{"anchor":"running_average/1","deprecated":false,"id":"running_average/1","title":"running_average(metric)"},{"anchor":"running_sum/1","deprecated":false,"id":"running_sum/1","title":"running_sum(metric)"},{"anchor":"sensitivity/3","deprecated":false,"id":"sensitivity/3","title":"sensitivity(y_true, y_pred, opts \\\\ [])"},{"anchor":"specificity/3","deprecated":false,"id":"specificity/3","title":"specificity(y_true, y_pred, opts \\\\ [])"},{"anchor":"top_k_categorical_accuracy/3","deprecated":false,"id":"top_k_categorical_accuracy/3","title":"top_k_categorical_accuracy(y_true, y_pred, opts \\\\ [])"},{"anchor":"true_negatives/3","deprecated":false,"id":"true_negatives/3","title":"true_negatives(y_true, y_pred, opts \\\\ [])"},{"anchor":"true_positives/3","deprecated":false,"id":"true_positives/3","title":"true_positives(y_true, y_pred, opts \\\\ [])"}]}],"sections":[],"title":"Axon.Metrics"},{"deprecated":false,"group":"Loop","id":"Axon.Loop","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"checkpoint/2","deprecated":false,"id":"checkpoint/2","title":"checkpoint(loop, opts \\\\ [])"},{"anchor":"deserialize_state/2","deprecated":false,"id":"deserialize_state/2","title":"deserialize_state(serialized, opts \\\\ [])"},{"anchor":"early_stop/3","deprecated":false,"id":"early_stop/3","title":"early_stop(loop, monitor, opts \\\\ [])"},{"anchor":"eval_step/1","deprecated":false,"id":"eval_step/1","title":"eval_step(model)"},{"anchor":"evaluator/1","deprecated":false,"id":"evaluator/1","title":"evaluator(model)"},{"anchor":"from_state/2","deprecated":false,"id":"from_state/2","title":"from_state(loop, state)"},{"anchor":"handle_event/4","deprecated":false,"id":"handle_event/4","title":"handle_event(loop, event, handler, filter \\\\ :always)"},{"anchor":"kino_vega_lite_plot/4","deprecated":false,"id":"kino_vega_lite_plot/4","title":"kino_vega_lite_plot(loop, plot, metric, opts \\\\ [])"},{"anchor":"log/3","deprecated":false,"id":"log/3","title":"log(loop, message_fn, opts \\\\ [])"},{"anchor":"loop/3","deprecated":false,"id":"loop/3","title":"loop(step_fn, init_fn \\\\ &default_init/2, output_transform \\\\ & &1)"},{"anchor":"metric/5","deprecated":false,"id":"metric/5","title":"metric(loop, metric, name \\\\ nil, accumulate \\\\ :running_average, transform_or_fields \\\\ [:y_true, :y_pred])"},{"anchor":"monitor/5","deprecated":false,"id":"monitor/5","title":"monitor(loop, metric, fun, name, opts \\\\ 
[])"},{"anchor":"reduce_lr_on_plateau/3","deprecated":false,"id":"reduce_lr_on_plateau/3","title":"reduce_lr_on_plateau(loop, monitor, opts \\\\ [])"},{"anchor":"run/4","deprecated":false,"id":"run/4","title":"run(loop, data, init_state \\\\ %{}, opts \\\\ [])"},{"anchor":"serialize_state/2","deprecated":false,"id":"serialize_state/2","title":"serialize_state(state, opts \\\\ [])"},{"anchor":"train_step/4","deprecated":false,"id":"train_step/4","title":"train_step(model, loss, optimizer, opts \\\\ [])"},{"anchor":"trainer/4","deprecated":false,"id":"trainer/4","title":"trainer(model, loss, optimizer, opts \\\\ [])"},{"anchor":"validate/4","deprecated":false,"id":"validate/4","title":"validate(loop, model, validation_data, opts \\\\ [])"}]}],"sections":[{"anchor":"module-initialize-and-step","id":"Initialize and Step"},{"anchor":"module-metrics","id":"Metrics"},{"anchor":"module-events-and-handlers","id":"Events and Handlers"},{"anchor":"module-factories","id":"Factories"},{"anchor":"module-running-loops","id":"Running loops"},{"anchor":"module-resuming-loops","id":"Resuming loops"}],"title":"Axon.Loop"},{"deprecated":false,"group":"Loop","id":"Axon.Loop.State","sections":[],"title":"Axon.Loop.State"},{"deprecated":false,"group":"Exceptions","id":"Axon.CompileError","nodeGroups":[{"key":"functions","name":"Functions","nodes":[{"anchor":"message/1","deprecated":false,"id":"message/1","title":"message(exception)"}]}],"sections":[],"title":"Axon.CompileError"}],"tasks":[]} \ No newline at end of file diff --git a/fashionmnist_autoencoder.html b/fashionmnist_autoencoder.html index 0daf8cc4..b8ce6e6f 100644 --- a/fashionmnist_autoencoder.html +++ b/fashionmnist_autoencoder.html @@ -14,7 +14,7 @@ - + @@ -136,14 +136,14 @@

Mix.install([
  {:axon, "~> 0.3.0"},
  {:nx, "~> 0.4.0", override: true},
  {:exla, "~> 0.4.0"},
  {:scidata, "~> 0.1.9"}
])

Nx.Defn.default_options(compiler: EXLA)

    @@ -156,29 +156,29 @@

    Downloading the data

To train and test how our model works, we use one of the most popular data sets: Fashion MNIST. It consists of small black and white images of clothes. Loading this data set is very simple with the help of Scidata.

{image_data, _label_data} = Scidata.FashionMNIST.download()
{bin, type, shape} = image_data

We get the data in a raw format, but this is exactly the information we need to build an Nx tensor.

train_images =
  bin
  |> Nx.from_binary(type)
  |> Nx.reshape(shape)
  |> Nx.divide(255.0)

We also normalize pixel values into the range $[0, 1]$.

We can visualize one of the images by looking at the tensor heatmap:

Nx.to_heatmap(train_images[1])

    Encoder and decoder

First we need to define the encoder and decoder. Both are one-layer neural networks.

In the encoder, we start by flattening the input, so we get from shape {batch_size, 1, 28, 28} to {batch_size, 784} and we pass the input into a dense layer. Our dense layer has only latent_dim number of neurons. The latent_dim (or the latent space) is a compressed representation of data. Remember, we want our encoder to compress the input data into a lower-dimensional representation, so we choose a latent_dim which is less than the dimensionality of the input.

encoder = fn x, latent_dim ->
  x
  |> Axon.flatten()
  |> Axon.dense(latent_dim, activation: :relu)
end

Next, we pass the output of the encoder to the decoder and try to reconstruct the compressed data into its original form. Since our original input had a dimensionality of 784, we use a dense layer with 784 neurons. Because our original data was normalized to have pixel values between 0 and 1, we use a :sigmoid activation in our dense layer to squeeze output values between 0 and 1. Our original input shape was 28x28, so we use Axon.reshape to convert the flattened representation of the outputs into an image with the correct width and height.

decoder = fn x ->
  x
  |> Axon.dense(784, activation: :sigmoid)
  |> Axon.reshape({:batch, 1, 28, 28})
end

If we just bind the encoder and decoder sequentially, we'll get the desired model. This was pretty smooth, wasn't it?

model =
  Axon.input("input", shape: {nil, 1, 28, 28})
  |> encoder.(64)
  |> decoder.()
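If you want to double-check the shapes flowing through the network before training, Axon.Display.as_table/2 can print a layer-by-layer summary given an input template - an optional sanity check (the same call is used in the VAE notebook later in this series):

Axon.Display.as_table(model, Nx.template({1, 1, 28, 28}, :f32)) |> IO.puts()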

    @@ -187,14 +187,14 @@

Finally, we can train the model. We'll use the :adam optimizer and the :mean_squared_error loss with Axon.Loop.trainer. Our loss function will measure the aggregate error between pixels of original images and the model's reconstructed images. We'll also track :mean_absolute_error using Axon.Loop.metric. Axon.Loop.run trains the model with the given training data. Since an autoencoder's target is its own input, we build the training stream by zipping the batched images with themselves.

batch_size = 32
epochs = 5

batched_images = Nx.to_batched(train_images, batch_size)
train_batches = Stream.zip(batched_images, batched_images)

params =
  model
  |> Axon.Loop.trainer(:mean_squared_error, :adam)
  |> Axon.Loop.metric(:mean_absolute_error, "Error")
  |> Axon.Loop.run(train_batches, %{}, epochs: epochs, compiler: EXLA)

    @@ -202,46 +202,46 @@

To better understand what mean absolute error (MAE) and mean squared error (MSE) are, let's go through an example.

# Error definitions for a single sample

mean_square_error = fn y_pred, y ->
  y_pred
  |> Nx.subtract(y)
  |> Nx.power(2)
  |> Nx.mean()
end

mean_absolute_error = fn y_pred, y ->
  y_pred
  |> Nx.subtract(y)
  |> Nx.abs()
  |> Nx.mean()
end

We will work with a sample image of a shoe, a slightly noised version of that image, and also an entirely different image from the dataset.

shoe_image = train_images[0]
noised_shoe_image = Nx.add(shoe_image, Nx.random_normal(shoe_image, 0.0, 0.05))
other_image = train_images[1]
:ok

For the same image both errors should be 0, because when we have two exact copies, there is no pixel difference.

{
  mean_square_error.(shoe_image, shoe_image),
  mean_absolute_error.(shoe_image, shoe_image)
}

Now the noised image:

{
  mean_square_error.(shoe_image, noised_shoe_image),
  mean_absolute_error.(shoe_image, noised_shoe_image)
}

And a different image:

{
  mean_square_error.(shoe_image, other_image),
  mean_absolute_error.(shoe_image, other_image)
}

As we can see, the noised image has a non-zero MSE and MAE, but both are much smaller than the errors between two completely different pictures. In other words, both of these error types measure the level of similarity between images. A small error implies decent prediction values. On the other hand, a large error value suggests poor quality of predictions.

If you look at our implementation of MAE and MSE, you will notice that they are very similar. MAE and MSE can also be called the $L_1$ and $L_2$ loss respectively for the $L_1$ and $L_2$ norm. The $L_2$ loss (MSE) is typically preferred because it's a smoother function, whereas $L_1$ is often difficult to optimize with stochastic gradient descent (SGD).
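In symbols, for a single pair of images with $n$ pixels each, the two anonymous functions above compute

$$\mathrm{MSE}(y, \hat{y}) = \frac{1}{n} \sum_{i=1}^{n} (\hat{y}_i - y_i)^2 \qquad \mathrm{MAE}(y, \hat{y}) = \frac{1}{n} \sum_{i=1}^{n} |\hat{y}_i - y_i|$$

which makes the $L_1$/$L_2$ naming concrete: drop the mean and these are exactly the (squared) $L_2$ norm and the $L_1$ norm of the pixel-wise difference.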

    Inference

Now, let's see how our model is doing! We will compare a sample image before and after compression.

sample_image = train_images[0..0//1]
compressed_image = Axon.predict(model, params, sample_image, compiler: EXLA)

sample_image
|> Nx.to_heatmap()
|> IO.inspect(label: "Original")

compressed_image
|> Nx.to_heatmap()
|> IO.inspect(label: "Compressed")

:ok

As we can see, the generated image is similar to the input image. The only difference between them is the absence of a sign in the middle of the second shoe. The model treated the sign as noise and bled this into the plain shoe.

diff --git a/fashionmnist_vae.html b/fashionmnist_vae.html
index 53df1d5c..967c78d4 100644
--- a/fashionmnist_vae.html
+++ b/fashionmnist_vae.html

Mix.install([
  {:exla, "~> 0.4.0"},
  {:nx, "~> 0.4.0", override: true},
  {:axon, "~> 0.3.0"},
  {:req, "~> 0.3.1"},
  {:kino, "~> 0.7.0"},
  {:scidata, "~> 0.1.9"},
  {:stb_image, "~> 0.5.2"},
  {:kino_vega_lite, "~> 0.1.6"},
  {:vega_lite, "~> 0.1.6"},
  {:table_rex, "~> 3.1.1"}
])

alias VegaLite, as: Vl

# This speeds up all our `Nx` operations without having to use `defn`
Nx.global_default_backend(EXLA.Backend)
     
     :ok

    @@ -166,7 +166,7 @@

    Training a simple autoencoder

This section will proceed without much explanation as most of it is extracted from the denoising autoencoder example. If anything here doesn't make sense, take a look at that notebook for an explanation.

defmodule Data do
  @moduledoc """
  A module to hold useful data processing utilities,
  mostly extracted from the previous notebook
  """

  @doc """
  `image` must be a single channel `Nx` tensor with pixel values between 0 and 1.
  `height` and `width` are the output size in pixels
  """
  def image_to_kino(image, height \\ 200, width \\ 200) do
    image
    |> Nx.multiply(255)
    |> Nx.as_type(:u8)
    |> Nx.transpose(axes: [:height, :width, :channels])
    |> StbImage.from_nx()
    |> StbImage.resize(height, width)
    |> StbImage.to_binary(:png)
    |> Kino.Image.new(:png)
  end

  @doc """
  Converts image data from `Scidata.MNIST` into an `Nx` tensor and normalizes it.
  """
  def preprocess_data(data) do
    {image_data, _labels} = data
    {images_binary, type, shape} = image_data

    images_binary
    |> Nx.from_binary(type)
    # Since pixels are organized row-wise, reshape into rows x columns
    |> Nx.reshape(shape, names: [:images, :channels, :height, :width])
    # Normalize the pixel values to be between 0 and 1
    |> Nx.divide(255)
  end

  @doc """
  Converts a tensor of images into random batches of paired images for model training
  """
  def prepare_training_data(images, batch_size) do
    Stream.flat_map([nil], fn nil ->
      images |> Nx.shuffle(axis: :images) |> Nx.to_batched(batch_size)
    end)
    |> Stream.map(fn batch -> {batch, batch} end)
  end
end

train_images = Data.preprocess_data(Scidata.FashionMNIST.download())
test_images = Data.preprocess_data(Scidata.FashionMNIST.download_test())

Kino.render(train_images[[images: 0]] |> Data.image_to_kino())
Kino.render(test_images[[images: 0]] |> Data.image_to_kino())

:ok
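An aside on prepare_training_data/2 (not from the original notebook): wrapping the shuffle in Stream.flat_map/2 keeps the whole pipeline lazy, so images are only shuffled and batched once the training loop actually consumes the stream. You can force a single element to sanity-check the paired batches it yields:

{input, target} = Enum.at(Data.prepare_training_data(train_images, 4), 0)
# Both tensors should have shape {4, 1, 28, 28} - an autoencoder's target is its input
{Nx.shape(input), Nx.shape(target)}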

    Now for our simple autoencoder model. We won't be using a denoising autoencoder here.

    Note that we're giving each of the layers a name - the reason for this will be apparent later.

    I'm also using a small custom layer to shift and scale the output of the sigmoid layer slightly so it can hit the 0 and 1 targets. I noticed the gradients tend to explode without this.
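Concretely, the layer below computes $(x - 0.05) \cdot 1.2$, stretching the sigmoid's $(0, 1)$ output range to $(-0.06, 1.14)$ so that 0 and 1 become interior, reachable values rather than asymptotes.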

    defmodule CustomLayer do
       import Nx.Defn
     
  def scaling_layer(%Axon{} = input, _opts \\ []) do
    Axon.layer(&scaling_layer_impl/2, [input])
  end

  defnp scaling_layer_impl(x, _opts \\ []) do
    x
    |> Nx.subtract(0.05)
    |> Nx.multiply(1.2)
  end
end

model =
  Axon.input("image", shape: {nil, 1, 28, 28})
  # This is now 28*28*1 = 784
  |> Axon.flatten()
  # The encoder
  |> Axon.dense(256, activation: :relu, name: "encoder_layer_1")
  |> Axon.dense(128, activation: :relu, name: "encoder_layer_2")
  |> Axon.dense(64, activation: :relu, name: "encoder_layer_3")
  # Bottleneck layer
  |> Axon.dense(10, activation: :relu, name: "bottleneck_layer")
  # The decoder
  |> Axon.dense(64, activation: :relu, name: "decoder_layer_1")
  |> Axon.dense(128, activation: :relu, name: "decoder_layer_2")
  |> Axon.dense(256, activation: :relu, name: "decoder_layer_3")
  |> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4")
  |> CustomLayer.scaling_layer()
  # Turn it back into a 28x28 single channel image
  |> Axon.reshape({:auto, 1, 28, 28})

# We can use Axon.Display to show us what each of the layers would look like
# assuming we send in a batch of 4 images
Axon.Display.as_table(model, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()

batch_size = 128
     
train_data = Data.prepare_training_data(train_images, batch_size)
test_data = Data.prepare_training_data(test_images, batch_size)

{input_batch, target_batch} = Enum.at(train_data, 0)
Kino.render(input_batch[[images: 0]] |> Data.image_to_kino())
Kino.render(target_batch[[images: 0]] |> Data.image_to_kino())

:ok

When training, it can be useful to stop execution early - either when you see it's failing and you don't want to waste time waiting for the remaining epochs to finish, or if it's good enough and you want to start experimenting with it.

The kino_early_stop/1 function below is a handy handler to give us a Kino.Control.button that will stop the training loop when clicked.

We also have a plot_losses/1 function to visualize our train and validation losses using VegaLite.

defmodule KinoAxon do
       @doc """
       Adds handler function which adds a frame with a "stop" button
       to the cell with the training loop.
     
       Clicking "stop" will halt the training loop.
       """
    -  def kino_early_stop(loop) do
    -    frame = Kino.Frame.new() |> Kino.render()
    -    stop_button = Kino.Control.button("stop")
    -    Kino.Frame.render(frame, stop_button)
    +  def kino_early_stop(loop) do
    +    frame = Kino.Frame.new() |> Kino.render()
    +    stop_button = Kino.Control.button("stop")
    +    Kino.Frame.render(frame, stop_button)
     
    -    {:ok, button_agent} = Agent.start_link(fn -> nil end)
    +    {:ok, button_agent} = Agent.start_link(fn -> nil end)
     
         stop_button
    -    |> Kino.Control.stream()
    -    |> Kino.listen(fn _event ->
    -      Agent.update(button_agent, fn _ -> :stop end)
    -    end)
    -
    -    handler = fn state ->
    -      stop_state = Agent.get(button_agent, & &1)
    -
    -      if stop_state == :stop do
    -        Agent.stop(button_agent)
    -        Kino.Frame.render(frame, "stopped")
    -        {:halt_loop, state}
    -      else
    -        {:continue, state}
    -      end
    -    end
    -
    -    Axon.Loop.handle(loop, :iteration_completed, handler)
    -  end
    +    |> Kino.Control.stream()
    +    |> Kino.listen(fn _event ->
    +      Agent.update(button_agent, fn _ -> :stop end)
    +    end)
    +
    +    handler = fn state ->
    +      stop_state = Agent.get(button_agent, & &1)
    +
    +      if stop_state == :stop do
    +        Agent.stop(button_agent)
    +        Kino.Frame.render(frame, "stopped")
    +        {:halt_loop, state}
    +      else
    +        {:continue, state}
    +      end
    +    end
    +
    +    Axon.Loop.handle(loop, :iteration_completed, handler)
    +  end
     
       @doc """
       Plots the training and validation losses using Kino and VegaLite.
     
       This *must* come after `Axon.Loop.validate`.
       """
    -  def plot_losses(loop) do
    +  def plot_losses(loop) do
         vl_widget =
    -      Vl.new(width: 600, height: 400)
    -      |> Vl.mark(:point, tooltip: true)
    -      |> Vl.encode_field(:x, "epoch", type: :ordinal)
    -      |> Vl.encode_field(:y, "loss", type: :quantitative)
    -      |> Vl.encode_field(:color, "dataset", type: :nominal)
    -      |> Kino.VegaLite.new()
    -      |> Kino.render()
    -
    -    handler = fn state ->
    -      %Axon.Loop.State{metrics: metrics, epoch: epoch} = state
    -      loss = metrics["loss"] |> Nx.to_number()
    -      val_loss = metrics["validation_loss"] |> Nx.to_number()
    -
    -      points = [
    -        %{epoch: epoch, loss: loss, dataset: "train"},
    -        %{epoch: epoch, loss: val_loss, dataset: "validation"}
    -      ]
    -
    -      Kino.VegaLite.push_many(vl_widget, points)
    -      {:continue, state}
    -    end
    -
    -    Axon.Loop.handle(loop, :epoch_completed, handler)
    -  end
    -end
    # A helper function to display the input and output side by side
    -combined_input_output = fn params, image_index ->
    -  test_image = test_images[[images: image_index]]
    -  reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])
    -  Nx.concatenate([test_image, reconstructed_image], axis: :width)
    -end
    -
    -frame = Kino.Frame.new() |> Kino.render()
    -
    -render_example_handler = fn state ->
    +      Vl.new(width: 600, height: 400)
    +      |> Vl.mark(:point, tooltip: true)
    +      |> Vl.encode_field(:x, "epoch", type: :ordinal)
    +      |> Vl.encode_field(:y, "loss", type: :quantitative)
    +      |> Vl.encode_field(:color, "dataset", type: :nominal)
    +      |> Kino.VegaLite.new()
    +      |> Kino.render()
    +
    +    handler = fn state ->
    +      %Axon.Loop.State{metrics: metrics, epoch: epoch} = state
    +      loss = metrics["loss"] |> Nx.to_number()
    +      val_loss = metrics["validation_loss"] |> Nx.to_number()
    +
    +      points = [
    +        %{epoch: epoch, loss: loss, dataset: "train"},
    +        %{epoch: epoch, loss: val_loss, dataset: "validation"}
    +      ]
    +
    +      Kino.VegaLite.push_many(vl_widget, points)
    +      {:continue, state}
    +    end
    +
    +    Axon.Loop.handle(loop, :epoch_completed, handler)
    +  end
    +end
    # A helper function to display the input and output side by side
    +combined_input_output = fn params, image_index ->
    +  test_image = test_images[[images: image_index]]
    +  reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])
    +  Nx.concatenate([test_image, reconstructed_image], axis: :width)
    +end
    +
    +frame = Kino.Frame.new() |> Kino.render()
    +
    +render_example_handler = fn state ->
       # state.step_state[:model_state] contains the model params when this event is fired
    -  params = state.step_state[:model_state]
    -  image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))
    -  image = combined_input_output.(params, image_index) |> Data.image_to_kino(200, 400)
    -  Kino.Frame.render(frame, image)
    -  Kino.Frame.append(frame, "Epoch: #{state.epoch}, Iteration: #{state.iteration}")
    -  {:continue, state}
    -end
    +  params = state.step_state[:model_state]
    +  image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))
    +  image = combined_input_output.(params, image_index) |> Data.image_to_kino(200, 400)
    +  Kino.Frame.render(frame, image)
    +  Kino.Frame.append(frame, "Epoch: #{state.epoch}, Iteration: #{state.iteration}")
    +  {:continue, state}
    +end
     
     params =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))
    -  |> KinoAxon.kino_early_stop()
    -  |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)
    -  |> Axon.Loop.validate(model, test_data)
    -  |> KinoAxon.plot_losses()
    -  |> Axon.Loop.run(train_data, %{}, epochs: 40, compiler: EXLA)
    +  |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))
    +  |> KinoAxon.kino_early_stop()
    +  |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)
    +  |> Axon.Loop.validate(model, test_data)
    +  |> KinoAxon.plot_losses()
    +  |> Axon.Loop.run(train_data, %{}, epochs: 40, compiler: EXLA)
     
     :ok

    @@ -362,191 +362,191 @@

    Splitting up the model

    Cool! We now have the parameters for a trained, simple autoencoder. Our next step is to split up the model so we can use the encoder and decoder separately. By doing that, we'll be able to take an image and encode it to get the model's compressed image representation (the latent vector). We can then manipulate the latent vector and run the manipulated latent vector through the decoder to get a new image.

    Let's start by defining the encoder and decoder separately as two different models.

    encoder =
    -  Axon.input("image", shape: {nil, 1, 28, 28})
    +  Axon.input("image", shape: {nil, 1, 28, 28})
       # This is now 28*28*1 = 784
    -  |> Axon.flatten()
    +  |> Axon.flatten()
       # The encoder
    -  |> Axon.dense(256, activation: :relu, name: "encoder_layer_1")
    -  |> Axon.dense(128, activation: :relu, name: "encoder_layer_2")
    -  |> Axon.dense(64, activation: :relu, name: "encoder_layer_3")
    +  |> Axon.dense(256, activation: :relu, name: "encoder_layer_1")
    +  |> Axon.dense(128, activation: :relu, name: "encoder_layer_2")
    +  |> Axon.dense(64, activation: :relu, name: "encoder_layer_3")
       # Bottleneck layer
    -  |> Axon.dense(10, activation: :relu, name: "bottleneck_layer")
    +  |> Axon.dense(10, activation: :relu, name: "bottleneck_layer")
     
     # The output from the encoder
     decoder =
    -  Axon.input("latent", shape: {nil, 10})
    +  Axon.input("latent", shape: {nil, 10})
       # The decoder
    -  |> Axon.dense(64, activation: :relu, name: "decoder_layer_1")
    -  |> Axon.dense(128, activation: :relu, name: "decoder_layer_2")
    -  |> Axon.dense(256, activation: :relu, name: "decoder_layer_3")
    -  |> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4")
    -  |> CustomLayer.scaling_layer()
    +  |> Axon.dense(64, activation: :relu, name: "decoder_layer_1")
    +  |> Axon.dense(128, activation: :relu, name: "decoder_layer_2")
    +  |> Axon.dense(256, activation: :relu, name: "decoder_layer_3")
    +  |> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4")
    +  |> CustomLayer.scaling_layer()
       # Turn it back into a 28x28 single channel image
    -  |> Axon.reshape({:auto, 1, 28, 28})
    +  |> Axon.reshape({:auto, 1, 28, 28})
     
    -Axon.Display.as_table(encoder, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()
    -Axon.Display.as_table(decoder, Nx.template({4, 10}, :f32)) |> IO.puts()

    We have the two models, but the problem is these are untrained models so we don't have the corresponding set of parameters. We'd like to use the parameters from the autoencoder we just trained and apply them to our split up models.

    Let's first take a look at what params actually are:

    params

    Params are just a Map with the layer name as the key identifying which parameters to use. We can easily match up the layer names with the output from the Axon.Display.as_table/2 call for the autoencoder model.

    So all we need to do is create a new Map that plucks out the right layers from our autoencoder params for each model and use that to run inference on our split up models.

    Fortunately, since we gave each of the layers names, this requires no work at all - we can use the Map as it is since the layer names match up! Axon will ignore any extra keys so those won't be a problem.

    Note that naming the layers wasn't required, if the layers didn't have names we would have some renaming to do to get the names to match between the models. But giving them names made it very convenient :)

    Let's try encoding an image, printing the latent and then decoding the latent using our split up model to make sure it's working.

    image = test_images[[images: 0]]
    +Axon.Display.as_table(encoder, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()
    +Axon.Display.as_table(decoder, Nx.template({4, 10}, :f32)) |> IO.puts()

    We have the two models, but the problem is these are untrained models so we don't have the corresponding set of parameters. We'd like to use the parameters from the autoencoder we just trained and apply them to our split up models.

    Let's first take a look at what params actually are:

    params

    Params are just a Map with the layer name as the key identifying which parameters to use. We can easily match up the layer names with the output from the Axon.Display.as_table/2 call for the autoencoder model.

    So all we need to do is create a new Map that plucks out the right layers from our autoencoder params for each model and use that to run inference on our split up models.

    Fortunately, since we gave each of the layers names, this requires no work at all - we can use the Map as it is since the layer names match up! Axon will ignore any extra keys so those won't be a problem.
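If you ever did need to be explicit about it, plucking the right entries out of params is one Map.take/2 call per model - a hypothetical sketch, shown only for illustration since Axon already ignores the extra keys:

encoder_params =
  Map.take(params, [
    "encoder_layer_1",
    "encoder_layer_2",
    "encoder_layer_3",
    "bottleneck_layer"
  ])

decoder_params =
  Map.take(params, [
    "decoder_layer_1",
    "decoder_layer_2",
    "decoder_layer_3",
    "decoder_layer_4"
  ])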

Note that naming the layers wasn't required; if the layers didn't have names, we would have some renaming to do to get the names to match between the models. But giving them names made it very convenient :)

    Let's try encoding an image, printing the latent and then decoding the latent using our split up model to make sure it's working.

    image = test_images[[images: 0]]
     
     # Encode the image
latent = Axon.predict(encoder, params, image)
IO.inspect(latent, label: "Latent")
# Decode the image
reconstructed_image = Axon.predict(decoder, params, latent) |> Nx.squeeze(axes: [0])

combined_image = Nx.concatenate([image, reconstructed_image], axis: :width)
Data.image_to_kino(combined_image, 200, 400)

Perfect! Seems like the split up models are working as expected. Now let's try to generate some new images using our autoencoder. To do this, we'll manipulate the latent so it's slightly different from what the encoder gave us. Specifically, we'll try to interpolate between two images, showing 100 steps from our starting image to our final image.
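Concretely, with latents $z_0$ and $z_1$ and $N = 100$ steps, the cell below builds $z_i = z_0 + i \cdot \frac{z_1 - z_0}{N}$ for $i = 0, 1, \ldots, N$, using Nx.iota/1 to produce all the $i$ values in a single batched multiply.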

    num_steps = 100
     
     # Get our latents, image at index 0 is our starting point
     # index 1 is where we'll end
latents = Axon.predict(encoder, params, test_images[[images: 0..1]])
# Latents is a {2, 10} tensor
# The step we'll add to our latent to move it towards image[1]
step = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)
# We can make a batch of all our new latents
new_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])

reconstructed_images = Axon.predict(decoder, params, new_latents)

reconstructed_images =
  Nx.reshape(
    reconstructed_images,
    Nx.shape(reconstructed_images),
    names: [:images, :channels, :height, :width]
  )

Stream.interval(div(5000, num_steps))
|> Stream.take(num_steps + 1)
|> Kino.animate(fn i ->
  Data.image_to_kino(reconstructed_images[i])
end)

Cool! We have interpolation! But did you notice that some of the intermediate frames don't look fashionable at all? Autoencoders don't generally return good results for random vectors in their latent space. That's where a VAE can help.

    Making it variational

In a VAE, instead of outputting a latent vector, our encoder will output a distribution. Essentially this means instead of 10 outputs we'll have 20. 10 of them will represent the mean and 10 will represent the log of the variance of the latent. We'll have to sample from this distribution to get our latent vector. Finally, we'll have to modify our loss function to also compute the KL Divergence between the latent distribution and a standard normal distribution (this acts as a regularizer of the latent space).
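In symbols (a standard identity, stated here for reference): the sampling layer below implements the reparameterization trick $z = \mu + \sigma \odot \epsilon$ with $\epsilon \sim \mathcal{N}(0, I)$ and $\sigma = \exp(\frac{1}{2} \log \sigma^2)$, which keeps sampling differentiable with respect to $\mu$ and $\log \sigma^2$. The closed-form KL divergence between $\mathcal{N}(\mu, \sigma^2)$ and $\mathcal{N}(0, I)$ is $D_{\mathrm{KL}} = \frac{1}{2} \sum_i \left( \mu_i^2 + \sigma_i^2 - 1 - \log \sigma_i^2 \right)$; the custom loss further down uses a scaled variant of this expression.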

    We'll start by defining our model:

    defmodule Vae do
       import Nx.Defn
     
       @latent_features 10
     
  defp sampling_layer(%Axon{} = input, _opts \\ []) do
    Axon.layer(&sampling_layer_impl/2, [input], name: "sampling_layer", op_name: :sample)
  end

  defnp sampling_layer_impl(x, _opts \\ []) do
    mu = x[[0..-1//1, 0, 0..-1//1]]
    log_var = x[[0..-1//1, 1, 0..-1//1]]
    std_dev = Nx.exp(0.5 * log_var)
    eps = Nx.random_normal(std_dev)
    sample = mu + std_dev * eps
    Nx.stack([sample, mu, std_dev], axis: 1)
  end

  defp encoder_partial() do
    Axon.input("image", shape: {nil, 1, 28, 28})
    # This is now 28*28*1 = 784
    |> Axon.flatten()
    # The encoder
    |> Axon.dense(256, activation: :relu, name: "encoder_layer_1")
    |> Axon.dense(128, activation: :relu, name: "encoder_layer_2")
    |> Axon.dense(64, activation: :relu, name: "encoder_layer_3")
    # Bottleneck layer
    |> Axon.dense(@latent_features * 2, name: "bottleneck_layer")
    # Split up the mu and logvar
    |> Axon.reshape({:auto, 2, @latent_features})
    |> sampling_layer()
  end

  def encoder() do
    encoder_partial()
    # Grab only the sample (ie. the sampled latent)
    |> Axon.nx(fn x -> x[[0..-1//1, 0]] end)
  end

  def decoder(input_latent) do
    input_latent
    |> Axon.dense(64, activation: :relu, name: "decoder_layer_1")
    |> Axon.dense(128, activation: :relu, name: "decoder_layer_2")
    |> Axon.dense(256, activation: :relu, name: "decoder_layer_3")
    |> Axon.dense(784, activation: :sigmoid, name: "decoder_layer_4")
    |> CustomLayer.scaling_layer()
    # Turn it back into a 28x28 single channel image
    |> Axon.reshape({:auto, 1, 28, 28})
  end

  def autoencoder() do
    encoder_partial = encoder_partial()
    encoder = encoder()
    autoencoder = decoder(encoder)
    Axon.container(%{mu_sigma: encoder_partial, reconstruction: autoencoder})
  end
end

There are a few interesting things going on here. First, since our model has become more complex, we've used a module to keep it organized. We also built a custom layer to do the sampling and output the sampled latent vector as well as the distribution parameters (mu and sigma).

Finally, we need the distribution itself so we can calculate the KL Divergence in our loss function. To make the model output the distribution parameters (mu and sigma), we use Axon.container/1 to produce two outputs from our model instead of one. Now, instead of getting a tensor as an output, we'll get a map with the two tensors we need for our loss function.

Our loss function also has to be modified to be the sum of the KL divergence and MSE. One detail to keep straight: the mu_sigma output has shape {batch_size, 3, 10}, because the sampling layer stacks [sample, mu, std_dev] along axis 1 - which is why the loss slices index 1 for mu and index 2 for sigma. Here's our custom loss function:

    defmodule CustomLoss do
       import Nx.Defn
     
  defn loss(y_true, %{reconstruction: reconstruction, mu_sigma: mu_sigma}) do
    mu = mu_sigma[[0..-1//1, 1, 0..-1//1]]
    sigma = mu_sigma[[0..-1//1, 2, 0..-1//1]]
    kld = Nx.sum(-Nx.log(sigma) - 0.5 + Nx.multiply(sigma, sigma) + Nx.multiply(mu, mu))
    kld * 0.1 + Axon.Losses.mean_squared_error(y_true, reconstruction, reduction: :sum)
  end
end

With all our pieces ready, we can pretty much use the same training loop as we did earlier. The only modifications needed are to account for the fact that the model outputs a map with two values instead of a single tensor and to tell the trainer to use our custom loss.

    model = Vae.autoencoder()
     
     # A helper function to display the input and output side by side
combined_input_output = fn params, image_index ->
  test_image = test_images[[images: image_index]]
  %{reconstruction: reconstructed_image} = Axon.predict(model, params, test_image)
  reconstructed_image = reconstructed_image |> Nx.squeeze(axes: [0])
  Nx.concatenate([test_image, reconstructed_image], axis: :width)
end

frame = Kino.Frame.new() |> Kino.render()

render_example_handler = fn state ->
  # state.step_state[:model_state] contains the model params when this event is fired
  params = state.step_state[:model_state]
  image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))
  image = combined_input_output.(params, image_index) |> Data.image_to_kino(200, 400)
  Kino.Frame.render(frame, image)
  Kino.Frame.append(frame, "Epoch: #{state.epoch}, Iteration: #{state.iteration}")
  {:continue, state}
end

params =
  model
  |> Axon.Loop.trainer(&CustomLoss.loss/2, Polaris.Optimizers.adam(learning_rate: 0.001))
  |> KinoAxon.kino_early_stop()
  |> Axon.Loop.handle(:epoch_completed, render_example_handler)
  |> Axon.Loop.validate(model, test_data)
  |> KinoAxon.plot_losses()
  |> Axon.Loop.run(train_data, %{}, epochs: 40, compiler: EXLA)
     
     :ok

    Finally, we can try our interpolation again:

    num_steps = 100
     
     # Get our latents, image at index 0 is our starting point
     # index 1 is where we'll end
    -latents = Axon.predict(Vae.encoder(), params, test_images[[images: 0..1]])
    +latents = Axon.predict(Vae.encoder(), params, test_images[[images: 0..1]])
     # Latents is a {2, 10} tensor
     # The step we'll add to our latent to move it towards image[1]
    -step = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)
    +step = Nx.subtract(latents[1], latents[0]) |> Nx.divide(num_steps)
     # We can make a batch of all our new latents
    -new_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])
    +new_latents = Nx.multiply(Nx.iota({num_steps + 1, 1}), step) |> Nx.add(latents[0])
     
    -decoder = Axon.input("latent", shape: {nil, 10}) |> Vae.decoder()
    +decoder = Axon.input("latent", shape: {nil, 10}) |> Vae.decoder()
     
    -reconstructed_images = Axon.predict(decoder, params, new_latents)
    +reconstructed_images = Axon.predict(decoder, params, new_latents)
     
     reconstructed_images =
    -  Nx.reshape(
    +  Nx.reshape(
         reconstructed_images,
    -    Nx.shape(reconstructed_images),
    -    names: [:images, :channels, :height, :width]
    -  )
    -
    -Stream.interval(div(5000, num_steps))
    -|> Stream.take(num_steps + 1)
    -|> Kino.animate(fn i ->
    -  Data.image_to_kino(reconstructed_images[i])
    -end)

Did you notice the difference? Every step in our interpolation looks similar to items in our dataset! This is the benefit of the VAE: we can generate new items by using random latents. In contrast, with the simple autoencoder, for the most part only the latents we got from our encoder were likely to produce sensible outputs.

    +
+    Nx.shape(reconstructed_images),
+    names: [:images, :channels, :height, :width]
+  )
+
+Stream.interval(div(5000, num_steps))
+|> Stream.take(num_steps + 1)
+|> Kino.animate(fn i ->
+  Data.image_to_kino(reconstructed_images[i])
+end)

Did you notice the difference? Every step in our interpolation looks similar to items in our dataset! This is the benefit of the VAE: we can generate new items by using random latents. In contrast, with the simple autoencoder, for the most part only the latents we got from our encoder were likely to produce sensible outputs.

    diff --git a/guides.html b/guides.html index 8fe65737..060ece8d 100644 --- a/guides.html +++ b/guides.html @@ -14,7 +14,7 @@ - + diff --git a/horses_or_humans.html b/horses_or_humans.html index 037a2053..4e73fcd3 100644 --- a/horses_or_humans.html +++ b/horses_or_humans.html @@ -14,7 +14,7 @@ - + @@ -136,17 +136,17 @@

    -
    Mix.install([
    -  {:axon, "~> 0.6.0"},
    -  {:nx, "~> 0.6.0"},
    -  {:exla, "~> 0.6.0"},
    -  {:stb_image, "~> 0.6.0"},
    -  {:req, "~> 0.4.5"},
    -  {:kino, "~> 0.11.0"}
    -])
    -
    -Nx.global_default_backend(EXLA.Backend)
    -Nx.Defn.global_default_options(compiler: EXLA)

    +
    Mix.install([
    +  {:axon, "~> 0.6.0"},
    +  {:nx, "~> 0.6.0"},
    +  {:exla, "~> 0.6.0"},
    +  {:stb_image, "~> 0.6.0"},
    +  {:req, "~> 0.4.5"},
    +  {:kino, "~> 0.11.0"}
    +])
    +
    +Nx.global_default_backend(EXLA.Backend)
    +Nx.Defn.global_default_options(compiler: EXLA)

    @@ -158,151 +158,151 @@

    Loading the data

    -

We will be using the Horses or Humans Dataset. The dataset is available as a ZIP with image files, which we will download using req. Conveniently, req will unzip the files for us; we just need to convert the filenames from charlists to strings.

    %{body: files} =
    -  Req.get!("https://storage.googleapis.com/learning-datasets/horse-or-human.zip")
    +

We will be using the Horses or Humans Dataset. The dataset is available as a ZIP with image files, which we will download using req. Conveniently, req will unzip the files for us; we just need to convert the filenames from charlists to strings.

    %{body: files} =
    +  Req.get!("https://storage.googleapis.com/learning-datasets/horse-or-human.zip")
     
    -files = for {name, binary} <- files, do: {List.to_string(name), binary}

    +files = for {name, binary} <- files, do: {List.to_string(name), binary}

    Note on batching

We need to know how many images to include in a batch. A batch is a group of images to load into the GPU at a time. If the batch size is too big for your GPU, it will run out of memory; in that case you can reduce the batch size. It is generally optimal to utilize almost all of the GPU memory during training, and training with a lower batch size will take more time.
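As a quick arithmetic illustration (the counts here are hypothetical), with 1,000 files and a batch size of 32 we would get div(1000, 32) = 31 full batches, and rem(1000, 32) = 8 images would be left over and ignored.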

    batch_size = 32
    -batches_per_epoch = div(length(files), batch_size)

    +batches_per_epoch = div(length(files), batch_size)

    A look at the data

    -

    We'll have a really quick look at our data. Let's see what we are dealing with:

    {name, binary} = Enum.random(files)
    -Kino.Markdown.new(name) |> Kino.render()
    -Kino.Image.new(binary, :png)

Reevaluate the cell a couple of times to view different images. Note that the file names are either horse[N]-[M].png or human[N]-[M].png, so we can derive the expected class from that.

    While we are at it, look at this beautiful animation:

    names_to_animate = ["horse01", "horse05", "human01", "human05"]
    +

    We'll have a really quick look at our data. Let's see what we are dealing with:

    {name, binary} = Enum.random(files)
    +Kino.Markdown.new(name) |> Kino.render()
    +Kino.Image.new(binary, :png)

Reevaluate the cell a couple of times to view different images. Note that the file names are either horse[N]-[M].png or human[N]-[M].png, so we can derive the expected class from that.
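As an illustrative sketch (the parse_file/1 function further down does this for real; label_for is a hypothetical helper), deriving a one-hot label from a file name could look like:

# Files live under "horses/" or "humans/" inside the ZIP
label_for = fn name ->
  if String.starts_with?(name, "horses/"),
    do: Nx.tensor([1, 0], type: {:u, 8}),
    else: Nx.tensor([0, 1], type: {:u, 8})
end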

    While we are at it, look at this beautiful animation:

    names_to_animate = ["horse01", "horse05", "human01", "human05"]
     
     images_to_animate =
    -  for {name, binary} <- files, Enum.any?(names_to_animate, &String.contains?(name, &1)) do
    -    Kino.Image.new(binary, :png)
    -  end
    -
    -Kino.animate(50, images_to_animate, fn
    -  _i, [image | images] -> {:cont, image, images}
    -  _i, [] -> :halt
    -end)

    How many images are there?

    length(files)

    How many images will not be used for training? The remainder of the integer division will be ignored.

    files
    -|> length()
    -|> rem(batch_size)

+  for {name, binary} <- files, Enum.any?(names_to_animate, &String.contains?(name, &1)) do
+    Kino.Image.new(binary, :png)
+  end
+
+Kino.animate(50, images_to_animate, fn
+  _i, [image | images] -> {:cont, image, images}
+  _i, [] -> :halt
+end)

    How many images are there?

    length(files)

    How many images will not be used for training? The remainder of the integer division will be ignored.

    files
    +|> length()
    +|> rem(batch_size)

    Data processing

    -

First, we need to preprocess the data for our CNN. At the beginning of the process, we chunk images into batches. Then, we use the parse_file/1 function to load images and label them accurately. Finally, we "augment" the input, which means that we normalize the data and flip the images along one of the axes. The last procedure helps the neural network make predictions regardless of the orientation of the image.

    defmodule HorsesHumans.DataProcessing do
    +

First, we need to preprocess the data for our CNN. At the beginning of the process, we chunk images into batches. Then, we use the parse_file/1 function to load images and label them accurately. Finally, we "augment" the input, which means that we normalize the data and flip the images along one of the axes. The last procedure helps the neural network make predictions regardless of the orientation of the image.

    defmodule HorsesHumans.DataProcessing do
       import Nx.Defn
     
    -  def data_stream(files, batch_size) do
    +  def data_stream(files, batch_size) do
         files
    -    |> Enum.shuffle()
    -    |> Stream.chunk_every(batch_size, batch_size, :discard)
    -    |> Task.async_stream(
    -      fn batch ->
    -        {images, labels} = batch |> Enum.map(&parse_file/1) |> Enum.unzip()
    -        {Nx.stack(images), Nx.stack(labels)}
    -      end,
    +    |> Enum.shuffle()
    +    |> Stream.chunk_every(batch_size, batch_size, :discard)
    +    |> Task.async_stream(
    +      fn batch ->
    +        {images, labels} = batch |> Enum.map(&parse_file/1) |> Enum.unzip()
    +        {Nx.stack(images), Nx.stack(labels)}
    +      end,
           timeout: :infinity
    -    )
    -    |> Stream.map(fn {:ok, {images, labels}} -> {augment(images), labels} end)
    -    |> Stream.cycle()
    -  end
    +    )
    +    |> Stream.map(fn {:ok, {images, labels}} -> {augment(images), labels} end)
    +    |> Stream.cycle()
    +  end
     
    -  defp parse_file({filename, binary}) do
    +  defp parse_file({filename, binary}) do
         label =
    -      if String.starts_with?(filename, "horses/"),
    -        do: Nx.tensor([1, 0], type: {:u, 8}),
    -        else: Nx.tensor([0, 1], type: {:u, 8})
    +      if String.starts_with?(filename, "horses/"),
    +        do: Nx.tensor([1, 0], type: {:u, 8}),
    +        else: Nx.tensor([0, 1], type: {:u, 8})
     
    -    image = binary |> StbImage.read_binary!() |> StbImage.to_nx()
    +    image = binary |> StbImage.read_binary!() |> StbImage.to_nx()
     
    -    {image, label}
    -  end
    +    {image, label}
    +  end
     
    -  defnp augment(images) do
    +  defnp augment(images) do
         # Normalize
         images = images / 255.0
     
         # Optional vertical/horizontal flip
-    {u, _new_key} = Nx.Random.key(1987) |> Nx.Random.uniform()
+    {u, _new_key} = Nx.Random.key(1987) |> Nx.Random.uniform()
     
    -    cond do
    +    cond do
           u < 0.25 -> images
    -      u < 0.5 -> Nx.reverse(images, axes: [2])
    -      u < 0.75 -> Nx.reverse(images, axes: [3])
    -      true -> Nx.reverse(images, axes: [2, 3])
    -    end
    -  end
    -end

+      u < 0.5 -> Nx.reverse(images, axes: [2])
+      u < 0.75 -> Nx.reverse(images, axes: [3])
+      true -> Nx.reverse(images, axes: [2, 3])
+    end
+  end
+end

    Building the model

The next step is creating our model. In this notebook, we choose the classic Convolutional Neural Network architecture. Let's dive into the core components of a CNN.

    Axon.conv/3 adds a convolutional layer, which is at the core of a CNN. A convolutional layer applies a filter function throughout the image, sliding a window with shape :kernel_size. As opposed to dense layers, a convolutional layer exploits weight sharing to better model data where locality matters. This feature is a natural fit for images.

    Figure 1: A step-by-step visualization of a convolution layer for kernel_size: {3, 3}

    Axon.max_pool/2 adds a downscaling operation that takes the maximum value from a subtensor according to :kernel_size.

    Figure 2: Max pooling operation for kernel_size: {2, 2}
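To see the effect with plain Nx (an illustrative aside, not part of the original notebook), Nx.window_max/3 performs the same sliding maximum:

# A {1, 1, 4, 4} tensor pooled with a 2x2 window becomes {1, 1, 2, 2}
t = Nx.iota({1, 1, 4, 4})
Nx.window_max(t, {1, 1, 2, 2}, strides: [1, 1, 2, 2])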

    Axon.dropout/2 and Axon.spatial_dropout/2 add dropout layers which prevent a neural network from overfitting. Standard dropout drops a given rate of randomly chosen neurons during the training process. On the other hand, spatial dropout gets rid of whole feature maps. The graphical difference between dropout and spatial dropout is presented in a picture below.

    Figure 3: The difference between standard dropout and spatial dropout

Knowing the relevant building blocks, let's build our network! It will have a convolutional part, composed of convolutional and pooling layers, that should capture the spatial features of an image. Then, at the end, we will add a dense layer with 512 neurons fed with all the spatial features, and a final two-neuron layer as our classification output.

    model =
    -  Axon.input("input", shape: {nil, 300, 300, 4})
    -  |> Axon.conv(16, kernel_size: {3, 3}, activation: :relu)
    -  |> Axon.max_pool(kernel_size: {2, 2})
    -  |> Axon.conv(32, kernel_size: {3, 3}, activation: :relu)
    -  |> Axon.spatial_dropout(rate: 0.5)
    -  |> Axon.max_pool(kernel_size: {2, 2})
    -  |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)
    -  |> Axon.spatial_dropout(rate: 0.5)
    -  |> Axon.max_pool(kernel_size: {2, 2})
    -  |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)
    -  |> Axon.max_pool(kernel_size: {2, 2})
    -  |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu)
    -  |> Axon.max_pool(kernel_size: {2, 2})
    -  |> Axon.flatten()
    -  |> Axon.dropout(rate: 0.5)
    -  |> Axon.dense(512, activation: :relu)
    -  |> Axon.dense(2, activation: :softmax)

    + Axon.input("input", shape: {nil, 300, 300, 4}) + |> Axon.conv(16, kernel_size: {3, 3}, activation: :relu) + |> Axon.max_pool(kernel_size: {2, 2}) + |> Axon.conv(32, kernel_size: {3, 3}, activation: :relu) + |> Axon.spatial_dropout(rate: 0.5) + |> Axon.max_pool(kernel_size: {2, 2}) + |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu) + |> Axon.spatial_dropout(rate: 0.5) + |> Axon.max_pool(kernel_size: {2, 2}) + |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu) + |> Axon.max_pool(kernel_size: {2, 2}) + |> Axon.conv(64, kernel_size: {3, 3}, activation: :relu) + |> Axon.max_pool(kernel_size: {2, 2}) + |> Axon.flatten() + |> Axon.dropout(rate: 0.5) + |> Axon.dense(512, activation: :relu) + |> Axon.dense(2, activation: :softmax)

    Training the model

    -

It's time to train our model. We specify the loss and optimizer, and choose accuracy as our metric. We also set log: 1 to frequently update the training progress. We manually specify the number of iterations, such that each epoch goes through all of the batches once.

    data = HorsesHumans.DataProcessing.data_stream(files, batch_size)
    +

It's time to train our model. We specify the loss and optimizer, and choose accuracy as our metric. We also set log: 1 to frequently update the training progress. We manually specify the number of iterations, such that each epoch goes through all of the batches once.

    data = HorsesHumans.DataProcessing.data_stream(files, batch_size)
     
    -optimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-4)
    +optimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-4)
     
     params =
       model
    -  |> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, log: 1)
    -  |> Axon.Loop.metric(:accuracy)
    -  |> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)

+  |> Axon.Loop.trainer(:categorical_cross_entropy, optimizer, log: 1)
+  |> Axon.Loop.metric(:accuracy)
+  |> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)

    Extra: gradient centralization

    -

We can improve the training by applying gradient centralization. It is a technique with a similar purpose to batch normalization. For each loss gradient, we subtract its mean value so that the resulting gradient has a mean equal to zero. This process helps prevent gradients from exploding.

    centralized_optimizer = Polaris.Updates.compose(Polaris.Updates.centralize(), optimizer)
    +

We can improve the training by applying gradient centralization. It is a technique with a similar purpose to batch normalization. For each loss gradient, we subtract its mean value so that the resulting gradient has a mean equal to zero. This process helps prevent gradients from exploding.
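Conceptually (an illustrative sketch, not the actual Polaris implementation), centralization replaces each gradient with the gradient minus its mean:

# The centralized gradient g - mean(g) has a mean of zero
centralize = fn g -> Nx.subtract(g, Nx.mean(g)) end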

    centralized_optimizer = Polaris.Updates.compose(Polaris.Updates.centralize(), optimizer)
     
     model
    -|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, log: 1)
    -|> Axon.Loop.metric(:accuracy)
    -|> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)

+|> Axon.Loop.trainer(:categorical_cross_entropy, centralized_optimizer, log: 1)
+|> Axon.Loop.metric(:accuracy)
+|> Axon.Loop.run(data, %{}, epochs: 10, iterations: batches_per_epoch)

    Inference

    -

We can now use our trained model; let's try a couple of examples.

    {name, binary} = Enum.random(files)
    -Kino.Markdown.new(name) |> Kino.render()
    -Kino.Image.new(binary, :png) |> Kino.render()
    +

We can now use our trained model; let's try a couple of examples.

    {name, binary} = Enum.random(files)
    +Kino.Markdown.new(name) |> Kino.render()
    +Kino.Image.new(binary, :png) |> Kino.render()
     
     input =
       binary
    -  |> StbImage.read_binary!()
    -  |> StbImage.to_nx()
    -  |> Nx.new_axis(0)
    -  |> Nx.divide(255.0)
    +  |> StbImage.read_binary!()
    +  |> StbImage.to_nx()
    +  |> Nx.new_axis(0)
    +  |> Nx.divide(255.0)
     
    -Axon.predict(model, params, input)

Note: the model output refers to the probability that the image presents a horse or a human, respectively.

    You can find a validation set here, in case you want to experiment further!

    +
    Axon.predict(model, params, input)

Note: the model output refers to the probability that the image presents a horse or a human, respectively.
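To turn those two probabilities into a readable label, here is a small sketch (assuming the prediction above; the label strings are our own):

# Index 0 is the horse probability, index 1 the human probability
prediction = Axon.predict(model, params, input)
if Nx.to_number(Nx.argmax(prediction)) == 0, do: "horse", else: "human"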

    You can find a validation set here, in case you want to experiment further!

    diff --git a/instrumenting_loops_with_metrics.html b/instrumenting_loops_with_metrics.html index 41bc46ed..155e83dc 100644 --- a/instrumenting_loops_with_metrics.html +++ b/instrumenting_loops_with_metrics.html @@ -14,7 +14,7 @@ - + @@ -136,208 +136,208 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"}
    +])
    :ok

    Adding metrics to training loops

Oftentimes when executing a loop you want to keep track of various metrics such as accuracy or precision. For training loops, Axon by default only tracks loss; however, you can instrument the loop with additional built-in metrics. For example, you might want to track mean absolute error on top of a mean squared error loss:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
     
     loop =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -  |> Axon.Loop.metric(:mean_absolute_error)
    #Axon.Loop<
    -  metrics: %{
    -    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},
    -    "mean_absolute_error" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     :mean_absolute_error}
    -  },
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +  |> Axon.Loop.metric(:mean_absolute_error)
    #Axon.Loop<
    +  metrics: %{
    +    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},
    +    "mean_absolute_error" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     :mean_absolute_error}
    +  },
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    When specifying a metric, you can specify an atom which maps to any of the metrics defined in Axon.Metrics. You can also define custom metrics. For more information on custom metrics, see Writing custom metrics.

    When you run a loop with metrics, Axon will aggregate that metric over the course of the loop execution. For training loops, Axon will also report the aggregate metric in the training logs:

    train_data =
    -  Stream.repeatedly(fn ->
    -    {xs, _next_key} =
    -      :random.uniform(9999)
    -      |> Nx.Random.key()
    -      |> Nx.Random.normal(shape: {8, 1})
    -
    -    ys = Nx.sin(xs)
    -    {xs, ys}
    -  end)
    -
    -Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0590630 mean_absolute_error: 0.1463431
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [-0.015203186310827732, 0.1997198462486267, 0.09740892797708511, -0.007404750678688288, 0.11397464573383331, 0.3608400523662567, 0.07219560444355011, -0.06638865917921066]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [0.07889414578676224, 0.30445051193237305, 0.1377921849489212, 0.015571207739412785, 0.7115736603736877, -0.6404237151145935, 0.25553327798843384, 0.057831913232803345]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.10809992998838425, 0.0, 0.47775307297706604, -0.1641010195016861]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [-0.040330830961465836, -0.36995524168014526, 0.001599793671630323, 0.6012424826622009],
    -        [0.21044284105300903, -0.39482879638671875, -0.5866784453392029, 0.15573620796203613],
    -        [-0.09234675765037537, 0.27758270502090454, -0.6663768291473389, 0.6017312407493591],
    -        [-0.4454570412635803, 0.1304328441619873, -0.31381309032440186, 0.1906844824552536],
    -        [0.3460652530193329, -0.3017694056034088, -0.1680794507265091, -0.47811293601989746],
    -        [0.28633055090904236, -0.34003201127052307, 0.6202688813209534, 0.18027405440807343],
    -        [0.5729941129684448, 0.32222074270248413, 0.20647864043712616, 0.02462891861796379],
    -        [-0.13146185874938965, -0.06700503826141357, 0.6600251793861389, -0.06442582607269287]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.4863035976886749]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.41491562128067017],
    -        [-0.948100209236145],
    -        [-1.2559744119644165],
    -        [1.0097774267196655]
    -      ]
    -    >
    -  }
    -}

    By default, the metric will have a name which matches the string form of the given metric. You can give metrics semantic meaning by providing an explicit name:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -|> Axon.Loop.metric(:mean_absolute_error, "model error")
    -|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0607362 model error: 0.1516546
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.2577069401741028, 0.16761353611946106, 0.11587327718734741, 0.28539595007896423, -0.2071152776479721, -0.02039412036538124, -0.11152249574661255, 0.2389308214187622]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.1265750676393509, 0.6902633309364319, -0.10233660787343979, -0.2544037103652954, -0.26677289605140686, -0.31035077571868896, 0.3845033347606659, -0.33032187819480896]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.0, 0.16427761316299438, 0.02123815007507801, 0.22260485589504242]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [-0.3859425485134125, 0.49959924817085266, -0.34108400344848633, 0.6222119331359863],
    -        [-0.43326857686042786, -0.42272067070007324, 0.04245679825544357, -0.4357914626598358],
    -        [-0.3065953850746155, 0.587925374507904, 0.2960704267024994, -0.31594154238700867],
    -        [-0.35595524311065674, 0.6649497747421265, 0.4832736849784851, 0.3025558590888977],
    -        [0.048333823680877686, -0.17023107409477234, 0.09139639884233475, -0.6511918902397156],
    -        [-0.12099027633666992, -0.02014642395079136, 0.025831595063209534, -0.09945832937955856],
    -        [0.3415437340736389, 0.41694650053977966, 0.24677544832229614, 0.06690020114183426],
    -        [-0.1977071762084961, 0.39345067739486694, 0.26068705320358276, 0.35502269864082336]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.8329466581344604]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [-0.23763614892959595],
    -        [-1.031561255455017],
    -        [0.1092313677072525],
    -        [-0.7191486358642578]
    -      ]
    -    >
    -  }
    -}

    Axon's default aggregation behavior is to aggregate metrics with a running average; however, you can customize this behavior by specifying an explicit accumulation function. Built-in accumulation functions are :running_average and :running_sum:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -|> Axon.Loop.metric(:mean_absolute_error, "total error", :running_sum)
    -|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0688004 total error: 151.4876404
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.34921368956565857, 0.2217460423707962, 0.274880051612854, 0.016405446454882622, -0.11720903217792511, -0.20693546533584595, 0.14232252538204193, -0.07956698536872864]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.37851807475090027, -0.17135880887508392, -0.3878959119319916, 0.19248774647712708, 0.12453905493021011, -0.2750281095504761, 0.5614567995071411, 0.6186240315437317]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [-0.28566694259643555, 0.27262070775032043, -0.2875851094722748, 0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.23161421716213226, 0.8222984671592712, 0.09437259286642075, -0.4825701117515564],
    -        [-0.38828352093696594, 0.6247998476028442, 0.5035035610198975, 0.0026152729988098145],
    -        [0.5202338099479675, 0.7906754612922668, 0.08624745905399323, -0.5285568833351135],
    -        [0.47950035333633423, -0.07571044564247131, 0.32921522855758667, -0.7011756896972656],
    -        [-0.3601212203502655, 0.44817543029785156, 0.13981425762176514, -0.01014477014541626],
    -        [-0.3157005310058594, -0.6309216618537903, 0.5622371435165405, 0.27447545528411865],
    -        [-0.5749425292015076, -0.5073797702789307, -0.3527824282646179, 0.08027392625808716],
    -        [-0.5331286191940308, 0.15432128310203552, -0.015716910362243652, -0.5225256681442261]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.8275660872459412]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.45810666680336],
    -        [-1.0092405080795288],
    -        [0.5322748422622681],
    -        [-0.5989866852760315]
    -      ]
    -    >
    -  }
    -}
    +>

    When specifying a metric, you can specify an atom which maps to any of the metrics defined in Axon.Metrics. You can also define custom metrics. For more information on custom metrics, see Writing custom metrics.
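As a small illustrative sketch (the details live in the Writing custom metrics guide), a custom metric can be any arity-2 function of the targets and predictions passed to Axon.Loop.metric/3; the function body and name below are our own:

# A hand-written mean absolute error metric
loop
|> Axon.Loop.metric(fn y_true, y_pred ->
  Nx.mean(Nx.abs(Nx.subtract(y_true, y_pred)))
end, "my error")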

    When you run a loop with metrics, Axon will aggregate that metric over the course of the loop execution. For training loops, Axon will also report the aggregate metric in the training logs:

    train_data =
    +  Stream.repeatedly(fn ->
    +    {xs, _next_key} =
    +      :random.uniform(9999)
    +      |> Nx.Random.key()
    +      |> Nx.Random.normal(shape: {8, 1})
    +
    +    ys = Nx.sin(xs)
    +    {xs, ys}
    +  end)
    +
    +Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0590630 mean_absolute_error: 0.1463431
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [-0.015203186310827732, 0.1997198462486267, 0.09740892797708511, -0.007404750678688288, 0.11397464573383331, 0.3608400523662567, 0.07219560444355011, -0.06638865917921066]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [0.07889414578676224, 0.30445051193237305, 0.1377921849489212, 0.015571207739412785, 0.7115736603736877, -0.6404237151145935, 0.25553327798843384, 0.057831913232803345]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.10809992998838425, 0.0, 0.47775307297706604, -0.1641010195016861]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.040330830961465836, -0.36995524168014526, 0.001599793671630323, 0.6012424826622009],
    +        [0.21044284105300903, -0.39482879638671875, -0.5866784453392029, 0.15573620796203613],
    +        [-0.09234675765037537, 0.27758270502090454, -0.6663768291473389, 0.6017312407493591],
    +        [-0.4454570412635803, 0.1304328441619873, -0.31381309032440186, 0.1906844824552536],
    +        [0.3460652530193329, -0.3017694056034088, -0.1680794507265091, -0.47811293601989746],
    +        [0.28633055090904236, -0.34003201127052307, 0.6202688813209534, 0.18027405440807343],
    +        [0.5729941129684448, 0.32222074270248413, 0.20647864043712616, 0.02462891861796379],
    +        [-0.13146185874938965, -0.06700503826141357, 0.6600251793861389, -0.06442582607269287]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.4863035976886749]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.41491562128067017],
    +        [-0.948100209236145],
    +        [-1.2559744119644165],
    +        [1.0097774267196655]
    +      ]
    +    >
    +  }
    +}

    By default, the metric will have a name which matches the string form of the given metric. You can give metrics semantic meaning by providing an explicit name:

    model
    +|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +|> Axon.Loop.metric(:mean_absolute_error, "model error")
    +|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0607362 model error: 0.1516546
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.2577069401741028, 0.16761353611946106, 0.11587327718734741, 0.28539595007896423, -0.2071152776479721, -0.02039412036538124, -0.11152249574661255, 0.2389308214187622]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.1265750676393509, 0.6902633309364319, -0.10233660787343979, -0.2544037103652954, -0.26677289605140686, -0.31035077571868896, 0.3845033347606659, -0.33032187819480896]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.0, 0.16427761316299438, 0.02123815007507801, 0.22260485589504242]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.3859425485134125, 0.49959924817085266, -0.34108400344848633, 0.6222119331359863],
    +        [-0.43326857686042786, -0.42272067070007324, 0.04245679825544357, -0.4357914626598358],
    +        [-0.3065953850746155, 0.587925374507904, 0.2960704267024994, -0.31594154238700867],
    +        [-0.35595524311065674, 0.6649497747421265, 0.4832736849784851, 0.3025558590888977],
    +        [0.048333823680877686, -0.17023107409477234, 0.09139639884233475, -0.6511918902397156],
    +        [-0.12099027633666992, -0.02014642395079136, 0.025831595063209534, -0.09945832937955856],
    +        [0.3415437340736389, 0.41694650053977966, 0.24677544832229614, 0.06690020114183426],
    +        [-0.1977071762084961, 0.39345067739486694, 0.26068705320358276, 0.35502269864082336]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.8329466581344604]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.23763614892959595],
    +        [-1.031561255455017],
    +        [0.1092313677072525],
    +        [-0.7191486358642578]
    +      ]
    +    >
    +  }
    +}

    Axon's default aggregation behavior is to aggregate metrics with a running average; however, you can customize this behavior by specifying an explicit accumulation function. Built-in accumulation functions are :running_average and :running_sum:

    model
    +|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +|> Axon.Loop.metric(:mean_absolute_error, "total error", :running_sum)
    +|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0688004 total error: 151.4876404
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.34921368956565857, 0.2217460423707962, 0.274880051612854, 0.016405446454882622, -0.11720903217792511, -0.20693546533584595, 0.14232252538204193, -0.07956698536872864]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.37851807475090027, -0.17135880887508392, -0.3878959119319916, 0.19248774647712708, 0.12453905493021011, -0.2750281095504761, 0.5614567995071411, 0.6186240315437317]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [-0.28566694259643555, 0.27262070775032043, -0.2875851094722748, 0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.23161421716213226, 0.8222984671592712, 0.09437259286642075, -0.4825701117515564],
    +        [-0.38828352093696594, 0.6247998476028442, 0.5035035610198975, 0.0026152729988098145],
    +        [0.5202338099479675, 0.7906754612922668, 0.08624745905399323, -0.5285568833351135],
    +        [0.47950035333633423, -0.07571044564247131, 0.32921522855758667, -0.7011756896972656],
    +        [-0.3601212203502655, 0.44817543029785156, 0.13981425762176514, -0.01014477014541626],
    +        [-0.3157005310058594, -0.6309216618537903, 0.5622371435165405, 0.27447545528411865],
    +        [-0.5749425292015076, -0.5073797702789307, -0.3527824282646179, 0.08027392625808716],
    +        [-0.5331286191940308, 0.15432128310203552, -0.015716910362243652, -0.5225256681442261]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.8275660872459412]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.45810666680336],
    +        [-1.0092405080795288],
    +        [0.5322748422622681],
    +        [-0.5989866852760315]
    +      ]
    +    >
    +  }
    +}
    diff --git a/lstm_generation.html b/lstm_generation.html index 9115f81f..36603998 100644 --- a/lstm_generation.html +++ b/lstm_generation.html @@ -14,7 +14,7 @@ - + @@ -136,15 +136,15 @@

    -
    Mix.install([
    -  {:axon, "~> 0.3.0"},
    -  {:nx, "~> 0.4.0", override: true},
    -  {:exla, "~> 0.4.0"},
    -  {:req, "~> 0.3.1"}
    -])
    +
    Mix.install([
    +  {:axon, "~> 0.3.0"},
    +  {:nx, "~> 0.4.0", override: true},
    +  {:exla, "~> 0.4.0"},
    +  {:req, "~> 0.3.1"}
    +])
     
    -Nx.Defn.default_options(compiler: EXLA)
    -Nx.global_default_backend(EXLA.Backend)

+Nx.Defn.default_options(compiler: EXLA)
+Nx.global_default_backend(EXLA.Backend)

    @@ -159,43 +159,43 @@

Using Project Gutenberg we can download books that are no longer protected under copyright, so we can experiment with them.

    The one that we will use for this experiment is Alice's Adventures in Wonderland by Lewis Carroll. You can choose any other text or book that you like for this experiment.

    # Change the URL if you'd like to experiment with other books
     download_url = "https://www.gutenberg.org/files/11/11-0.txt"
     
    -book_text = Req.get!(download_url).body

First of all, we need to normalize the content of the book. We are only interested in the sequence of English characters, periods, and new lines. Also, we currently don't care about capitalization or things like apostrophes, so we can remove all other characters and downcase everything. We can use a regular expression for that.

We can also convert the string into a list of characters so we can handle them more easily. You will understand exactly why a bit further on.

    normalized_book_text =
    +book_text = Req.get!(download_url).body

First of all, we need to normalize the content of the book. We are only interested in the sequence of English characters, periods, and new lines. Also, we currently don't care about capitalization or things like apostrophes, so we can remove all other characters and downcase everything. We can use a regular expression for that.

We can also convert the string into a list of characters so we can handle them more easily. You will understand exactly why a bit further on.

    normalized_book_text =
       book_text
    -  |> String.downcase()
    -  |> String.replace(~r/[^a-z \.\n]/, "")
    -  |> String.to_charlist()

    We converted the text to a list of characters, where each character is a number (specifically, a Unicode code point). Lowercase English characters are represented with numbers between 97 = a and 122 = z, a space is 32 = [ ], a new line is 10 = \n and the period is 46 = ..

    So we should have 26 + 3 (= 29) characters in total. Let's see if that's true.

    normalized_book_text |> Enum.uniq() |> Enum.count()

Since we want to use these 29 characters as possible values for each input in our neural network, we can re-map them to values between 0 and 28. This way, each specific neuron will indicate a specific character.

# Extract all the unique characters we have and sort them for clarity
    -characters = normalized_book_text |> Enum.uniq() |> Enum.sort()
    -characters_count = Enum.count(characters)
    +  |> String.downcase()
    +  |> String.replace(~r/[^a-z \.\n]/, "")
    +  |> String.to_charlist()

    We converted the text to a list of characters, where each character is a number (specifically, a Unicode code point). Lowercase English characters are represented with numbers between 97 = a and 122 = z, a space is 32 = [ ], a new line is 10 = \n and the period is 46 = ..
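For instance (a quick illustrative aside), a charlist is literally a list of code points:

String.to_charlist("ab.\n")
# => [97, 98, 46, 10], i.e. a, b, the period, and the new line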

    So we should have 26 + 3 (= 29) characters in total. Let's see if that's true.

    normalized_book_text |> Enum.uniq() |> Enum.count()

Since we want to use these 29 characters as possible values for each input in our neural network, we can re-map them to values between 0 and 28. This way, each specific neuron will indicate a specific character.

# Extract all the unique characters we have and sort them for clarity
    +characters = normalized_book_text |> Enum.uniq() |> Enum.sort()
    +characters_count = Enum.count(characters)
     
     # Create a mapping for every character
    -char_to_idx = characters |> Enum.with_index() |> Map.new()
    +char_to_idx = characters |> Enum.with_index() |> Map.new()
     # And a reverse mapping to convert back to characters
    -idx_to_char = characters |> Enum.with_index(&{&2, &1}) |> Map.new()
    +idx_to_char = characters |> Enum.with_index(&{&2, &1}) |> Map.new()
     
    -IO.puts("Total book characters: #{Enum.count(normalized_book_text)}")
    -IO.puts("Total unique characters: #{characters_count}")

    Now we need to create our training and testing data sets. But how?

Our goal is to teach the machine what comes after a sequence of characters (usually). For example, given the following sequence "Hello, My name i", the computer should be able to guess that the next character is probably "s".

    graph LR;
    +IO.puts("Total book characters: #{Enum.count(normalized_book_text)}")
    +IO.puts("Total unique characters: #{characters_count}")

    Now we need to create our training and testing data sets. But how?

Our goal is to teach the machine what comes after a sequence of characters (usually). For example, given the following sequence "Hello, My name i", the computer should be able to guess that the next character is probably "s".

    graph LR;
       A[Input: Hello my name i]-->NN[Neural Network]-->B[Output: s];

Let's choose an arbitrary sequence length and create a data set from the book text. All we need to do is read X characters from the book as the input and then read 1 more as the designated output.

After doing all that, we also want to convert every character to its index using the char_to_idx mapping that we have created before.

    Neural networks work best if you scale your inputs and outputs. In this case we are going to scale everything between 0 and 1 by dividing them by the number of unique characters that we have.

    And for the final step we will reshape it so we can use the data in our LSTM model.

    sequence_length = 100
     
     train_data =
       normalized_book_text
    -  |> Enum.map(&Map.fetch!(char_to_idx, &1))
    -  |> Enum.chunk_every(sequence_length, 1, :discard)
    +  |> Enum.map(&Map.fetch!(char_to_idx, &1))
    +  |> Enum.chunk_every(sequence_length, 1, :discard)
       # We don't want the last chunk since we don't have a prediction for it.
    -  |> Enum.drop(-1)
    -  |> Nx.tensor()
    -  |> Nx.divide(characters_count)
    -  |> Nx.reshape({:auto, sequence_length, 1})

For our train results, we will do the same. Drop the first sequence_length characters and then convert them to the mapping. Additionally, we will do one-hot encoding.

The reason we want to use one-hot encoding is that in our model we don't want to only return a character as the output. We want it to return the probability of each character for the output. This way we can decide whether a given probability is good enough, choose between multiple possible outputs, or even discard everything if the network is not confident enough.

In Nx, you can achieve this encoding with the following snippet:

    Nx.tensor([
    -  [0],
    -  [1],
    -  [2]
    -])
    -|> Nx.equal(Nx.iota({1, 3}))

To sum it up, here is how we generate the train results.

    train_results =
    +  |> Enum.drop(-1)
    +  |> Nx.tensor()
    +  |> Nx.divide(characters_count)
    +  |> Nx.reshape({:auto, sequence_length, 1})

For our train results, we will do the same. Drop the first sequence_length characters and then convert them to the mapping. Additionally, we will do one-hot encoding.

The reason we want to use one-hot encoding is that in our model we don't want to only return a character as the output. We want it to return the probability of each character for the output. This way we can decide whether a given probability is good enough, choose between multiple possible outputs, or even discard everything if the network is not confident enough.

In Nx, you can achieve this encoding with the following snippet:

    Nx.tensor([
    +  [0],
    +  [1],
    +  [2]
    +])
    +|> Nx.equal(Nx.iota({1, 3}))

To sum it up, here is how we generate the train results.

    train_results =
       normalized_book_text
    -  |> Enum.drop(sequence_length)
    -  |> Enum.map(&Map.fetch!(char_to_idx, &1))
    -  |> Nx.tensor()
    -  |> Nx.reshape({:auto, 1})
    -  |> Nx.equal(Nx.iota({1, characters_count}))

+  |> Enum.drop(sequence_length)
+  |> Enum.map(&Map.fetch!(char_to_idx, &1))
+  |> Nx.tensor()
+  |> Nx.reshape({:auto, 1})
+  |> Nx.equal(Nx.iota({1, characters_count}))

    @@ -204,34 +204,34 @@

    # As the input, we expect the sequence_length characters
     
     model =
    -  Axon.input("input_chars", shape: {nil, sequence_length, 1})
    +  Axon.input("input_chars", shape: {nil, sequence_length, 1})
       # The LSTM layer of our network
    -  |> Axon.lstm(256)
    +  |> Axon.lstm(256)
       # Selecting only the output from the LSTM Layer
    -  |> then(fn {out, _} -> out end)
    +  |> then(fn {out, _} -> out end)
       # Since we only want the last sequence in LSTM we will slice it and
       # select the last one
    -  |> Axon.nx(fn t -> t[[0..-1//1, -1]] end)
    +  |> Axon.nx(fn t -> t[[0..-1//1, -1]] end)
       # 20% dropout so we will not become too dependent on specific neurons
    -  |> Axon.dropout(rate: 0.2)
    +  |> Axon.dropout(rate: 0.2)
       # The output layer. One neuron for each character and using softmax,
       # as activation so every node represents a probability
    -  |> Axon.dense(characters_count, activation: :softmax)

    + |> Axon.dense(characters_count, activation: :softmax)

    Training the network

    To train the network, we will use Axon's Loop API. It is pretty straightforward.

    For the loss function we can use categorical cross-entropy since we are dealing with categories (each character) in our output. For the optimizer we can use Adam.

We will train our network for 20 epochs. Note that we are working with a fair amount of data, so it may take a long time unless you run it on a GPU.

    batch_size = 128
    -train_batches = Nx.to_batched(train_data, batch_size)
    -result_batches = Nx.to_batched(train_results, batch_size)
    +train_batches = Nx.to_batched(train_data, batch_size)
    +result_batches = Nx.to_batched(train_results, batch_size)
     
    -IO.puts("Total batches: #{Enum.count(train_batches)}")
    +IO.puts("Total batches: #{Enum.count(train_batches)}")
     
     params =
       model
    -  |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))
    -  |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 20, compiler: EXLA)
    +  |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))
    +  |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 20, compiler: EXLA)
     
     :ok

    @@ -239,32 +239,32 @@

    Generating text

    -

Now we have a trained neural network, so we can start generating text with it! We just need to pass the initial sequence as the input to the network and select the most probable output. Axon.predict/3 will give us the output layer; then, using Nx.argmax/1, we get the index of the most confident neuron and simply convert that index back to its Unicode representation.

    generate_fn = fn model, params, init_seq ->
    +

Now we have a trained neural network, so we can start generating text with it! We just need to pass the initial sequence as the input to the network and select the most probable output. Axon.predict/3 will give us the output layer; then, using Nx.argmax/1, we get the index of the most confident neuron and simply convert that index back to its Unicode representation.

    generate_fn = fn model, params, init_seq ->
       # The initial sequence that we want the network to complete for us.
       init_seq =
         init_seq
    -    |> String.trim()
    -    |> String.downcase()
    -    |> String.to_charlist()
    -    |> Enum.map(&Map.fetch!(char_to_idx, &1))
    +    |> String.trim()
    +    |> String.downcase()
    +    |> String.to_charlist()
    +    |> Enum.map(&Map.fetch!(char_to_idx, &1))
     
    -  Enum.reduce(1..100, init_seq, fn _, seq ->
    +  Enum.reduce(1..100, init_seq, fn _, seq ->
         init_seq =
           seq
    -      |> Enum.take(-sequence_length)
    -      |> Nx.tensor()
    -      |> Nx.divide(characters_count)
    -      |> Nx.reshape({1, sequence_length, 1})
    +      |> Enum.take(-sequence_length)
    +      |> Nx.tensor()
    +      |> Nx.divide(characters_count)
    +      |> Nx.reshape({1, sequence_length, 1})
     
         char =
    -      Axon.predict(model, params, init_seq)
    -      |> Nx.argmax()
    -      |> Nx.to_number()
    +      Axon.predict(model, params, init_seq)
    +      |> Nx.argmax()
    +      |> Nx.to_number()
     
    -    seq ++ [char]
    -  end)
    -  |> Enum.map(&Map.fetch!(idx_to_char, &1))
    -end
    +    seq ++ [char]
    +  end)
    +  |> Enum.map(&Map.fetch!(idx_to_char, &1))
    +end
     
     # The initial sequence that we want the network to complete for us.
     init_seq = """
    @@ -273,34 +273,34 @@ 

cupboards as she fell past it.
"""

-generate_fn.(model, params, init_seq) |> IO.puts()

    +generate_fn.(model, params, init_seq) |> IO.puts()

    Multi LSTM layers

    We can improve our network by stacking multiple LSTM layers together. We just need to change our model and re-train our network.

    new_model =
    -  Axon.input("input_chars", shape: {nil, sequence_length, 1})
    -  |> Axon.lstm(256)
    -  |> then(fn {out, _} -> out end)
    -  |> Axon.dropout(rate: 0.2)
    +  Axon.input("input_chars", shape: {nil, sequence_length, 1})
    +  |> Axon.lstm(256)
    +  |> then(fn {out, _} -> out end)
    +  |> Axon.dropout(rate: 0.2)
       # This time we will pass all of the `out` to the next lstm layer.
       # We just need to slice the last one.
    -  |> Axon.lstm(256)
    -  |> then(fn {out, _} -> out end)
    -  |> Axon.nx(fn x -> x[[0..-1//1, -1]] end)
    -  |> Axon.dropout(rate: 0.2)
    -  |> Axon.dense(characters_count, activation: :softmax)

    Then we can train the network using the exact same code as before

    # Using a smaller batch size in this case will give the network more opportunity to learn
    +  |> Axon.lstm(256)
    +  |> then(fn {out, _} -> out end)
    +  |> Axon.nx(fn x -> x[[0..-1//1, -1]] end)
    +  |> Axon.dropout(rate: 0.2)
    +  |> Axon.dense(characters_count, activation: :softmax)

    Then we can train the network using the exact same code as before

    # Using a smaller batch size in this case will give the network more opportunity to learn
     batch_size = 64
    -train_batches = Nx.to_batched(train_data, batch_size)
    -result_batches = Nx.to_batched(train_results, batch_size)
    +train_batches = Nx.to_batched(train_data, batch_size)
    +result_batches = Nx.to_batched(train_results, batch_size)
     
    -IO.puts("Total batches: #{Enum.count(train_batches)}")
    +IO.puts("Total batches: #{Enum.count(train_batches)}")
     
     new_params =
       new_model
    -  |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))
    -  |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 50, compiler: EXLA)
    +  |> Axon.Loop.trainer(:categorical_cross_entropy, Polaris.Optimizers.adam(learning_rate: 0.001))
    +  |> Axon.Loop.run(Stream.zip(train_batches, result_batches), %{}, epochs: 50, compiler: EXLA)
     
     :ok

    @@ -308,7 +308,7 @@

    Generate text with the new network

    -
    generate_fn.(new_model, new_params, init_seq) |> IO.puts()

As you can see, it improved a lot with this new model and the extensive training. This time it knows about rules like adding a space after a period.

    +
    generate_fn.(new_model, new_params, init_seq) |> IO.puts()

As you can see, it improved a lot with this new model and the extensive training. This time it knows about rules like adding a space after a period.

    diff --git a/mnist.html b/mnist.html index 7d6e3d1a..55f69739 100644 --- a/mnist.html +++ b/mnist.html @@ -14,7 +14,7 @@ - + @@ -136,12 +136,12 @@

    -
    Mix.install([
    -  {:axon, "~> 0.3.0"},
    -  {:nx, "~> 0.4.0", override: true},
    -  {:exla, "~> 0.4.0"},
    -  {:req, "~> 0.3.1"}
    -])

    +
    Mix.install([
    +  {:axon, "~> 0.3.0"},
    +  {:nx, "~> 0.4.0", override: true},
    +  {:exla, "~> 0.4.0"},
    +  {:req, "~> 0.3.1"}
    +])

    @@ -154,30 +154,30 @@

    Retrieving and exploring the dataset

    The MNIST dataset is available for free online. Using Req we'll download both training images and training labels. Both train_images and train_labels are compressed binary data. Fortunately, Req takes care of the decompression for us.

    You can read more about the format of the ubyte files here. Each file starts with a magic number and some metadata. We can use binary pattern matching to extract the information we want. In this case we extract the raw binary images and labels.

    base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/"
    -%{body: train_images} = Req.get!(base_url <> "train-images-idx3-ubyte.gz")
    -%{body: train_labels} = Req.get!(base_url <> "train-labels-idx1-ubyte.gz")
    +%{body: train_images} = Req.get!(base_url <> "train-images-idx3-ubyte.gz")
    +%{body: train_labels} = Req.get!(base_url <> "train-labels-idx1-ubyte.gz")
     
    -<<_::32, n_images::32, n_rows::32, n_cols::32, images::binary>> = train_images
    -<<_::32, n_labels::32, labels::binary>> = train_labels

    We can easily read that binary data into a tensor using Nx.from_binary/2. Nx.from_binary/2 expects a raw binary and a data type. In this case, both images and labels are stored as unsigned 8-bit integers. We can start by parsing our images:

    images =
    +<<_::32, n_images::32, n_rows::32, n_cols::32, images::binary>> = train_images
    +<<_::32, n_labels::32, labels::binary>> = train_labels

    We can easily read that binary data into a tensor using Nx.from_binary/2. Nx.from_binary/2 expects a raw binary and a data type. In this case, both images and labels are stored as unsigned 8-bit integers. We can start by parsing our images:

    images =
       images
    -  |> Nx.from_binary({:u, 8})
    -  |> Nx.reshape({n_images, 1, n_rows, n_cols}, names: [:images, :channels, :height, :width])
    -  |> Nx.divide(255)

    Nx.from_binary/2 returns a flat tensor. Using Nx.reshape/3 we can manipulate this flat tensor into meaningful dimensions. Notice we also normalized the tensor by dividing the input data by 255. This squeezes the data between 0 and 1 which often leads to better behavior when training models. Now, let's see what these images look like:

    images[[images: 0..4]] |> Nx.to_heatmap()

    In the reshape operation above, we give each dimension of the tensor a name. This makes it much easier to do things like slicing, and helps make your code easier to understand. Here we slice the images dimension of the images tensor to obtain the first 5 training images. Then, we convert them to a heatmap for easy visualization.

    It's common to train neural networks in batches (actually correctly called minibatches, but you'll see batch and minibatch used interchangeably). We can "batch" our images into batches of 32 like this:

    images = Nx.to_batched(images, 32)

Now, we'll need to get our labels into batches as well, but first we need to one-hot encode the labels. One-hot encoding converts input data from labels such as 3, 5, 7, etc. into vectors of 0's with a single 1 at the correct label's index. As an example, a label of 3 gets converted to [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].

    targets =
    +  |> Nx.from_binary({:u, 8})
    +  |> Nx.reshape({n_images, 1, n_rows, n_cols}, names: [:images, :channels, :height, :width])
    +  |> Nx.divide(255)

Nx.from_binary/2 returns a flat tensor. Using Nx.reshape/3, we can manipulate this flat tensor into meaningful dimensions. Notice we also normalized the tensor by dividing the input data by 255. This squeezes the data between 0 and 1, which often leads to better behavior when training models. Now, let's see what these images look like:

    images[[images: 0..4]] |> Nx.to_heatmap()

    In the reshape operation above, we give each dimension of the tensor a name. This makes it much easier to do things like slicing, and helps make your code easier to understand. Here we slice the images dimension of the images tensor to obtain the first 5 training images. Then, we convert them to a heatmap for easy visualization.

It's common to train neural networks in batches (more precisely called minibatches, though you'll see batch and minibatch used interchangeably). We can "batch" our images into batches of 32 like this:

    images = Nx.to_batched(images, 32)
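A quick check on the arithmetic (a sketch; Nx.to_batched/2 returns a lazy stream, so counting it walks the whole stream once):

Enum.count(images)
# => 1875, since 60000 / 32 == 1875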

Now, we'll need to get our labels into batches as well, but first we need to one-hot encode the labels. One-hot encoding converts input data from labels such as 3, 5, 7, etc. into vectors of 0's with a single 1 at the correct label's index. As an example, a label of 3 gets converted to: [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].

    targets =
       labels
    -  |> Nx.from_binary({:u, 8})
    -  |> Nx.new_axis(-1)
    -  |> Nx.equal(Nx.tensor(Enum.to_list(0..9)))
    -  |> Nx.to_batched(32)

+  |> Nx.from_binary({:u, 8})
+  |> Nx.new_axis(-1)
+  |> Nx.equal(Nx.tensor(Enum.to_list(0..9)))
+  |> Nx.to_batched(32)
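As a quick sanity check of the one-hot trick (a minimal sketch; the literal label 3 is just an illustration):

Nx.equal(Nx.tensor([[3]]), Nx.tensor(Enum.to_list(0..9)))
# => a {1, 10} tensor: [[0, 0, 0, 1, 0, 0, 0, 0, 0, 0]]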

    Defining the model

    Let's start by defining a simple model:

    model =
    -  Axon.input("input", shape: {nil, 1, 28, 28})
    -  |> Axon.flatten()
    -  |> Axon.dense(128, activation: :relu)
    -  |> Axon.dense(10, activation: :softmax)

    All Axon models start with an input layer to tell subsequent layers what shapes to expect. We then use Axon.flatten/2 which flattens the previous layer by squeezing all dimensions but the first dimension into a single dimension. Our model consists of 2 fully connected layers with 128 and 10 units respectively. The first layer uses :relu activation which returns max(0, input) element-wise. The final layer uses :softmax activation to return a probability distribution over the 10 labels [0 - 9].

+  Axon.input("input", shape: {nil, 1, 28, 28})
+  |> Axon.flatten()
+  |> Axon.dense(128, activation: :relu)
+  |> Axon.dense(10, activation: :softmax)

    All Axon models start with an input layer to tell subsequent layers what shapes to expect. We then use Axon.flatten/2 which flattens the previous layer by squeezing all dimensions but the first dimension into a single dimension. Our model consists of 2 fully connected layers with 128 and 10 units respectively. The first layer uses :relu activation which returns max(0, input) element-wise. The final layer uses :softmax activation to return a probability distribution over the 10 labels [0 - 9].
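If you want to check the layer shapes and parameter counts before training, you can render a model summary against a template input (a sketch; Axon.Display.as_table/2 is used the same way in the autoencoder notebook later in these docs):

template = Nx.template({1, 1, 28, 28}, :f32)
Axon.Display.as_table(model, template) |> IO.puts()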

    @@ -185,18 +185,18 @@

In Axon we express the task of training using a declarative loop API. First, we need to specify a loss function and optimizer; there are many built-in variants to choose from. In this example, we'll use categorical cross-entropy and the Adam optimizer. We will also keep track of the accuracy metric. Finally, we run the training loop, passing our batched images and labels. We'll train for 10 epochs using the EXLA compiler.

    params =
       model
    -  |> Axon.Loop.trainer(:categorical_cross_entropy, :adam)
    -  |> Axon.Loop.metric(:accuracy, "Accuracy")
    -  |> Axon.Loop.run(Stream.zip(images, targets), %{}, epochs: 10, compiler: EXLA)

+  |> Axon.Loop.trainer(:categorical_cross_entropy, :adam)
+  |> Axon.Loop.metric(:accuracy, "Accuracy")
+  |> Axon.Loop.run(Stream.zip(images, targets), %{}, epochs: 10, compiler: EXLA)
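If you need more control than the :adam atom offers, such as a custom learning rate, you can pass a constructed optimizer to the trainer instead (a sketch; assumes the Polaris optimizers used elsewhere in these guides):

optimizer = Polaris.Optimizers.adam(learning_rate: 1.0e-3)

params =
  model
  |> Axon.Loop.trainer(:categorical_cross_entropy, optimizer)
  |> Axon.Loop.metric(:accuracy, "Accuracy")
  |> Axon.Loop.run(Stream.zip(images, targets), %{}, epochs: 10, compiler: EXLA)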

    Prediction

Now that we have the parameters from the training step, we can use them for predictions.
-For this the Axon.predict can be used.

    first_batch = Enum.at(images, 0)
+For this, the Axon.predict function can be used.

    first_batch = Enum.at(images, 0)
     
    -output = Axon.predict(model, params, first_batch)

For each image, the model outputs a probability distribution. This informs us how certain the model is about its prediction. Let's see the most probable digit for each image:

    Nx.argmax(output, axis: 1)

If you look at the original images, you will see the predictions match the data!

    +
    output = Axon.predict(model, params, first_batch)

For each image, the model outputs a probability distribution. This informs us how certain the model is about its prediction. Let's see the most probable digit for each image:

    Nx.argmax(output, axis: 1)

If you look at the original images, you will see the predictions match the data!

diff --git a/mnist_autoencoder_using_kino.html b/mnist_autoencoder_using_kino.html
index ba95bd75..a5a642e1 100644
--- a/mnist_autoencoder_using_kino.html
+++ b/mnist_autoencoder_using_kino.html
@@ -14,7 +14,7 @@
-
+
@@ -136,16 +136,16 @@

    -
    Mix.install([
    -  {:exla, "~> 0.4.0"},
    -  {:nx, "~> 0.4.0", override: true},
    -  {:axon, "~> 0.3.0"},
    -  {:req, "~> 0.3.1"},
    -  {:kino, "~> 0.7.0"},
    -  {:scidata, "~> 0.1.9"},
    -  {:stb_image, "~> 0.5.2"},
    -  {:table_rex, "~> 3.1.1"}
    -])

    +
    Mix.install([
    +  {:exla, "~> 0.4.0"},
    +  {:nx, "~> 0.4.0", override: true},
    +  {:axon, "~> 0.3.0"},
    +  {:req, "~> 0.3.1"},
    +  {:kino, "~> 0.7.0"},
    +  {:scidata, "~> 0.1.9"},
    +  {:stb_image, "~> 0.5.2"},
    +  {:table_rex, "~> 3.1.1"}
    +])

    @@ -158,26 +158,26 @@

    Data loading

    An autoencoder learns to recreate data it's seen in the dataset. For this notebook, we're going to try something simple: generating images of digits using the MNIST digit recognition dataset.

    Following along with the Fashion MNIST Autoencoder example, we'll use Scidata to download the MNIST dataset and then preprocess the data.

    # We're not going to use the labels so we'll ignore them
    -{train_images, _train_labels} = Scidata.MNIST.download()
    -{train_images_binary, type, shape} = train_images

    The shape tells us we have 60,000 images with a single channel of size 28x28.

    According to the MNIST website:

    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    Let's preprocess and normalize the data accordingly.

    train_images =
    +{train_images, _train_labels} = Scidata.MNIST.download()
    +{train_images_binary, type, shape} = train_images

    The shape tells us we have 60,000 images with a single channel of size 28x28.

    According to the MNIST website:

    Pixels are organized row-wise. Pixel values are 0 to 255. 0 means background (white), 255 means foreground (black).

    Let's preprocess and normalize the data accordingly.

    train_images =
       train_images_binary
    -  |> Nx.from_binary(type)
    +  |> Nx.from_binary(type)
       # Since pixels are organized row-wise, reshape into rows x columns
    -  |> Nx.reshape(shape, names: [:images, :channels, :height, :width])
    +  |> Nx.reshape(shape, names: [:images, :channels, :height, :width])
       # Normalize the pixel values to be between 0 and 1
    -  |> Nx.divide(255)
    # Make sure they look like numbers
    -train_images[[images: 0..2]] |> Nx.to_heatmap()

    That looks right! Let's repeat the process for the test set.

    {test_images, _train_labels} = Scidata.MNIST.download_test()
    -{test_images_binary, type, shape} = test_images
    +  |> Nx.divide(255)
    # Make sure they look like numbers
    +train_images[[images: 0..2]] |> Nx.to_heatmap()

    That looks right! Let's repeat the process for the test set.

    {test_images, _train_labels} = Scidata.MNIST.download_test()
    +{test_images_binary, type, shape} = test_images
     
     test_images =
       test_images_binary
    -  |> Nx.from_binary(type)
    +  |> Nx.from_binary(type)
       # Since pixels are organized row-wise, reshape into rows x columns
    -  |> Nx.reshape(shape, names: [:images, :channels, :height, :width])
    +  |> Nx.reshape(shape, names: [:images, :channels, :height, :width])
       # Normalize the pixel values to be between 0 and 1
    -  |> Nx.divide(255)
    +  |> Nx.divide(255)
     
    -test_images[[images: 0..2]] |> Nx.to_heatmap()

    +test_images[[images: 0..2]] |> Nx.to_heatmap()

    @@ -190,79 +190,79 @@

    The model

    model =
    -  Axon.input("image", shape: {nil, 1, 28, 28})
    +  Axon.input("image", shape: {nil, 1, 28, 28})
       # This is now 28*28*1 = 784
    -  |> Axon.flatten()
    +  |> Axon.flatten()
       # The encoder
    -  |> Axon.dense(256, activation: :relu)
    -  |> Axon.dense(128, activation: :relu)
    -  |> Axon.dense(64, activation: :relu)
    +  |> Axon.dense(256, activation: :relu)
    +  |> Axon.dense(128, activation: :relu)
    +  |> Axon.dense(64, activation: :relu)
       # Bottleneck layer
    -  |> Axon.dense(10, activation: :relu)
    +  |> Axon.dense(10, activation: :relu)
       # The decoder
    -  |> Axon.dense(64, activation: :relu)
    -  |> Axon.dense(128, activation: :relu)
    -  |> Axon.dense(256, activation: :relu)
    -  |> Axon.dense(784, activation: :sigmoid)
    +  |> Axon.dense(64, activation: :relu)
    +  |> Axon.dense(128, activation: :relu)
    +  |> Axon.dense(256, activation: :relu)
    +  |> Axon.dense(784, activation: :sigmoid)
       # Turn it back into a 28x28 single channel image
    -  |> Axon.reshape({:auto, 1, 28, 28})
    +  |> Axon.reshape({:auto, 1, 28, 28})
     
     # We can use Axon.Display to show us what each of the layers would look like
     # assuming we send in a batch of 4 images
    -Axon.Display.as_table(model, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()

Checking our understanding: since the layers are all dense, each layer should have input_features * output_features parameters for the weights plus output_features parameters for the biases.

This should match the Total Parameters output from Axon.Display (486298 parameters).

    # encoder
    -encoder_parameters = 784 * 256 + 256 + (256 * 128 + 128) + (128 * 64 + 64) + (64 * 10 + 10)
    -decoder_parameters = 10 * 64 + 64 + (64 * 128 + 128) + (128 * 256 + 256) + (256 * 784 + 784)
    +Axon.Display.as_table(model, Nx.template({4, 1, 28, 28}, :f32)) |> IO.puts()

Checking our understanding: since the layers are all dense, each layer should have input_features * output_features parameters for the weights plus output_features parameters for the biases.

This should match the Total Parameters output from Axon.Display (486298 parameters).

    # encoder
    +encoder_parameters = 784 * 256 + 256 + (256 * 128 + 128) + (128 * 64 + 64) + (64 * 10 + 10)
    +decoder_parameters = 10 * 64 + 64 + (64 * 128 + 128) + (128 * 256 + 256) + (256 * 784 + 784)
     total_parameters = encoder_parameters + decoder_parameters
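Working the arithmetic out by hand: the encoder contributes (784 * 256 + 256) + (256 * 128 + 128) + (128 * 64 + 64) + (64 * 10 + 10) = 242762 parameters, the decoder contributes (10 * 64 + 64) + (64 * 128 + 128) + (128 * 256 + 256) + (256 * 784 + 784) = 243536, and 242762 + 243536 = 486298, which matches the Axon.Display total.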

    Training

    -

With the model set up, we can now try to train the model. We'll use MSE loss to compare our reconstruction with the original.

    We'll create the training input by turning our image list into batches of size 128 and then using the same image as both the input and the target. However, the input image will have noise added to it that the autoencoder will have to remove.

For validation data, we'll use the test set and look at how the autoencoder does at reconstructing the test set to make sure we're not overfitting.

The function below adds noise to the image by mixing in Gaussian noise scaled by a noise factor. We then have to make sure the pixel values are still within the 0.0..1.0 range.

We have to define this function using defn so that Nx can optimize it. If we don't do this, adding noise will take a really long time, making our training loop very slow. See Nx.defn for more details. defn can only be used in a module, so we'll define a little module to contain it.

    defmodule Noiser do
    +

With the model set up, we can now try to train the model. We'll use MSE loss to compare our reconstruction with the original.

    We'll create the training input by turning our image list into batches of size 128 and then using the same image as both the input and the target. However, the input image will have noise added to it that the autoencoder will have to remove.

For validation data, we'll use the test set and look at how the autoencoder does at reconstructing the test set to make sure we're not overfitting.

The function below adds noise to the image by mixing in Gaussian noise scaled by a noise factor. We then have to make sure the pixel values are still within the 0.0..1.0 range.

We have to define this function using defn so that Nx can optimize it. If we don't do this, adding noise will take a really long time, making our training loop very slow. See Nx.defn for more details. defn can only be used in a module, so we'll define a little module to contain it.

    defmodule Noiser do
       import Nx.Defn
     
       @noise_factor 0.4
     
    -  defn add_noise(images) do
    +  defn add_noise(images) do
         @noise_factor
    -    |> Nx.multiply(Nx.random_normal(images))
    -    |> Nx.add(images)
    -    |> Nx.clip(0.0, 1.0)
    -  end
    -end
    +    |> Nx.multiply(Nx.random_normal(images))
    +    |> Nx.add(images)
    +    |> Nx.clip(0.0, 1.0)
    +  end
    +end
     
    -add_noise = Nx.Defn.jit(&Noiser.add_noise/1, compiler: EXLA)
    batch_size = 128
    +add_noise = Nx.Defn.jit(&Noiser.add_noise/1, compiler: EXLA)
    batch_size = 128
     
 # The original image, which is the target the network will be trying to match
     batched_train_images =
       train_images
    -  |> Nx.to_batched(batch_size)
    +  |> Nx.to_batched(batch_size)
     
     batched_noisy_train_images =
       train_images
    -  |> Nx.to_batched(batch_size)
    +  |> Nx.to_batched(batch_size)
       # goes after to_batched so the noise is different every time
    -  |> Stream.map(add_noise)
    +  |> Stream.map(add_noise)
     
     # The noisy image is the input to the network
     # and the original image is the target it's trying to match
    -train_data = Stream.zip(batched_noisy_train_images, batched_train_images)
    +train_data = Stream.zip(batched_noisy_train_images, batched_train_images)
     
     batched_test_images =
       test_images
    -  |> Nx.to_batched(batch_size)
    +  |> Nx.to_batched(batch_size)
     
     batched_noisy_test_images =
       test_images
    -  |> Nx.to_batched(batch_size)
    -  |> Stream.map(add_noise)
    +  |> Nx.to_batched(batch_size)
    +  |> Stream.map(add_noise)
     
    -test_data = Stream.zip(batched_noisy_test_images, batched_test_images)

Let's see what an element of the input and target looks like.

    {input_batch, target_batch} = Enum.at(train_data, 0)
    -{Nx.to_heatmap(input_batch[images: 0]), Nx.to_heatmap(target_batch[images: 0])}

    Looks right (and tricky). Let's see how the model does.

    params =
    +test_data = Stream.zip(batched_noisy_test_images, batched_test_images)

Let's see what an element of the input and target looks like.

    {input_batch, target_batch} = Enum.at(train_data, 0)
    +{Nx.to_heatmap(input_batch[images: 0]), Nx.to_heatmap(target_batch[images: 0])}

    Looks right (and tricky). Let's see how the model does.

    params =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))
    -  |> Axon.Loop.validate(model, test_data)
    -  |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)
    +  |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))
    +  |> Axon.Loop.validate(model, test_data)
    +  |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)
     
     :ok

    Now that we have a model that theoretically has learned something, we'll see what it's learned by running it on some images from the test set. We'll use Kino to allow us to select the image from the test set to run the model against. To avoid losing the params that took a while to train, we'll create another branch so we can experiment with the params and stop execution when needed without having to retrain.

    @@ -271,70 +271,70 @@

    Evaluation

    A note on branching

    By default, everything in Livebook runs sequentially in a single process. Stopping a running cell aborts that process and consequently all its state is lost. A branching section copies everything from its parent and runs in a separate process. Thanks to this isolation, when we stop a cell in a branching section, only the state within that section is gone.

    Since we just spent a bunch of time training the model and don't want to lose that memory state as we continue to experiment, we create a branching section. This does add some memory overhead, but it's worth it so we can experiment without fear!

To use Kino to give us an interactive tool to evaluate the model, we'll create a Kino.Frame that we can dynamically update. We'll also create a form using Kino.Control to allow the user to select which image from the test set they'd like to evaluate the model on. Finally, Kino.Control.stream enables us to respond to changes in the user's selection when the user clicks the "Render" button.

    We can use Nx.concatenate to stack the images side by side for a prettier output.

    form =
    -  Kino.Control.form(
    -    [
    -      test_image_index: Kino.Input.number("Test Image Index", default: 0)
    -    ],
    +  Kino.Control.form(
    +    [
    +      test_image_index: Kino.Input.number("Test Image Index", default: 0)
    +    ],
         submit: "Render"
    -  )
    +  )
     
    -Kino.render(form)
    +Kino.render(form)
     
     form
    -|> Kino.Control.stream()
    -|> Kino.animate(fn %{data: %{test_image_index: image_index}} ->
    -  test_image = test_images[[images: image_index]] |> add_noise.()
    +|> Kino.Control.stream()
    +|> Kino.animate(fn %{data: %{test_image_index: image_index}} ->
    +  test_image = test_images[[images: image_index]] |> add_noise.()
     
       reconstructed_image =
         model
    -    |> Axon.predict(params, test_image)
    +    |> Axon.predict(params, test_image)
         # Get rid of the batch dimension
    -    |> Nx.squeeze(axes: [0])
    +    |> Nx.squeeze(axes: [0])
     
    -  combined_image = Nx.concatenate([test_image, reconstructed_image], axis: :width)
    -  Nx.to_heatmap(combined_image)
    -end)

    That looks pretty good!

    Note we used Kino.animate/2 which runs asynchronously so we don't block execution of the rest of the notebook.

+  combined_image = Nx.concatenate([test_image, reconstructed_image], axis: :width)
+  Nx.to_heatmap(combined_image)
+end)

    That looks pretty good!

    Note we used Kino.animate/2 which runs asynchronously so we don't block execution of the rest of the notebook.

    A better training loop

    Note that we branch from the "Building a model" section since we only need the model definition for this section and not the previously trained model.

    It'd be nice to see how the model improves as it trains. In this section (also a branch since I plan to experiment and don't want to lose the execution state) we'll improve the training loop to use Kino to show us how it's doing.

Axon.Loop.handle gives us a hook into various points of the training loop. We can use it with the :iteration_completed event to get a copy of the state of the params after some number of completed iterations of the training loop. By using those params to render an image from the test set, we can get a live view of the autoencoder learning to reconstruct its inputs.

    # A helper function to display the input and output side by side
    -combined_input_output = fn params, image_index ->
    -  test_image = test_images[[images: image_index]] |> add_noise.()
    -  reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])
    -  Nx.concatenate([test_image, reconstructed_image], axis: :width)
    -end
    +combined_input_output = fn params, image_index ->
    +  test_image = test_images[[images: image_index]] |> add_noise.()
    +  reconstructed_image = Axon.predict(model, params, test_image) |> Nx.squeeze(axes: [0])
    +  Nx.concatenate([test_image, reconstructed_image], axis: :width)
    +end
     
    -Nx.to_heatmap(combined_input_output.(params, 0))

    It'd also be nice to have a prettier version of the output. Let's convert the heatmap to a png to make that happen.

    image_to_kino = fn image ->
    +Nx.to_heatmap(combined_input_output.(params, 0))

    It'd also be nice to have a prettier version of the output. Let's convert the heatmap to a png to make that happen.

    image_to_kino = fn image ->
       image
    -  |> Nx.multiply(255)
    -  |> Nx.as_type(:u8)
    -  |> Nx.transpose(axes: [:height, :width, :channels])
    -  |> StbImage.from_nx()
    -  |> StbImage.resize(200, 400)
    -  |> StbImage.to_binary(:png)
    -  |> Kino.Image.new(:png)
    -end
    -
    -image_to_kino.(combined_input_output.(params, 0))

    Much nicer!

    Once again we'll use Kino.Frame for dynamically updating output:

    frame = Kino.Frame.new() |> Kino.render()
    -
    -render_example_handler = fn state ->
    -  Kino.Frame.append(frame, "Epoch: #{state.epoch}, Iteration: #{state.iteration}")
    +  |> Nx.multiply(255)
    +  |> Nx.as_type(:u8)
    +  |> Nx.transpose(axes: [:height, :width, :channels])
    +  |> StbImage.from_nx()
    +  |> StbImage.resize(200, 400)
    +  |> StbImage.to_binary(:png)
    +  |> Kino.Image.new(:png)
    +end
    +
    +image_to_kino.(combined_input_output.(params, 0))

    Much nicer!

    Once again we'll use Kino.Frame for dynamically updating output:

    frame = Kino.Frame.new() |> Kino.render()
    +
    +render_example_handler = fn state ->
    +  Kino.Frame.append(frame, "Epoch: #{state.epoch}, Iteration: #{state.iteration}")
       # state.step_state[:model_state] contains the model params when this event is fired
    -  params = state.step_state[:model_state]
    -  image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))
    -  image = combined_input_output.(params, image_index) |> image_to_kino.()
    -  Kino.Frame.append(frame, image)
    -  {:continue, state}
    -end
    +  params = state.step_state[:model_state]
    +  image_index = Enum.random(0..(Nx.axis_size(test_images, :images) - 1))
    +  image = combined_input_output.(params, image_index) |> image_to_kino.()
    +  Kino.Frame.append(frame, image)
    +  {:continue, state}
    +end
     
     params =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))
    -  |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)
    -  |> Axon.Loop.validate(model, test_data)
    -  |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)
    +  |> Axon.Loop.trainer(:mean_squared_error, Polaris.Optimizers.adamw(learning_rate: 0.001))
    +  |> Axon.Loop.handle(:iteration_completed, render_example_handler, every: 450)
    +  |> Axon.Loop.validate(model, test_data)
    +  |> Axon.Loop.run(train_data, %{}, epochs: 20, compiler: EXLA)
     
     :ok

    Awesome! We have a working denoising autoencoder that we can visualize getting better in 20 epochs!

diff --git a/model_hooks.html b/model_hooks.html
index ebc236cc..3e68bdef 100644
--- a/model_hooks.html
+++ b/model_hooks.html
@@ -14,7 +14,7 @@
-
+
@@ -136,289 +136,289 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"}
    +])
    :ok

    Creating models with hooks

    Sometimes it's useful to inspect or visualize the values of intermediate layers in your model during the forward or backward pass. For example, it's common to visualize the gradients of activation functions to ensure your model is learning in a stable manner. Axon supports this functionality via model hooks.

    Model hooks are a means of unidirectional communication with an executing model. Hooks are unidirectional in the sense that you can only receive information from your model, and not send information back.

Hooks are attached per-layer and can execute at 4 different points in model execution: on the pre-forward, forward, or backward pass of the model, or during model initialization. You can also configure the same hook to execute on all 4 events. You can attach hooks to models using Axon.attach_hook/3:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_forward) end, on: :forward)
    -  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_init) end, on: :initialize)
    -  |> Axon.relu()
    -  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :relu) end, on: :forward)
    -
    -{init_fn, predict_fn} = Axon.build(model)
    -
    -input = Nx.iota({2, 4}, type: :f32)
    -params = init_fn.(input, %{})
    dense_init: %{
    -  "bias" => #Nx.Tensor<
    -    f32[8]
    -    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    -  >,
    -  "kernel" => #Nx.Tensor<
    -    f32[4][8]
    -    [
    -      [0.6067318320274353, 0.5483129620552063, -0.05663269758224487, -0.48249542713165283, -0.18357598781585693, 0.6496620774269104, 0.4919115900993347, -0.08380156755447388],
    -      [-0.19745409488677979, 0.10483592748641968, -0.43387970328330994, -0.1041460633277893, -0.4129607081413269, -0.6482449769973755, 0.6696910262107849, 0.4690167307853699],
    -      [-0.18194729089736938, -0.4856645464897156, 0.39400774240493774, -0.28496378660202026, 0.32120805978775024, -0.41854584217071533, 0.5671316981315613, -0.21937215328216553],
    -      [0.4516749978065491, -0.23585206270217896, -0.6682141423225403, 0.4286096692085266, -0.14930623769760132, -0.3825327157974243, 0.2700549364089966, -0.3888852596282959]
    -    ]
    -  >
    -}
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][8]
    -      [
    -        [0.6067318320274353, 0.5483129620552063, -0.05663269758224487, -0.48249542713165283, -0.18357598781585693, 0.6496620774269104, 0.4919115900993347, -0.08380156755447388],
    -        [-0.19745409488677979, 0.10483592748641968, -0.43387970328330994, -0.1041460633277893, -0.4129607081413269, -0.6482449769973755, 0.6696910262107849, 0.4690167307853699],
    -        [-0.18194729089736938, -0.4856645464897156, 0.39400774240493774, -0.28496378660202026, 0.32120805978775024, -0.41854584217071533, 0.5671316981315613, -0.21937215328216553],
    -        [0.4516749978065491, -0.23585206270217896, -0.6682141423225403, 0.4286096692085266, -0.14930623769760132, -0.3825327157974243, 0.2700549364089966, -0.3888852596282959]
    -      ]
    -    >
    -  }
    -}

    Notice how during initialization the :dense_init hook fired and inspected the layer's parameters. Now when executing, you'll see outputs for :dense and :relu:

    predict_fn.(params, input)
    relu: #Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [0.7936763167381287, 0.0, 0.0, 0.61175537109375, 0.0, 0.0, 2.614119291305542, 0.0],
    -    [3.5096981525421143, 0.0, 0.0, 0.0, 0.0, 0.0, 10.609275817871094, 0.0]
    -  ]
    ->
    #Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [0.7936763167381287, 0.0, 0.0, 0.61175537109375, 0.0, 0.0, 2.614119291305542, 0.0],
    -    [3.5096981525421143, 0.0, 0.0, 0.0, 0.0, 0.0, 10.609275817871094, 0.0]
    -  ]
    ->
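The :on option controls which event a hook fires on (a sketch; the accepted values come from Axon.attach_hook/3's documentation, assuming a recent Axon version):

Axon.input("data")
|> Axon.dense(4)
|> Axon.attach_hook(&IO.inspect/1, on: :pre_forward)
# :on accepts :initialize, :pre_forward, :forward, :backward, or :all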

    It's important to note that hooks execute in the order they were attached to a layer. If you attach 2 hooks to the same layer which execute different functions on the same event, they will run in order:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook1) end, on: :forward)
    -  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook2) end, on: :forward)
    -  |> Axon.relu()
    -
    -{init_fn, predict_fn} = Axon.build(model)
    -params = init_fn.(input, %{})
    -
    -predict_fn.(params, input)
    hook2: #Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [-0.6567458510398865, 2.2303993701934814, -1.540865421295166, -1.873536229133606, -2.386439085006714, -1.248870849609375, -2.9092607498168945, -0.1976098120212555],
    -    [2.4088101387023926, 5.939034461975098, -2.024522066116333, -7.58249568939209, -10.193460464477539, 0.33839887380599976, -10.836882591247559, 1.8173918724060059]
    -  ]
    ->
    #Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [0.0, 2.2303993701934814, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    -    [2.4088101387023926, 5.939034461975098, 0.0, 0.0, 0.0, 0.33839887380599976, 0.0, 1.8173918724060059]
    -  ]
    ->

    Notice that :hook1 fires before :hook2.

    You can also specify a hook to fire on all events:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.attach_hook(&IO.inspect/1, on: :all)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    -
    -{init_fn, predict_fn} = Axon.build(model)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
    - #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}

    On initialization:

    params = init_fn.(input, %{})
    %{
    -  "bias" => #Nx.Tensor<
    -    f32[8]
    -    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    -  >,
    -  "kernel" => #Nx.Tensor<
    -    f32[4][8]
    -    [
    -      [0.2199305295944214, -0.05434012413024902, -0.07989239692687988, -0.4456246793270111, -0.2792319655418396, -0.1601254940032959, -0.6115692853927612, 0.37740427255630493],
    -      [-0.3606935739517212, 0.6091846823692322, -0.3203054368495941, -0.6252920031547546, -0.41500264406204224, -0.20729252696037292, -0.6763507127761841, -0.6776859164237976],
    -      [0.659041702747345, -0.615885317325592, -0.45865312218666077, 0.18774819374084473, 0.31994110345840454, -0.3055777847766876, -0.3537192642688751, 0.4297131896018982],
    -      [0.06112170219421387, 0.13321959972381592, 0.5566524863243103, -0.1115691065788269, -0.3557875156402588, -0.03118818998336792, -0.5788122415542603, -0.6988758444786072]
    -    ]
    -  >
    -}
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][8]
    -      [
    -        [0.2199305295944214, -0.05434012413024902, -0.07989239692687988, -0.4456246793270111, -0.2792319655418396, -0.1601254940032959, -0.6115692853927612, 0.37740427255630493],
    -        [-0.3606935739517212, 0.6091846823692322, -0.3203054368495941, -0.6252920031547546, -0.41500264406204224, -0.20729252696037292, -0.6763507127761841, -0.6776859164237976],
    -        [0.659041702747345, -0.615885317325592, -0.45865312218666077, 0.18774819374084473, 0.31994110345840454, -0.3055777847766876, -0.3537192642688751, 0.4297131896018982],
    -        [0.06112170219421387, 0.13321959972381592, 0.5566524863243103, -0.1115691065788269, -0.3557875156402588, -0.03118818998336792, -0.5788122415542603, -0.6988758444786072]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][1]
    -      [
    -        [0.3259686231613159],
    -        [0.4874255657196045],
    -        [0.6338149309158325],
    -        [0.4437469244003296],
    -        [-0.22870665788650513],
    -        [0.8108665943145752],
    -        [7.919073104858398e-4],
    -        [0.4469025135040283]
    -      ]
    -    >
    -  }
    -}

    On pre-forward and forward:

    predict_fn.(params, input)
    #Nx.Tensor<
    -  f32[2][4]
    -  [
    -    [0.0, 1.0, 2.0, 3.0],
    -    [4.0, 5.0, 6.0, 7.0]
    -  ]
    ->
    -#Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    -    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    -  ]
    ->
    -#Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    -    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    -  ]
    ->
    #Nx.Tensor<
    -  f32[2][1]
    -  [
    -    [0.6458775401115417],
    -    [1.1593825817108154]
    -  ]
    ->

    And on backwards:

    Nx.Defn.grad(fn params -> predict_fn.(params, input) end).(params)
    #Nx.Tensor<
    -  f32[2][4]
    -  [
    -    [0.0, 1.0, 2.0, 3.0],
    -    [4.0, 5.0, 6.0, 7.0]
    -  ]
    ->
    -#Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    -    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    -  ]
    ->
    -#Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    -    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    -  ]
    ->
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.6519372463226318, 0.4874255657196045, 0.6338149309158325, 0.0, 0.0, 0.0, 0.0, 0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][8]
    -      [
    -        [1.3038744926452637, 1.949702262878418, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    -        [1.9558117389678955, 2.4371278285980225, 0.6338149309158325, 0.0, 0.0, 0.0, 0.0, 0.0],
    -        [2.6077489852905273, 2.924553394317627, 1.267629861831665, 0.0, 0.0, 0.0, 0.0, 0.0],
    -        [3.259686231613159, 3.4119789600372314, 1.9014447927474976, 0.0, 0.0, 0.0, 0.0, 0.0]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [2.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][1]
    -      [
    -        [4.599111557006836],
    -        [0.06578820943832397],
    -        [0.43234577775001526],
    -        [0.0],
    -        [0.0],
    -        [0.0],
    -        [0.0],
    -        [0.0]
    -      ]
    -    >
    -  }
    -}

    Finally, you can specify hooks to only run when the model is built in a certain mode such as training and inference mode. You can read more about training and inference mode in Training and inference mode:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)
    -  |> Axon.relu()
    -
    -{init_fn, predict_fn} = Axon.build(model, mode: :train)
    -params = init_fn.(input, %{})
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][8]
    -      [
    -        [-0.13241732120513916, 0.6946331858634949, -0.6328000426292419, -0.684409499168396, -0.39569517970085144, -0.10005003213882446, 0.2501150965690613, 0.14561182260513306],
    -        [-0.5495109558105469, 0.459137499332428, -0.4059434235095978, -0.4489462077617645, -0.6331832408905029, 0.05011630058288574, -0.35836488008499146, -0.2661571800708771],
    -        [0.29260867834091187, 0.42186349630355835, 0.32596689462661743, -0.12340176105499268, 0.6767188906669617, 0.2658537030220032, 0.5745270848274231, 6.475448608398438e-4],
    -        [0.16781508922576904, 0.23747843503952026, -0.5311254858970642, 0.22617805004119873, -0.5153165459632874, 0.19729173183441162, -0.5706893801689148, -0.5531126260757446]
    -      ]
    -    >
    -  }
    -}

    The model was built in training mode so the hook will run:

    predict_fn.(params, input)
    #Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [0.539151668548584, 2.0152997970581055, -1.347386121749878, -0.017215579748153687, -0.8256950974464417, 1.173698902130127, -0.9213788509368896, -1.9241999387741089],
    -    [-0.3468663692474365, 9.267749786376953, -6.322994232177734, -4.139533042907715, -4.295599460601807, 2.8265457153320312, -1.3390271663665771, -4.616241931915283]
    -  ]
    ->
    %{
    -  prediction: #Nx.Tensor<
    -    f32[2][8]
    -    [
    -      [0.539151668548584, 2.0152997970581055, 0.0, 0.0, 0.0, 1.173698902130127, 0.0, 0.0],
    -      [0.0, 9.267749786376953, 0.0, 0.0, 0.0, 2.8265457153320312, 0.0, 0.0]
    -    ]
    -  >,
    -  state: %{}
    -}
    {init_fn, predict_fn} = Axon.build(model, mode: :inference)
    -params = init_fn.(input, %{})
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][8]
    -      [
    -        [0.02683490514755249, -0.28041765093803406, 0.15839070081710815, 0.16674137115478516, -0.5444575548171997, -0.34951671957969666, 0.08247309923171997, 0.6700448393821716],
    -        [0.6001952290534973, -0.26907777786254883, 0.4580194354057312, -0.060002803802490234, -0.5385662317276001, -0.46773862838745117, 0.25804388523101807, -0.6824946999549866],
    -        [0.13328874111175537, -0.46421635150909424, -0.5192649960517883, -0.0429919958114624, 0.0771912932395935, -0.447194904088974, 0.30910569429397583, -0.6105270981788635],
    -        [0.5253992676734924, 0.41786473989486694, 0.6903378367424011, 0.6038702130317688, 0.06673228740692139, 0.4242702126502991, -0.6737087368965149, -0.6956207156181335]
    -      ]
    -    >
    -  }
    -}

    The model was built in inference mode so the hook will not run:

    predict_fn.(params, input)
    #Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [2.4429705142974854, 0.056083738803863525, 1.490502953529358, 1.6656239032745361, 0.0, 0.0, 0.0, 0.0],
    -    [7.585843086242676, 0.0, 4.640434741973877, 4.336091041564941, 0.0, 0.0, 0.0, 0.0]
    -  ]
    ->
+  Axon.input("data")
+  |> Axon.dense(8)
+  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_forward) end, on: :forward)
+  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :dense_init) end, on: :initialize)
+  |> Axon.relu()
+  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :relu) end, on: :forward)
+
+{init_fn, predict_fn} = Axon.build(model)
+
+input = Nx.iota({2, 4}, type: :f32)
+params = init_fn.(input, %{})
    dense_init: %{
    +  "bias" => #Nx.Tensor<
    +    f32[8]
    +    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    +  >,
    +  "kernel" => #Nx.Tensor<
    +    f32[4][8]
    +    [
    +      [0.6067318320274353, 0.5483129620552063, -0.05663269758224487, -0.48249542713165283, -0.18357598781585693, 0.6496620774269104, 0.4919115900993347, -0.08380156755447388],
    +      [-0.19745409488677979, 0.10483592748641968, -0.43387970328330994, -0.1041460633277893, -0.4129607081413269, -0.6482449769973755, 0.6696910262107849, 0.4690167307853699],
    +      [-0.18194729089736938, -0.4856645464897156, 0.39400774240493774, -0.28496378660202026, 0.32120805978775024, -0.41854584217071533, 0.5671316981315613, -0.21937215328216553],
    +      [0.4516749978065491, -0.23585206270217896, -0.6682141423225403, 0.4286096692085266, -0.14930623769760132, -0.3825327157974243, 0.2700549364089966, -0.3888852596282959]
    +    ]
    +  >
    +}
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][8]
    +      [
    +        [0.6067318320274353, 0.5483129620552063, -0.05663269758224487, -0.48249542713165283, -0.18357598781585693, 0.6496620774269104, 0.4919115900993347, -0.08380156755447388],
    +        [-0.19745409488677979, 0.10483592748641968, -0.43387970328330994, -0.1041460633277893, -0.4129607081413269, -0.6482449769973755, 0.6696910262107849, 0.4690167307853699],
    +        [-0.18194729089736938, -0.4856645464897156, 0.39400774240493774, -0.28496378660202026, 0.32120805978775024, -0.41854584217071533, 0.5671316981315613, -0.21937215328216553],
    +        [0.4516749978065491, -0.23585206270217896, -0.6682141423225403, 0.4286096692085266, -0.14930623769760132, -0.3825327157974243, 0.2700549364089966, -0.3888852596282959]
    +      ]
    +    >
    +  }
    +}

    Notice how during initialization the :dense_init hook fired and inspected the layer's parameters. Now when executing, you'll see outputs for :dense and :relu:

    predict_fn.(params, input)
    relu: #Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [0.7936763167381287, 0.0, 0.0, 0.61175537109375, 0.0, 0.0, 2.614119291305542, 0.0],
    +    [3.5096981525421143, 0.0, 0.0, 0.0, 0.0, 0.0, 10.609275817871094, 0.0]
    +  ]
    +>
    #Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [0.7936763167381287, 0.0, 0.0, 0.61175537109375, 0.0, 0.0, 2.614119291305542, 0.0],
    +    [3.5096981525421143, 0.0, 0.0, 0.0, 0.0, 0.0, 10.609275817871094, 0.0]
    +  ]
    +>

    It's important to note that hooks execute in the order they were attached to a layer. If you attach 2 hooks to the same layer which execute different functions on the same event, they will run in order:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook1) end, on: :forward)
    +  |> Axon.attach_hook(fn val -> IO.inspect(val, label: :hook2) end, on: :forward)
    +  |> Axon.relu()
    +
    +{init_fn, predict_fn} = Axon.build(model)
    +params = init_fn.(input, %{})
    +
    +predict_fn.(params, input)
    hook2: #Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [-0.6567458510398865, 2.2303993701934814, -1.540865421295166, -1.873536229133606, -2.386439085006714, -1.248870849609375, -2.9092607498168945, -0.1976098120212555],
    +    [2.4088101387023926, 5.939034461975098, -2.024522066116333, -7.58249568939209, -10.193460464477539, 0.33839887380599976, -10.836882591247559, 1.8173918724060059]
    +  ]
    +>
    #Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [0.0, 2.2303993701934814, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    +    [2.4088101387023926, 5.939034461975098, 0.0, 0.0, 0.0, 0.33839887380599976, 0.0, 1.8173918724060059]
    +  ]
    +>

    Notice that :hook1 fires before :hook2.

    You can also specify a hook to fire on all events:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.attach_hook(&IO.inspect/1, on: :all)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
    +
    +{init_fn, predict_fn} = Axon.build(model)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
    + #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}

    On initialization:

    params = init_fn.(input, %{})
    %{
    +  "bias" => #Nx.Tensor<
    +    f32[8]
    +    [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    +  >,
    +  "kernel" => #Nx.Tensor<
    +    f32[4][8]
    +    [
    +      [0.2199305295944214, -0.05434012413024902, -0.07989239692687988, -0.4456246793270111, -0.2792319655418396, -0.1601254940032959, -0.6115692853927612, 0.37740427255630493],
    +      [-0.3606935739517212, 0.6091846823692322, -0.3203054368495941, -0.6252920031547546, -0.41500264406204224, -0.20729252696037292, -0.6763507127761841, -0.6776859164237976],
    +      [0.659041702747345, -0.615885317325592, -0.45865312218666077, 0.18774819374084473, 0.31994110345840454, -0.3055777847766876, -0.3537192642688751, 0.4297131896018982],
    +      [0.06112170219421387, 0.13321959972381592, 0.5566524863243103, -0.1115691065788269, -0.3557875156402588, -0.03118818998336792, -0.5788122415542603, -0.6988758444786072]
    +    ]
    +  >
    +}
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][8]
    +      [
    +        [0.2199305295944214, -0.05434012413024902, -0.07989239692687988, -0.4456246793270111, -0.2792319655418396, -0.1601254940032959, -0.6115692853927612, 0.37740427255630493],
    +        [-0.3606935739517212, 0.6091846823692322, -0.3203054368495941, -0.6252920031547546, -0.41500264406204224, -0.20729252696037292, -0.6763507127761841, -0.6776859164237976],
    +        [0.659041702747345, -0.615885317325592, -0.45865312218666077, 0.18774819374084473, 0.31994110345840454, -0.3055777847766876, -0.3537192642688751, 0.4297131896018982],
    +        [0.06112170219421387, 0.13321959972381592, 0.5566524863243103, -0.1115691065788269, -0.3557875156402588, -0.03118818998336792, -0.5788122415542603, -0.6988758444786072]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][1]
    +      [
    +        [0.3259686231613159],
    +        [0.4874255657196045],
    +        [0.6338149309158325],
    +        [0.4437469244003296],
    +        [-0.22870665788650513],
    +        [0.8108665943145752],
    +        [7.919073104858398e-4],
    +        [0.4469025135040283]
    +      ]
    +    >
    +  }
    +}

    On pre-forward and forward:

    predict_fn.(params, input)
    #Nx.Tensor<
    +  f32[2][4]
    +  [
    +    [0.0, 1.0, 2.0, 3.0],
    +    [4.0, 5.0, 6.0, 7.0]
    +  ]
    +>
    +#Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    +    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    +  ]
    +>
    +#Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    +    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    +  ]
    +>
    #Nx.Tensor<
    +  f32[2][1]
    +  [
    +    [0.6458775401115417],
    +    [1.1593825817108154]
    +  ]
    +>

    And on backwards:

    Nx.Defn.grad(fn params -> predict_fn.(params, input) end).(params)
    #Nx.Tensor<
    +  f32[2][4]
    +  [
    +    [0.0, 1.0, 2.0, 3.0],
    +    [4.0, 5.0, 6.0, 7.0]
    +  ]
    +>
    +#Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    +    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    +  ]
    +>
    +#Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [1.1407549381256104, -0.22292715311050415, 0.43234577775001526, -0.5845029354095459, -0.8424829840660095, -0.9120126962661743, -3.1202259063720703, -1.9148870706558228],
    +    [3.4583563804626465, 0.06578820943832397, -0.776448130607605, -4.563453197479248, -3.7628071308135986, -3.7287485599517822, -12.002032279968262, -4.19266414642334]
    +  ]
    +>
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.6519372463226318, 0.4874255657196045, 0.6338149309158325, 0.0, 0.0, 0.0, 0.0, 0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][8]
    +      [
    +        [1.3038744926452637, 1.949702262878418, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
    +        [1.9558117389678955, 2.4371278285980225, 0.6338149309158325, 0.0, 0.0, 0.0, 0.0, 0.0],
    +        [2.6077489852905273, 2.924553394317627, 1.267629861831665, 0.0, 0.0, 0.0, 0.0, 0.0],
    +        [3.259686231613159, 3.4119789600372314, 1.9014447927474976, 0.0, 0.0, 0.0, 0.0, 0.0]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [2.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][1]
    +      [
    +        [4.599111557006836],
    +        [0.06578820943832397],
    +        [0.43234577775001526],
    +        [0.0],
    +        [0.0],
    +        [0.0],
    +        [0.0],
    +        [0.0]
    +      ]
    +    >
    +  }
    +}

    Finally, you can specify hooks to only run when the model is built in a certain mode such as training and inference mode. You can read more about training and inference mode in Training and inference mode:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.attach_hook(&IO.inspect/1, on: :forward, mode: :train)
    +  |> Axon.relu()
    +
    +{init_fn, predict_fn} = Axon.build(model, mode: :train)
    +params = init_fn.(input, %{})
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][8]
    +      [
    +        [-0.13241732120513916, 0.6946331858634949, -0.6328000426292419, -0.684409499168396, -0.39569517970085144, -0.10005003213882446, 0.2501150965690613, 0.14561182260513306],
    +        [-0.5495109558105469, 0.459137499332428, -0.4059434235095978, -0.4489462077617645, -0.6331832408905029, 0.05011630058288574, -0.35836488008499146, -0.2661571800708771],
    +        [0.29260867834091187, 0.42186349630355835, 0.32596689462661743, -0.12340176105499268, 0.6767188906669617, 0.2658537030220032, 0.5745270848274231, 6.475448608398438e-4],
    +        [0.16781508922576904, 0.23747843503952026, -0.5311254858970642, 0.22617805004119873, -0.5153165459632874, 0.19729173183441162, -0.5706893801689148, -0.5531126260757446]
    +      ]
    +    >
    +  }
    +}

    The model was built in training mode so the hook will run:

    predict_fn.(params, input)
    #Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [0.539151668548584, 2.0152997970581055, -1.347386121749878, -0.017215579748153687, -0.8256950974464417, 1.173698902130127, -0.9213788509368896, -1.9241999387741089],
    +    [-0.3468663692474365, 9.267749786376953, -6.322994232177734, -4.139533042907715, -4.295599460601807, 2.8265457153320312, -1.3390271663665771, -4.616241931915283]
    +  ]
    +>
    %{
    +  prediction: #Nx.Tensor<
    +    f32[2][8]
    +    [
    +      [0.539151668548584, 2.0152997970581055, 0.0, 0.0, 0.0, 1.173698902130127, 0.0, 0.0],
    +      [0.0, 9.267749786376953, 0.0, 0.0, 0.0, 2.8265457153320312, 0.0, 0.0]
    +    ]
    +  >,
    +  state: %{}
    +}
    {init_fn, predict_fn} = Axon.build(model, mode: :inference)
    +params = init_fn.(input, %{})
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][8]
    +      [
    +        [0.02683490514755249, -0.28041765093803406, 0.15839070081710815, 0.16674137115478516, -0.5444575548171997, -0.34951671957969666, 0.08247309923171997, 0.6700448393821716],
    +        [0.6001952290534973, -0.26907777786254883, 0.4580194354057312, -0.060002803802490234, -0.5385662317276001, -0.46773862838745117, 0.25804388523101807, -0.6824946999549866],
    +        [0.13328874111175537, -0.46421635150909424, -0.5192649960517883, -0.0429919958114624, 0.0771912932395935, -0.447194904088974, 0.30910569429397583, -0.6105270981788635],
    +        [0.5253992676734924, 0.41786473989486694, 0.6903378367424011, 0.6038702130317688, 0.06673228740692139, 0.4242702126502991, -0.6737087368965149, -0.6956207156181335]
    +      ]
    +    >
    +  }
    +}

    The model was built in inference mode so the hook will not run:

    predict_fn.(params, input)
    #Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [2.4429705142974854, 0.056083738803863525, 1.490502953529358, 1.6656239032745361, 0.0, 0.0, 0.0, 0.0],
    +    [7.585843086242676, 0.0, 4.640434741973877, 4.336091041564941, 0.0, 0.0, 0.0, 0.0]
    +  ]
    +>
diff --git a/multi_input_multi_output_models.html b/multi_input_multi_output_models.html
index d5210b4f..3c6ed487 100644
--- a/multi_input_multi_output_models.html
+++ b/multi_input_multi_output_models.html
@@ -14,7 +14,7 @@
-
+
@@ -136,63 +136,63 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"},
    -  {:kino, ">= 0.9.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"},
    +  {:kino, ">= 0.9.0"}
    +])
    :ok

    Creating multi-input models

    -

    Sometimes your application necessitates the use of multiple inputs. To use multiple inputs in an Axon model, you just need to declare multiple inputs in your graph:

    input_1 = Axon.input("input_1")
    -input_2 = Axon.input("input_2")
    +

    Sometimes your application necessitates the use of multiple inputs. To use multiple inputs in an Axon model, you just need to declare multiple inputs in your graph:

    input_1 = Axon.input("input_1")
    +input_2 = Axon.input("input_2")
     
    -out = Axon.add(input_1, input_2)
    #Axon<
    -  inputs: %{"input_1" => nil, "input_2" => nil}
    +out = Axon.add(input_1, input_2)
    #Axon<
    +  inputs: %{"input_1" => nil, "input_2" => nil}
       outputs: "add_0"
       nodes: 4
    ->

Notice when you inspect the model, it tells you what your model's inputs are up front. You can also get metadata about your model inputs programmatically with Axon.get_inputs/1:

    Axon.get_inputs(out)
    %{"input_1" => nil, "input_2" => nil}

Each input is uniquely named, so you can pass inputs by name into inspection and execution functions with a map:

    inputs = %{
    -  "input_1" => Nx.template({2, 8}, :f32),
    -  "input_2" => Nx.template({2, 8}, :f32)
    -}
    +>

Notice when you inspect the model, it tells you what your model's inputs are up front. You can also get metadata about your model inputs programmatically with Axon.get_inputs/1:

    Axon.get_inputs(out)
    %{"input_1" => nil, "input_2" => nil}

Each input is uniquely named, so you can pass inputs by name into inspection and execution functions with a map:

    inputs = %{
    +  "input_1" => Nx.template({2, 8}, :f32),
    +  "input_2" => Nx.template({2, 8}, :f32)
    +}
     
    -Axon.Display.as_graph(out, inputs)
    graph TD;
    +Axon.Display.as_graph(out, inputs)
    graph TD;
     3[/"input_1 (:input) {2, 8}"/];
     4[/"input_2 (:input) {2, 8}"/];
     5["container_0 (:container) {{2, 8}, {2, 8}}"];
     6["add_0 (:add) {2, 8}"];
     5 --> 6;
     4 --> 5;
    -3 --> 5;
    {init_fn, predict_fn} = Axon.build(out)
    -params = init_fn.(inputs, %{})
    %{}
    inputs = %{
    -  "input_1" => Nx.iota({2, 8}, type: :f32),
    -  "input_2" => Nx.iota({2, 8}, type: :f32)
    -}
    -
    -predict_fn.(params, inputs)
    #Nx.Tensor<
    -  f32[2][8]
    -  [
    -    [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0],
    -    [16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0]
    -  ]
    ->

    If you forget a required input, Axon will raise:

    predict_fn.(params, %{"input_1" => Nx.iota({2, 8}, type: :f32)})

    +3 --> 5;

    {init_fn, predict_fn} = Axon.build(out)
    +params = init_fn.(inputs, %{})
    %{}
    inputs = %{
    +  "input_1" => Nx.iota({2, 8}, type: :f32),
    +  "input_2" => Nx.iota({2, 8}, type: :f32)
    +}
    +
    +predict_fn.(params, inputs)
    #Nx.Tensor<
    +  f32[2][8]
    +  [
    +    [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0],
    +    [16.0, 18.0, 20.0, 22.0, 24.0, 26.0, 28.0, 30.0]
    +  ]
    +>

    If you forget a required input, Axon will raise:

    predict_fn.(params, %{"input_1" => Nx.iota({2, 8}, type: :f32)})
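If your graph can tolerate an input being absent, you can declare it as optional instead of letting the predict function raise; a missing optional input then propagates as an Axon.None value (a sketch; assumes the :optional option on Axon.input in recent Axon versions):

optional_input = Axon.input("input_2", optional: true)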

    Creating multi-output models

Depending on your application, you might also want your model to have multiple outputs. You can achieve this by using Axon.container/2 to wrap multiple nodes into any supported Nx container:

inp = Axon.input("data")

x1 = inp |> Axon.dense(32) |> Axon.relu()
x2 = inp |> Axon.dense(64) |> Axon.relu()

out = Axon.container({x1, x2})
#Axon<
  inputs: %{"data" => nil}
  outputs: "container_0"
  nodes: 6
>
template = Nx.template({2, 8}, :f32)
Axon.Display.as_graph(out, template)
graph TD;
     7[/"data (:input) {2, 8}"/];
     8["dense_0 (:dense) {2, 32}"];
     9["relu_0 (:relu) {2, 32}"];
    @@ -204,80 +204,80 @@ 

    10 --> 11; 7 --> 10; 8 --> 9; -7 --> 8;

    When executed, containers will return a data structure which matches their input structure:

    {init_fn, predict_fn} = Axon.build(out)
    -params = init_fn.(template, %{})
    -predict_fn.(params, Nx.iota({2, 8}, type: :f32))
    {#Nx.Tensor<
    -   f32[2][32]
    -   [
    -     [0.4453479051589966, 1.7394963502883911, 0.8509911298751831, 0.35142624378204346, 0.0, 0.0, 0.0, 3.942654609680176, 0.0, 0.0, 0.0, 0.6140655279159546, 0.0, 5.719906330108643, 1.1410939693450928, 0.0, 2.6871578693389893, 3.373258352279663, 0.0, 0.0, 0.0, 0.3058185875415802, 0.0, 0.0, 1.3737146854400635, 2.2648088932037354, 1.3570061922073364, 0.0, 0.05746358633041382, 0.0, 2.046199321746826, 4.884631156921387],
    -     [0.0, 2.0598671436309814, 2.4343056678771973, 3.2341041564941406, 0.0, 1.905256748199463, 0.0, 12.712749481201172, 0.0, 0.0, 0.0, 4.559232711791992, 0.0, 12.027459144592285, 0.8423471450805664, 0.0, 8.888325691223145, ...]
    -   ]
    - >,
    - #Nx.Tensor<
    -   f32[2][64]
    -   [
    -     [2.211906909942627, 0.937014639377594, 0.017132893204689026, 0.0, 3.617021083831787, 1.3125507831573486, 1.1870051622390747, 0.0, 0.0, 1.245000958442688, 1.5268664360046387, 0.0, 2.16796612739563, 0.8091188669204712, 0.45314761996269226, 0.0, 0.05176612734794617, 0.0, 5.982738018035889, 1.58057701587677, 0.0, 0.0, 1.2986125946044922, 0.8577098250389099, 0.0, 1.1064631938934326, 1.1242716312408447, 1.8777625560760498, 3.4422712326049805, 0.13321448862552643, 2.753225088119507, 0.0, 0.45021766424179077, 0.5664225816726685, 0.0, 0.0, 0.0, 1.5448659658432007, 0.0, 0.7237715721130371, 0.1693495213985443, 0.0, 0.719341516494751, 0.0, 0.0, 4.644839763641357, 0.0, 3.597681760787964, ...],
    +7 --> 8;

    When executed, containers will return a data structure which matches their input structure:

    {init_fn, predict_fn} = Axon.build(out)
    +params = init_fn.(template, %{})
    +predict_fn.(params, Nx.iota({2, 8}, type: :f32))
    {#Nx.Tensor<
    +   f32[2][32]
    +   [
    +     [0.4453479051589966, 1.7394963502883911, 0.8509911298751831, 0.35142624378204346, 0.0, 0.0, 0.0, 3.942654609680176, 0.0, 0.0, 0.0, 0.6140655279159546, 0.0, 5.719906330108643, 1.1410939693450928, 0.0, 2.6871578693389893, 3.373258352279663, 0.0, 0.0, 0.0, 0.3058185875415802, 0.0, 0.0, 1.3737146854400635, 2.2648088932037354, 1.3570061922073364, 0.0, 0.05746358633041382, 0.0, 2.046199321746826, 4.884631156921387],
    +     [0.0, 2.0598671436309814, 2.4343056678771973, 3.2341041564941406, 0.0, 1.905256748199463, 0.0, 12.712749481201172, 0.0, 0.0, 0.0, 4.559232711791992, 0.0, 12.027459144592285, 0.8423471450805664, 0.0, 8.888325691223145, ...]
    +   ]
    + >,
    + #Nx.Tensor<
    +   f32[2][64]
    +   [
    +     [2.211906909942627, 0.937014639377594, 0.017132893204689026, 0.0, 3.617021083831787, 1.3125507831573486, 1.1870051622390747, 0.0, 0.0, 1.245000958442688, 1.5268664360046387, 0.0, 2.16796612739563, 0.8091188669204712, 0.45314761996269226, 0.0, 0.05176612734794617, 0.0, 5.982738018035889, 1.58057701587677, 0.0, 0.0, 1.2986125946044922, 0.8577098250389099, 0.0, 1.1064631938934326, 1.1242716312408447, 1.8777625560760498, 3.4422712326049805, 0.13321448862552643, 2.753225088119507, 0.0, 0.45021766424179077, 0.5664225816726685, 0.0, 0.0, 0.0, 1.5448659658432007, 0.0, 0.7237715721130371, 0.1693495213985443, 0.0, 0.719341516494751, 0.0, 0.0, 4.644839763641357, 0.0, 3.597681760787964, ...],
          ...
    -   ]
    - >}

    You can output maps as well:

    out = Axon.container(%{x1: x1, x2: x2})
    #Axon<
    -  inputs: %{"data" => nil}
    +   ]
    + >}

    You can output maps as well:

    out = Axon.container(%{x1: x1, x2: x2})
    #Axon<
    +  inputs: %{"data" => nil}
       outputs: "container_0"
       nodes: 6
    ->
{init_fn, predict_fn} = Axon.build(out)
params = init_fn.(template, %{})
predict_fn.(params, Nx.iota({2, 8}, type: :f32))
%{
  x1: #Nx.Tensor<
    f32[2][32]
    [
      [1.4180752038955688, 1.8710994720458984, 0.0, 1.1198676824569702, 1.1357430219650269, 0.0, 0.0, 0.0, 2.907017469406128, 0.0, 0.3814663589000702, 0.0, 0.6225995421409607, 1.1952786445617676, 0.0, 3.6701409816741943, 3.581918716430664, 1.4750021696090698, 0.910987377166748, 0.0, 0.0, 0.0, 2.317782402038574, 0.8362345695495605, 0.0, 1.9256348609924316, 0.0, 0.0, 0.0, 1.8028252124786377, 1.448373556137085, 1.743951678276062],
      [3.7401936054229736, 2.494429349899292, 0.0, 0.9745509624481201, 8.416919708251953, 0.0, 0.6044515371322632, 0.0, 2.5829238891601562, 0.0, 3.592892646789551, 0.0, 0.0, 4.004939079284668, 0.0, 9.755555152893066, 5.3506879806518555, ...]
    ]
  >,
  x2: #Nx.Tensor<
    f32[2][64]
    [
      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.5240116119384766, 0.0, 1.6478428840637207, 0.0, 0.0, 0.0, 0.0, 2.1685361862182617, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.5010783672332764, 0.36673399806022644, 0.0, 0.0, 0.5610344409942627, 1.9324723482131958, 0.39768826961517334, 0.0, 0.0, 0.0, 0.0, 0.0, 0.054594263434410095, 0.6123883128166199, 0.15942004323005676, 0.7058550715446472, 0.0, 1.860019326210022, 0.2499483972787857, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03381317853927612, ...],
      ...
    ]
  >
}

Containers even support arbitrary nesting:

out = Axon.container({%{x1: {x1, x2}, x2: %{x1: x1, x2: {x2}}}})
#Axon<
  inputs: %{"data" => nil}
  outputs: "container_0"
  nodes: 6
>
{init_fn, predict_fn} = Axon.build(out)
params = init_fn.(template, %{})
predict_fn.(params, Nx.iota({2, 8}, type: :f32))
{%{
   x1: {#Nx.Tensor<
      f32[2][32]
      [
        [1.7373675107955933, 0.0, 5.150482177734375, 0.544252336025238, 0.275376558303833, 0.0, 0.0, 0.0, 0.0, 1.7849855422973633, 0.7857151031494141, 0.2273893654346466, 0.2701767086982727, 2.321484327316284, 2.685051441192627, 0.0, 2.547382116317749, 0.0, 0.0, 0.0, 0.722919225692749, 2.3600289821624756, 1.4695687294006348, 0.0, 0.0, 0.0, 1.0015852451324463, 1.2762010097503662, 0.0, 0.07927703857421875, 0.0, 0.6216219663619995],
        [4.996878623962402, 0.0, 14.212154388427734, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.517582356929779, 0.0, 2.036062479019165, 2.907236337661743, 8.515787124633789, 7.998186111450195, ...]
      ]
    >,
    #Nx.Tensor<
      f32[2][64]
      [
        [1.2057430744171143, 0.0, 0.0, 0.8717040419578552, 1.7653638124465942, 0.0, 0.0, 0.0, 0.0, 0.9921279549598694, 0.0, 1.0860291719436646, 2.3648557662963867, 0.0, 0.0, 2.0518181324005127, 1.6323723793029785, 0.9113610982894897, 1.6805293560028076, 0.8101096749305725, 0.0, 0.0, 0.0, 2.2150073051452637, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2320713996887207, 0.0, 2.553570508956909, 0.28632092475891113, 0.0, 0.0, 0.020383253693580627, 0.0, 0.2926883101463318, 1.3561311960220337, 0.8884503245353699, 3.1455295085906982, 0.0, 0.0, 1.237722635269165, 0.0, 2.149625539779663, ...],
        ...
      ]
    >},
   x2: %{
     x1: #Nx.Tensor<
       f32[2][32]
       [
         [1.7373675107955933, 0.0, 5.150482177734375, 0.544252336025238, 0.275376558303833, 0.0, 0.0, 0.0, 0.0, 1.7849855422973633, 0.7857151031494141, 0.2273893654346466, 0.2701767086982727, 2.321484327316284, 2.685051441192627, 0.0, 2.547382116317749, 0.0, 0.0, 0.0, 0.722919225692749, 2.3600289821624756, 1.4695687294006348, 0.0, 0.0, 0.0, 1.0015852451324463, 1.2762010097503662, 0.0, 0.07927703857421875, 0.0, 0.6216219663619995],
         [4.996878623962402, 0.0, 14.212154388427734, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.517582356929779, 0.0, 2.036062479019165, 2.907236337661743, 8.515787124633789, ...]
       ]
     >,
     x2: {#Nx.Tensor<
        f32[2][64]
        [
          [1.2057430744171143, 0.0, 0.0, 0.8717040419578552, 1.7653638124465942, 0.0, 0.0, 0.0, 0.0, 0.9921279549598694, 0.0, 1.0860291719436646, 2.3648557662963867, 0.0, 0.0, 2.0518181324005127, 1.6323723793029785, 0.9113610982894897, 1.6805293560028076, 0.8101096749305725, 0.0, 0.0, 0.0, 2.2150073051452637, 0.0, 0.0, 0.0, 0.0, 0.0, 2.2320713996887207, 0.0, 2.553570508956909, 0.28632092475891113, 0.0, 0.0, 0.020383253693580627, 0.0, 0.2926883101463318, 1.3561311960220337, 0.8884503245353699, 3.1455295085906982, 0.0, 0.0, 1.237722635269165, ...],
          ...
        ]
      >}
   }
 }}
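Because the output mirrors the container structure you declared, you can pattern match on results directly. A quick illustrative sketch of ours, rebuilding the tuple model from earlier (x1, x2, and template as defined above):

{init_fn, predict_fn} = Axon.build(Axon.container({x1, x2}))
params = init_fn.(template, %{})

# Destructure the tuple output; shapes follow the dense sizes {2, 32} and {2, 64}
{x1_out, x2_out} = predict_fn.(params, Nx.iota({2, 8}, type: :f32))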
diff --git a/onnx_to_axon.html b/onnx_to_axon.html
index 37a6b383..84f8c6d4 100644
--- a/onnx_to_axon.html
+++ b/onnx_to_axon.html

Mix.install(
  [
    {:axon, ">= 0.5.0"},
    {:exla, ">= 0.5.0"},
    {:axon_onnx, ">= 0.4.0"},
    {:stb_image, ">= 0.6.0"},
    {:kino, ">= 0.9.0"},
    {:req, ">= 0.3.8"}
  ]
  # for Nvidia GPU change to "cuda111" for CUDA 11.1+ or "cuda118" for CUDA 11.8
  # CUDA 12.x not supported by XLA
  # or you can put this value in ENV variables in Livebook settings
  # XLA_TARGET=cuda111
  # system_env: %{"XLA_TARGET" => xla_target}
)


contains the ONNX model file. This notebook assumes the output file location will be in models/axon. Copy your ONNX model files into the models/onnx folder.

This opinionated module presents a simple API for loading in an ONNX file and saving the converted Axon model in the provided directory. This API will allow us to save multiple models pretty quickly.

defmodule OnnxToAxon do
  @moduledoc """
  Helper module from ONNX to Axon.
  """

  @doc """
  ...

      OnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)
  """
  def onnx_axon(path_to_onnx_file, path_to_axon_dir) do
    axon_name = axon_name_from_onnx_path(path_to_onnx_file)
    path_to_axon = Path.join(path_to_axon_dir, axon_name)

    # Import the ONNX graph, then serialize the Axon model and parameters to disk
    {model, parameters} = AxonOnnx.import(path_to_onnx_file)
    model_bytes = Axon.serialize(model, parameters)
    File.write!(path_to_axon, model_bytes)
  end

  defp axon_name_from_onnx_path(onnx_path) do
    model_root = onnx_path |> Path.basename() |> Path.rootname()
    "#{model_root}.axon"
  end
end

    ONNX model

For this example, we'll use a couple of ONNX models that have been saved in the Huggingface Hub.

The ONNX models were trained in Fast.ai (PyTorch) using the following notebooks:

To repeat this notebook, the onnx files for this notebook can be found on huggingface hub. Download the onnx models from:

Download the files and place them in a directory of your choice. By default, we will assume you downloaded them to the same directory as the notebook:

File.cd!(__DIR__)

Now let's convert an ONNX model into Axon:

path_to_onnx_file = "cats_v_dogs.onnx"
path_to_axon_dir = "."
OnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)

path_to_onnx_file = "cat_dog_breeds.onnx"
path_to_axon_dir = "."
OnnxToAxon.onnx_axon(path_to_onnx_file, path_to_axon_dir)
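As a quick sanity check (our addition, not part of the original notebook), you can list the working directory to confirm both .axon files were written:

File.ls!(".")
|> Enum.filter(&String.ends_with?(&1, ".axon"))
# => ["cat_dog_breeds.axon", "cats_v_dogs.axon"] (order may vary)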

    Inference on ONNX derived models

To run inference on the model, you'll need 10 images focused on cats or dogs. You can download the images used in training the model at:

"https://s3.amazonaws.com/fast-ai-imageclas/oxford-iiit-pet.tgz"

Or you can find or use your own images. In this notebook, we are going to use the local copies of the Oxford Pets dataset that was used in training the model.

Let's load the Axon model.

cats_v_dogs = File.read!("cats_v_dogs.axon")
{cats_v_dogs_model, cats_v_dogs_params} = Axon.deserialize(cats_v_dogs)

We need a tensor representation of an image. Let's start by looking at samples of our data.

File.read!("oxford-iiit-pet/images/havanese_71.jpg")
|> Kino.Image.new(:jpeg)

To manipulate the images, we will use the StbImage library:

{:ok, img} = StbImage.read_file("oxford-iiit-pet/images/havanese_71.jpg")
%StbImage{data: binary, shape: shape, type: type} = StbImage.resize(img, 224, 224)

Now let's work on a batch of images and convert them to tensors. Here are the images we will work with:

file_names = [
  "havanese_71.jpg",
  "yorkshire_terrier_9.jpg",
  "Sphynx_206.jpg",
  ...
  "British_Shorthair_122.jpg",
  "Russian_Blue_20.jpg",
  "boxer_99.jpg"
]

Next we resize the images:

resized_images =
  Enum.map(file_names, fn file_name ->
    ("oxford-iiit-pet/images/" <> file_name)
    |> IO.inspect(label: file_name)
    |> StbImage.read_file!()
    |> StbImage.resize(224, 224)
  end)

And finally convert them into tensors by using StbImage.to_nx/1. The created tensor will have three axes, named :height, :width, and :channels respectively. Our goal is to stack the tensors, then normalize and transpose their axes to the order expected by the neural network:

img_tensors =
  resized_images
  |> Enum.map(&StbImage.to_nx/1)
  |> Nx.stack(name: :index)
  |> Nx.divide(255.0)
  |> Nx.transpose(axes: [:index, :channels, :height, :width])

With our input data, it is finally time to work on predictions. First let's define a helper module:

    defmodule Predictions do
       @doc """
       When provided a Tensor of single label predictions, returns the best vocabulary match for
       each row in the prediction tensor.
  ...

  # ["dog", "cat", "dog"]
  """
  def single_label_classification(predictions_batch, vocabulary) do
    IO.inspect(Nx.shape(predictions_batch), label: "predictions batch shape")

    for prediction_tensor <- Nx.to_batched(predictions_batch, 1) do
      # Pair each score with its label and take the pair with the highest score
      {_prediction_value, prediction_label} =
        prediction_tensor
        |> Nx.to_flat_list()
        |> Enum.zip(vocabulary)
        |> Enum.max()

      prediction_label
    end
  end
end

Now we deserialize the model

{cats_v_dogs_model, cats_v_dogs_params} = Axon.deserialize(cats_v_dogs)

run a prediction using the EXLA compiler for performance

tensor_of_predictions =
  Axon.predict(cats_v_dogs_model, cats_v_dogs_params, img_tensors, compiler: EXLA)

and finally retrieve the predicted label

dog_cat_vocabulary = [
       "dog",
       "cat"
]

Predictions.single_label_classification(tensor_of_predictions, dog_cat_vocabulary)

Let's repeat the above process for the dog and cat breed model.

cat_dog_vocabulary = [
       "abyssinian",
       "american_bulldog",
       "american_pit_bull_terrier",
  ...
  "staffordshire_bull_terrier",
  "wheaten_terrier",
  "yorkshire_terrier"
]

cat_dog_breeds = File.read!("cat_dog_breeds.axon")
{cat_dog_breeds_model, cat_dog_breeds_params} = Axon.deserialize(cat_dog_breeds)

Axon.predict(cat_dog_breeds_model, cat_dog_breeds_params, img_tensors)
|> Predictions.single_label_classification(cat_dog_vocabulary)

For cat and dog breeds, the model performed pretty well, but it was not perfect.

    diff --git a/search.html b/search.html index eaee9be5..a1838af7 100644 --- a/search.html +++ b/search.html @@ -16,7 +16,7 @@ - + @@ -128,7 +128,7 @@

    - +

diff --git a/sequential_models.html b/sequential_models.html
index fc0e2c3e..5f523a78 100644
--- a/sequential_models.html
+++ b/sequential_models.html

Mix.install([
  {:axon, ">= 0.5.0"},
  {:kino, ">= 0.9.0"}
])
:ok

    Creating a sequential model

    In the last guide, you created a simple identity model which just returned the input. Of course, you would never actually use Axon for such purposes. You want to create real neural networks!

In equivalent frameworks in the Python ecosystem such as Keras and PyTorch, there is a concept of sequential models. Sequential models are named after the sequential way data flows through them: they transform the input with a series of successive transformations.

    If you're an experienced Elixir programmer, this paradigm of sequential transformations might sound a lot like what happens when using the pipe (|>) operator. In Elixir, it's common to see code blocks like:

list
|> Enum.map(fn x -> x + 1 end)
|> Enum.filter(&(rem(&1, 2) == 0))
|> Enum.count()

The snippet above passes list through a sequence of transformations. You can apply this same paradigm in Axon to create sequential models. In fact, creating sequential models is so natural with Elixir's pipe operator that Axon does not need a distinct sequential construct. To create a sequential model, you just pass Axon models through successive transformations in the Axon API:

model =
  Axon.input("data")
  |> Axon.dense(32)
  |> Axon.activation(:relu)
  |> Axon.dropout(rate: 0.5)
  |> Axon.dense(1)
  |> Axon.activation(:softmax)
#Axon<
  inputs: %{"data" => nil}
  outputs: "softmax_0"
  nodes: 6
>

If you visualize this model, it's easy to see how data flows sequentially through it:

template = Nx.template({2, 16}, :f32)
Axon.Display.as_graph(model, template)
graph TD;
     3[/"data (:input) {2, 16}"/];
     4["dense_0 (:dense) {2, 32}"];
     5["relu_0 (:relu) {2, 32}"];
    @@ -170,72 +170,72 @@ 

    6 --> 7; 5 --> 6; 4 --> 5; -3 --> 4;

    Your model is more involved and as a result so is the execution graph! Now, using the same constructs from the last section, you can build and run your model:

    {init_fn, predict_fn} = Axon.build(model)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
    - #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}
    params = init_fn.(template, %{})
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[32]
    -      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[16][32]
    -      [
    -        [0.21433714032173157, -0.04525795578956604, 0.32405969500541687, -0.06933712959289551, -0.24735209345817566, 0.1957167088985443, -0.2714379131793976, -0.34026962518692017, 0.03781759738922119, -0.16317953169345856, -0.1272507756948471, -0.08459293842315674, 0.20401403307914734, 0.26613888144493103, -0.3234696388244629, 0.295791357755661, 0.29850414395332336, -0.22220905125141144, -0.33034151792526245, 0.32582345604896545, -0.19104702770709991, -0.3434463143348694, 0.031930625438690186, 0.32875487208366394, 0.17335721850395203, -0.0336279571056366, -0.02203202247619629, -0.30805233120918274, 0.01472097635269165, 0.293319970369339, 0.17995354533195496, 0.09916016459465027],
    -        [-0.33202630281448364, -0.09507006406784058, -0.12178492546081543, -0.005500674247741699, -0.24997547268867493, 0.31693217158317566, 0.31857630610466003, 0.13662374019622803, 0.11216515302658081, -0.2711845338344574, -0.18932600319385529, -0.10278302431106567, -0.1910824328660965, -0.15239068865776062, 0.2373746931552887, ...],
    +3 --> 4;

    Your model is more involved and as a result so is the execution graph! Now, using the same constructs from the last section, you can build and run your model:

    {init_fn, predict_fn} = Axon.build(model)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
    + #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}
    params = init_fn.(template, %{})
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[32]
    +      [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[16][32]
    +      [
    +        [0.21433714032173157, -0.04525795578956604, 0.32405969500541687, -0.06933712959289551, -0.24735209345817566, 0.1957167088985443, -0.2714379131793976, -0.34026962518692017, 0.03781759738922119, -0.16317953169345856, -0.1272507756948471, -0.08459293842315674, 0.20401403307914734, 0.26613888144493103, -0.3234696388244629, 0.295791357755661, 0.29850414395332336, -0.22220905125141144, -0.33034151792526245, 0.32582345604896545, -0.19104702770709991, -0.3434463143348694, 0.031930625438690186, 0.32875487208366394, 0.17335721850395203, -0.0336279571056366, -0.02203202247619629, -0.30805233120918274, 0.01472097635269165, 0.293319970369339, 0.17995354533195496, 0.09916016459465027],
    +        [-0.33202630281448364, -0.09507006406784058, -0.12178492546081543, -0.005500674247741699, -0.24997547268867493, 0.31693217158317566, 0.31857630610466003, 0.13662374019622803, 0.11216515302658081, -0.2711845338344574, -0.18932600319385529, -0.10278302431106567, -0.1910824328660965, -0.15239068865776062, 0.2373746931552887, ...],
             ...
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.0]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[32][1]
    -      [
    -        [-0.22355356812477112],
    -        [0.09599864482879639],
    -        [0.06676572561264038],
    -        [-0.06866732239723206],
    -        [0.1822824478149414],
    -        [0.1860904097557068],
    -        [-0.3795042335987091],
    -        [-0.18182222545146942],
    -        [0.4170041084289551],
    -        [0.1812545657157898],
    -        [0.18777817487716675],
    -        [-0.15454193949699402],
    -        [0.16937363147735596],
    -        [-0.007449895143508911],
    -        [0.421792209148407],
    -        [-0.3314356803894043],
    -        [-0.29834187030792236],
    -        [0.3285354971885681],
    -        [0.034806013107299805],
    -        [0.1091541051864624],
    -        [-0.385672390460968],
    -        [0.004853636026382446],
    -        [0.3387643098831177],
    -        [0.03320261836051941],
    -        [0.3905656933784485],
    -        [-0.3835979700088501],
    -        [-0.06302008032798767],
    -        [0.03648516535758972],
    -        [0.24170255661010742],
    -        [0.01687285304069519],
    -        [-0.017035305500030518],
    -        [-0.2674438953399658]
    -      ]
    -    >
    -  }
    -}

    Wow! Notice that this model actually has trainable parameters. You can see that the parameter map is just a regular Elixir map. Each top-level entry maps to a layer with a key corresponding to that layer's name and a value corresponding to that layer's trainable parameters. Each layer's individual trainable parameters are given layer-specific names and map directly to Nx tensors.
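For example, you can pull out a single layer's kernel with ordinary map access; a quick check we've added, whose result matches the f32[16][32] tensor shown above:

params["dense_0"]["kernel"] |> Nx.shape()
{16, 32}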

    Now you can use these params with your predict_fn:

predict_fn.(params, Nx.iota({2, 16}, type: :f32))
#Nx.Tensor<
  f32[2][1]
  [
    [1.0],
    [1.0]
  ]
>

And voila! You've successfully created and used a sequential model in Axon!

diff --git a/training_and_inference_mode.html b/training_and_inference_mode.html
index b0fe221f..ca1b8039 100644
--- a/training_and_inference_mode.html
+++ b/training_and_inference_mode.html

Mix.install([
  {:axon, ">= 0.5.0"}
])
:ok

    Executing models in inference mode

Some layers have different considerations and behavior when running during model training versus model inference. For example, dropout layers are intended only to be used during training as a form of model regularization. Certain stateful layers like batch normalization keep a running internal state which changes during training mode but remains fixed during inference mode. Axon supports mode-dependent execution behavior via the :mode option passed to all building, compilation, and execution methods. By default, all models build in inference mode. You can see this behavior by adding a dropout layer with a dropout rate of nearly 1. In inference mode this layer will have no effect:

inputs = Nx.iota({2, 8}, type: :f32)

model =
  Axon.input("data")
  |> Axon.dense(4)
  |> Axon.sigmoid()
  |> Axon.dropout(rate: 0.99)
  |> Axon.dense(1)

{init_fn, predict_fn} = Axon.build(model)
params = init_fn.(inputs, %{})
predict_fn.(params, inputs)
#Nx.Tensor<
  f32[2][1]
  [
    [0.6900148391723633],
    [1.1159517765045166]
  ]
>

You can also explicitly specify the mode:

{init_fn, predict_fn} = Axon.build(model, mode: :inference)
params = init_fn.(inputs, %{})
predict_fn.(params, inputs)
#Nx.Tensor<
  f32[2][1]
  [
    [-1.1250841617584229],
    [-1.161189317703247]
  ]
>

It's important that you know which mode your models were compiled for, as running a model built in :inference mode will behave drastically differently than a model built in :train mode.
    Executing models in training mode

By specifying mode: :train, you tell your models to execute in training mode. You can see the effects of this behavior here:

{init_fn, predict_fn} = Axon.build(model, mode: :train)
params = init_fn.(inputs, %{})
predict_fn.(params, inputs)
%{
  prediction: #Nx.Tensor<
    f32[2][1]
    [
      [0.0],
      [0.0]
    ]
  >,
  state: %{
    "dropout_0" => %{
      "key" => #Nx.Tensor<
        u32[2]
        [309162766, 2699730300]
      >
    }
  }
}

First, notice that your model now returns a map with keys :prediction and :state. :prediction contains the actual model prediction, while :state contains the updated state for any stateful layers such as batch norm. When writing custom training loops, you should extract :state and use it in conjunction with the updates API to ensure your stateful layers are updated correctly; a short sketch of this follows at the end of this section. If your model has stateful layers, :state will look similar to your model's parameter map:

model =
  Axon.input("data")
  |> Axon.dense(4)
  |> Axon.sigmoid()
  |> Axon.batch_norm()
  |> Axon.dense(1)

{init_fn, predict_fn} = Axon.build(model, mode: :train)
params = init_fn.(inputs, %{})
predict_fn.(params, inputs)
%{
  prediction: #Nx.Tensor<
    f32[2][1]
    [
      [0.4891311526298523],
      [-0.4891311228275299]
    ]
  >,
  state: %{
    "batch_norm_0" => %{
      "mean" => #Nx.Tensor<
        f32[4]
        [0.525083601474762, 0.8689039349555969, 0.03931800276041031, 0.0021854371298104525]
      >,
      "var" => #Nx.Tensor<
        f32[4]
        [0.13831248879432678, 0.10107331722974777, 0.10170891880989075, 0.10000484436750412]
      >
    }
  }
}
    diff --git a/using_loop_event_handlers.html b/using_loop_event_handlers.html index 399457a0..53f7920c 100644 --- a/using_loop_event_handlers.html +++ b/using_loop_event_handlers.html @@ -14,7 +14,7 @@ - + @@ -136,15 +136,15 @@

Mix.install([
  {:axon, ">= 0.5.0"}
])
:ok

    Adding event handlers to training loops

Oftentimes you want more fine-grained control over things that happen during loop execution. For example, you might want to save loop state to a file every 500 iterations, or log some output to :stdout at the end of every epoch. Axon loops allow this fine-grained control via events and event handlers.

Axon fires a number of events during loop execution which allow you to instrument various points in the loop execution cycle. You can attach event handlers to any of these events:

events = [
       :started,             # After loop state initialization
       :epoch_started,       # On epoch start
       :iteration_started,   # On iteration start
  :iteration_completed, # On iteration complete
  :epoch_completed,     # On epoch complete
  :epoch_halted,        # On epoch halt, if early halted
  :halted,              # On loop halt, if early halted
  :completed            # On loop completion
]

Axon packages a number of common loop event handlers for you out of the box. These handlers should cover most of the common event handlers you would need to write in practice. Axon also allows for custom event handlers. See Writing custom event handlers for more information; a brief sketch also follows the example below.

An event handler will take the current loop state at the time of the fired event, and alter or use it in some way before returning control back to the main loop execution. You can attach any of Axon's pre-packaged event handlers to a loop by using the function directly. For example, if you want to checkpoint loop state at the end of every epoch, you can use Axon.Loop.checkpoint/2:

model =
  Axon.input("data")
  |> Axon.dense(8)
  |> Axon.relu()
  |> Axon.dense(4)
  |> Axon.relu()
  |> Axon.dense(1)

loop =
  model
  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
  |> Axon.Loop.checkpoint(event: :epoch_completed)
#Axon.Loop<
  metrics: %{
    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
  },
  handlers: %{
    completed: [],
    epoch_completed: [
      {#Function<17.37390314/1 in Axon.Loop.checkpoint/2>,
       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>},
      {#Function<27.37390314/1 in Axon.Loop.log/3>,
       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    ],
    epoch_halted: [],
    epoch_started: [],
    halted: [],
    iteration_completed: [
      {#Function<27.37390314/1 in Axon.Loop.log/3>,
       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    ],
    iteration_started: [],
    started: []
  },
  ...
>
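Fully custom handlers attach in much the same way. A hedged sketch of ours; it assumes Axon.Loop.handle_event/4, which the Writing custom event handlers guide covers in depth:

logging_loop =
  model
  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
  |> Axon.Loop.handle_event(:iteration_completed, fn state ->
    IO.puts("finished iteration #{state.iteration}")
    # Return {:continue, state} to hand control back to the loop
    {:continue, state}
  end)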

Now when you execute your loop, it will save a checkpoint at the end of every epoch:

train_data =
  Stream.repeatedly(fn ->
    {xs, _next_key} =
      :random.uniform(9999)
      |> Nx.Random.key()
      |> Nx.Random.normal(shape: {8, 1})

    ys = Nx.sin(xs)
    {xs, ys}
  end)

Axon.Loop.run(loop, train_data, %{}, epochs: 5, iterations: 100)
Epoch: 0, Batch: 50, loss: 0.5345965
     Epoch: 1, Batch: 50, loss: 0.4578816
     Epoch: 2, Batch: 50, loss: 0.4527244
     Epoch: 3, Batch: 50, loss: 0.4466343
Epoch: 4, Batch: 50, loss: 0.4401709
%{
  "dense_0" => %{
    "bias" => #Nx.Tensor<
      f32[8]
      [-0.1074252650141716, -0.0033432210329920053, -0.08044778555631638, 0.0016452680574730039, -0.01557128969579935, -0.061440952122211456, 0.061030879616737366, 0.012781506404280663]
    >,
    "kernel" => #Nx.Tensor<
      f32[1][8]
      [
        [-0.3504936695098877, 0.6722151041030884, -0.5550820231437683, 0.05254736915230751, 0.7404129505157471, -0.24307608604431152, -0.7073894739151001, 0.6447222828865051]
      ]
    >
  },
  "dense_1" => %{
    "bias" => #Nx.Tensor<
      f32[4]
      [-0.19830459356307983, 0.0, 0.0, -0.04925372824072838]
    >,
    "kernel" => #Nx.Tensor<
      f32[8][4]
      [
        [0.4873020648956299, -0.3363800644874573, -0.6058675050735474, -0.47888076305389404],
        [-0.18936580419540405, -0.5579301714897156, -0.49217337369918823, 0.04828363656997681],
        [0.3202762305736542, -0.033479928970336914, 0.11928367614746094, -0.5225698351860046],
        [0.3883931040763855, 0.07413274049758911, 0.548823893070221, -0.03494540974497795],
        [-0.2598196268081665, -0.4546756446361542, 0.5866180062294006, 0.2946240305900574],
        [0.2722054719924927, -0.5802338123321533, 0.4854300618171692, -0.5049118399620056],
        [-0.415179044008255, -0.5426293611526489, -0.1631108522415161, -0.6544353365898132],
        [-0.3079695403575897, 0.09391731023788452, -0.40262123942375183, -0.27837851643562317]
      ]
    >
  },
  "dense_2" => %{
    "bias" => #Nx.Tensor<
      f32[1]
      [0.016238097101449966]
    >,
    "kernel" => #Nx.Tensor<
      f32[4][1]
      [
        [0.3102125823497772],
        [-1.078292727470398],
        [0.7910841703414917],
        [0.014510140754282475]
      ]
    >
  }
}

You can also use event handlers for things as simple as implementing custom logging with the pre-packaged Axon.Loop.log/4 event handler:

model
|> Axon.Loop.trainer(:mean_squared_error, :sgd)
|> Axon.Loop.log(fn _state -> "epoch is over\n" end, event: :epoch_completed, device: :stdio)
|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)
Epoch: 0, Batch: 50, loss: 0.3220241
     epoch is over
     Epoch: 1, Batch: 50, loss: 0.2309804
     epoch is over
Epoch: 3, Batch: 50, loss: 0.1457551
epoch is over
Epoch: 4, Batch: 50, loss: 0.1247821
epoch is over

%{
  "dense_0" => %{
    "bias" => #Nx.Tensor<
      f32[8]
      [0.01846296526491642, -0.0016654117498546839, 0.39859917759895325, 0.21187178790569305, 0.08815062046051025, -0.11071830987930298, 0.06280634552240372, -0.11682439595460892]
    >,
    "kernel" => #Nx.Tensor<
      f32[1][8]
      [
        [0.08840499818325043, 0.44253841042518616, -0.6063749194145203, -0.1487167924642563, 0.24857401847839355, 0.1697462797164917, -0.5370600819587708, 0.1658734828233719]
      ]
    >
  },
  "dense_1" => %{
    "bias" => #Nx.Tensor<
      f32[4]
      [-0.08111556619405746, 0.32310858368873596, -0.059386227279901505, -0.09515857696533203]
    >,
    "kernel" => #Nx.Tensor<
      f32[8][4]
      [
        [0.6057762503623962, -0.2633209824562073, 0.23028653860092163, -0.2710704505443573],
        [0.03961030766367912, -0.335278183221817, 0.16016681492328644, 0.10653878003358841],
        [0.36239713430404663, 0.8330743312835693, 0.4745633602142334, -0.29585230350494385],
        [-0.04394621402025223, 0.45401355624198914, 0.5953336954116821, -0.6513576507568359],
        [-0.6447072625160217, -0.6225455403327942, -0.4814218580722809, 0.6882413625717163],
        [-0.44460421800613403, -0.04251839220523834, 0.4619944095611572, 0.24515877664089203],
        [-0.49396005272865295, -0.08895684778690338, 0.5212237238883972, 0.24301064014434814],
        [0.3074108958244324, 0.2640342712402344, 0.4197620749473572, -0.05698487162590027]
      ]
    >
  },
  "dense_2" => %{
    "bias" => #Nx.Tensor<
      f32[1]
      [0.6520459651947021]
    >,
    "kernel" => #Nx.Tensor<
      f32[4][1]
      [
        [0.45083022117614746],
        [-0.8733288049697876],
        [-0.1894296556711197],
        [0.030911535024642944]
      ]
    >
  }
}

For even more fine-grained control over when event handlers fire, you can add filters. For example, if you only want to checkpoint loop state every 2 epochs, you can use a filter:

model
|> Axon.Loop.trainer(:mean_squared_error, :sgd)
|> Axon.Loop.checkpoint(event: :epoch_completed, filter: [every: 2])
|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)
Epoch: 0, Batch: 50, loss: 0.3180207
     Epoch: 1, Batch: 50, loss: 0.1975918
     Epoch: 2, Batch: 50, loss: 0.1353940
     Epoch: 3, Batch: 50, loss: 0.1055405
    -Epoch: 4, Batch: 50, loss: 0.0890203
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.047411054372787476, 0.1582564115524292, -0.027924394235014915, 0.1774083375930786, 0.09764095395803452, 0.1040089949965477, 0.006841400172561407, -0.11682236939668655]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [0.20366023480892181, 0.7318703532218933, -0.028611917048692703, -0.5324040055274963, -0.6856501698493958, 0.21694214642047882, 0.3281741738319397, -0.13051153719425201]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.1859581470489502, 0.3360026180744171, 0.24061667919158936, -0.016354668885469437]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.07366377860307693, -0.3261552155017853, -0.6951385140419006, -0.4232194125652313],
    -        [0.7334840893745422, -0.17827139794826508, -0.6411628127098083, -0.41898131370544434],
    -        [0.4770638346672058, -0.4738321304321289, 0.5755389332771301, 0.30976954102516174],
    -        [-0.498087614774704, 0.10546410828828812, 0.690037190914154, -0.5016340613365173],
    -        [0.17509347200393677, 0.4518563449382782, -0.10358063131570816, 0.2223401516675949],
    -        [0.6422480344772339, 0.19363932311534882, 0.2870054543018341, -0.1483648419380188],
    -        [-0.10362248122692108, -0.7047968506813049, 0.02847556211054325, -0.18464618921279907],
    -        [-0.6756409406661987, -0.42686882615089417, -0.5484509468078613, 0.596512496471405]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.23296000063419342]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.48827823996543884],
    -        [-0.7908728122711182],
    -        [-0.5326805114746094],
    -        [0.3789232671260834]
    -      ]
    -    >
    -  }
    -}

    Axon event handlers support both keyword and function filters. Keyword filters include keywords such as :every, :once, and :always. Function filters are arity-1 functions which accept the current loop state and return a boolean.

    +
    Epoch: 4, Batch: 50, loss: 0.0890203
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.047411054372787476, 0.1582564115524292, -0.027924394235014915, 0.1774083375930786, 0.09764095395803452, 0.1040089949965477, 0.006841400172561407, -0.11682236939668655]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [0.20366023480892181, 0.7318703532218933, -0.028611917048692703, -0.5324040055274963, -0.6856501698493958, 0.21694214642047882, 0.3281741738319397, -0.13051153719425201]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.1859581470489502, 0.3360026180744171, 0.24061667919158936, -0.016354668885469437]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.07366377860307693, -0.3261552155017853, -0.6951385140419006, -0.4232194125652313],
    +        [0.7334840893745422, -0.17827139794826508, -0.6411628127098083, -0.41898131370544434],
    +        [0.4770638346672058, -0.4738321304321289, 0.5755389332771301, 0.30976954102516174],
    +        [-0.498087614774704, 0.10546410828828812, 0.690037190914154, -0.5016340613365173],
    +        [0.17509347200393677, 0.4518563449382782, -0.10358063131570816, 0.2223401516675949],
    +        [0.6422480344772339, 0.19363932311534882, 0.2870054543018341, -0.1483648419380188],
    +        [-0.10362248122692108, -0.7047968506813049, 0.02847556211054325, -0.18464618921279907],
    +        [-0.6756409406661987, -0.42686882615089417, -0.5484509468078613, 0.596512496471405]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.23296000063419342]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.48827823996543884],
    +        [-0.7908728122711182],
    +        [-0.5326805114746094],
    +        [0.3789232671260834]
    +      ]
    +    >
    +  }
    +}

    Axon event handlers support both keyword and function filters. Keyword filters include keywords such as :every, :once, and :always. Function filters are arity-1 functions which accept the current loop state and return a boolean.
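For example, here is a minimal sketch (reusing the model and train_data from above) combining both styles: a keyword filter on the checkpoint handler, and a hypothetical arity-1 function filter passed as the fourth argument to Axon.Loop.handle_event/4 (the exact registration call is an assumption; the filter semantics are as described above):

model
|> Axon.Loop.trainer(:mean_squared_error, :sgd)
# keyword filter: checkpoint on every 2nd epoch
|> Axon.Loop.checkpoint(event: :epoch_completed, filter: [every: 2])
# function filter: fire the handler only once training is past epoch 2
|> Axon.Loop.handle_event(
  :epoch_completed,
  fn state ->
    IO.puts("late epoch completed")
    {:continue, state}
  end,
  fn %Axon.Loop.State{epoch: epoch} -> epoch > 2 end
)
|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)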

diff --git a/writing_custom_event_handlers.html b/writing_custom_event_handlers.html
index 57a61c19..80122839 100644
--- a/writing_custom_event_handlers.html
+++ b/writing_custom_event_handlers.html
@@ -14,7 +14,7 @@
-
+
@@ -136,68 +136,68 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"}
    +])
    :ok

    Writing custom event handlers

    -

    If you require functionality not offered by any of Axon's built-in event handlers, then you'll need to write a custom event handler. Custom event handlers are functions which accept loop state, perform some action, and then defer execution back to the main loop. For example, you can write custom loop handlers which visualize model outputs, communicate with an external Kino process, or simply halt the loop based on some criteria.

    All event handlers must accept an %Axon.Loop.State{} struct and return a tuple of {control_term, state} where control_term is one of :continue, :halt_epoch, or :halt_loop and state is the updated loop state:

    defmodule CustomEventHandler0 do
    +

    If you require functionality not offered by any of Axon's built-in event handlers, then you'll need to write a custom event handler. Custom event handlers are functions which accept loop state, perform some action, and then defer execution back to the main loop. For example, you can write custom loop handlers which visualize model outputs, communicate with an external Kino process, or simply halt the loop based on some criteria.

    All event handlers must accept an %Axon.Loop.State{} struct and return a tuple of {control_term, state} where control_term is one of :continue, :halt_epoch, or :halt_loop and state is the updated loop state:

    defmodule CustomEventHandler0 do
       alias Axon.Loop.State
     
    -  def my_weird_handler(%State{} = state) do
    -    IO.puts("My weird handler: fired")
    -    {:continue, state}
    -  end
    -end
    {:module, CustomEventHandler0, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:my_weird_handler, 1}}

To register event handlers, you use Axon.Loop.handle_event/4:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    +  def my_weird_handler(%State{} = state) do
    +    IO.puts("My weird handler: fired")
    +    {:continue, state}
    +  end
    +end
    {:module, CustomEventHandler0, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:my_weird_handler, 1}}

To register event handlers, you use Axon.Loop.handle_event/4:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
     
     loop =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -  |> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler0.my_weird_handler/1)
    #Axon.Loop<
    -  metrics: %{
    -    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    -  },
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [
    -      {&CustomEventHandler0.my_weird_handler/1,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>},
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +  |> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler0.my_weird_handler/1)
    #Axon.Loop<
    +  metrics: %{
    +    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    +  },
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [
    +      {&CustomEventHandler0.my_weird_handler/1,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>},
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    Axon will trigger your custom handler to run on the attached event:

    train_data =
    -  Stream.repeatedly(fn ->
    -    {xs, _next_key} =
    -      :random.uniform(9999)
    -      |> Nx.Random.key()
    -      |> Nx.Random.normal(shape: {8, 1})
    -
    -    ys = Nx.sin(xs)
    -    {xs, ys}
    -  end)
    -
    -Axon.Loop.run(loop, train_data, %{}, epochs: 5, iterations: 100)
    Epoch: 0, Batch: 50, loss: 0.0990703
    +>

    Axon will trigger your custom handler to run on the attached event:

    train_data =
    +  Stream.repeatedly(fn ->
    +    {xs, _next_key} =
    +      :random.uniform(9999)
    +      |> Nx.Random.key()
    +      |> Nx.Random.normal(shape: {8, 1})
    +
    +    ys = Nx.sin(xs)
    +    {xs, ys}
    +  end)
    +
    +Axon.Loop.run(loop, train_data, %{}, epochs: 5, iterations: 100)
    Epoch: 0, Batch: 50, loss: 0.0990703
     My weird handler: fired
     Epoch: 1, Batch: 50, loss: 0.0567622
     My weird handler: fired
    @@ -206,128 +206,128 @@ 

 Epoch: 3, Batch: 50, loss: 0.0462587
 My weird handler: fired
 Epoch: 4, Batch: 50, loss: 0.0452806
-My weird handler: fired

    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.10819189250469208, 0.008151392452418804, -0.0318693183362484, 0.010302421636879444, 0.15788722038269043, 0.05119801685214043, 0.14268818497657776, -0.11528034508228302]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.4275593161582947, 0.40442031621932983, 0.7287659645080566, -0.7832129597663879, 0.3329123258590698, -0.5598123073577881, 0.8389336466789246, 0.3197469413280487]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.0671013742685318, 0.13561469316482544, 0.06218714639544487, 0.2104845941066742]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.4444102942943573, 0.4518184959888458, 0.45315614342689514, 0.35392478108406067],
    -        [0.008407601155340672, -0.6081852912902832, -0.05863206833600998, 0.14386630058288574],
    -        [-0.010219200514256954, -0.5528244376182556, 0.3754919469356537, -0.6242967247962952],
    -        [0.3531058132648468, -0.18348301947116852, -0.0019897441379725933, 0.41002658009529114],
    -        [0.676723062992096, -0.09349705278873444, 0.1101854145526886, 0.06494166702032089],
    -        [0.1534113883972168, 0.6402403116226196, 0.23490086197853088, -0.2196572870016098],
    -        [0.5835862755775452, -0.6581316590309143, -0.3047991394996643, -0.07485166192054749],
    -        [-0.6115342378616333, 0.3316897749900818, -0.3606548309326172, 0.3397740423679352]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.10111129283905029]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.7433153390884399],
    -        [-0.8213723301887512],
    -        [-0.44361063838005066],
    -        [-1.049617052078247]
    -      ]
    -    >
    -  }
    -}

    You can use event handlers to early-stop a loop or loop epoch by returning a :halt_* control term. Halt control terms can be one of :halt_epoch or :halt_loop. :halt_epoch halts the current epoch and continues to the next. :halt_loop halts the loop altogether.

    defmodule CustomEventHandler1 do
    +My weird handler: fired
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.10819189250469208, 0.008151392452418804, -0.0318693183362484, 0.010302421636879444, 0.15788722038269043, 0.05119801685214043, 0.14268818497657776, -0.11528034508228302]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.4275593161582947, 0.40442031621932983, 0.7287659645080566, -0.7832129597663879, 0.3329123258590698, -0.5598123073577881, 0.8389336466789246, 0.3197469413280487]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.0671013742685318, 0.13561469316482544, 0.06218714639544487, 0.2104845941066742]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.4444102942943573, 0.4518184959888458, 0.45315614342689514, 0.35392478108406067],
    +        [0.008407601155340672, -0.6081852912902832, -0.05863206833600998, 0.14386630058288574],
    +        [-0.010219200514256954, -0.5528244376182556, 0.3754919469356537, -0.6242967247962952],
    +        [0.3531058132648468, -0.18348301947116852, -0.0019897441379725933, 0.41002658009529114],
    +        [0.676723062992096, -0.09349705278873444, 0.1101854145526886, 0.06494166702032089],
    +        [0.1534113883972168, 0.6402403116226196, 0.23490086197853088, -0.2196572870016098],
    +        [0.5835862755775452, -0.6581316590309143, -0.3047991394996643, -0.07485166192054749],
    +        [-0.6115342378616333, 0.3316897749900818, -0.3606548309326172, 0.3397740423679352]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.10111129283905029]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.7433153390884399],
    +        [-0.8213723301887512],
    +        [-0.44361063838005066],
    +        [-1.049617052078247]
    +      ]
    +    >
    +  }
    +}

    You can use event handlers to early-stop a loop or loop epoch by returning a :halt_* control term. Halt control terms can be one of :halt_epoch or :halt_loop. :halt_epoch halts the current epoch and continues to the next. :halt_loop halts the loop altogether.

    defmodule CustomEventHandler1 do
       alias Axon.Loop.State
     
    -  def always_halts(%State{} = state) do
    -    IO.puts("stopping loop")
    -    {:halt_loop, state}
    -  end
    -end
    {:module, CustomEventHandler1, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:always_halts, 1}}

    The loop will immediately stop executing and return the current state at the time it was halted:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler1.always_halts/1)
    -|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)
    Epoch: 0, Batch: 50, loss: 0.2201974
    -stopping loop
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.07676638662815094, -0.18689222633838654, 0.10066182911396027, -0.021994125097990036, 0.12006694823503494, -0.014219668693840504, 0.13600556552410126, -0.017512166872620583]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.5354958772659302, -0.216745987534523, -0.5694359540939331, 0.023495405912399292, 0.17701618373394012, 0.011712944135069847, 0.5289720892906189, 0.07360327988862991]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.0012482400052249432, 0.09300543367862701, 0.08570009469985962, -0.018982920795679092]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.3016211688518524, 0.31998082995414734, -0.3300730884075165, 0.24982869625091553],
    -        [0.03864569962024689, -0.44071364402770996, 0.6553062200546265, -0.5294798612594604],
    -        [0.25020459294319153, 0.7249991297721863, 0.15611837804317474, -0.5045580863952637],
    -        [-0.5500670075416565, 0.15677094459533691, -0.6531851291656494, -0.09289993345737457],
    -        [0.1618722379207611, 0.4479053020477295, 0.705923318862915, -0.3853490352630615],
    -        [-0.6752215623855591, 0.577272891998291, -0.1268012821674347, 0.6133111715316772],
    -        [0.5361366271972656, -0.2996085286140442, 0.28480708599090576, 0.47739118337631226],
    -        [-0.6443014144897461, -0.2866927981376648, 0.023463081568479538, -0.1491370052099228]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.0047520860098302364]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.3796459138393402],
    -        [-0.9757304191589355],
    -        [0.9530885815620422],
    -        [-0.05134368687868118]
    -      ]
    -    >
    -  }
    -}

    Note that halting an epoch will fire a different event than completing an epoch. So if you implement a custom handler to halt the loop when an epoch completes, it will never fire if the epoch always halts prematurely:

    defmodule CustomEventHandler2 do
    +  def always_halts(%State{} = state) do
    +    IO.puts("stopping loop")
    +    {:halt_loop, state}
    +  end
    +end
    {:module, CustomEventHandler1, <<70, 79, 82, 49, 0, 0, 6, ...>>, {:always_halts, 1}}

    The loop will immediately stop executing and return the current state at the time it was halted:

    model
    +|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler1.always_halts/1)
    +|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)
    Epoch: 0, Batch: 50, loss: 0.2201974
    +stopping loop
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.07676638662815094, -0.18689222633838654, 0.10066182911396027, -0.021994125097990036, 0.12006694823503494, -0.014219668693840504, 0.13600556552410126, -0.017512166872620583]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.5354958772659302, -0.216745987534523, -0.5694359540939331, 0.023495405912399292, 0.17701618373394012, 0.011712944135069847, 0.5289720892906189, 0.07360327988862991]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.0012482400052249432, 0.09300543367862701, 0.08570009469985962, -0.018982920795679092]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.3016211688518524, 0.31998082995414734, -0.3300730884075165, 0.24982869625091553],
    +        [0.03864569962024689, -0.44071364402770996, 0.6553062200546265, -0.5294798612594604],
    +        [0.25020459294319153, 0.7249991297721863, 0.15611837804317474, -0.5045580863952637],
    +        [-0.5500670075416565, 0.15677094459533691, -0.6531851291656494, -0.09289993345737457],
    +        [0.1618722379207611, 0.4479053020477295, 0.705923318862915, -0.3853490352630615],
    +        [-0.6752215623855591, 0.577272891998291, -0.1268012821674347, 0.6133111715316772],
    +        [0.5361366271972656, -0.2996085286140442, 0.28480708599090576, 0.47739118337631226],
    +        [-0.6443014144897461, -0.2866927981376648, 0.023463081568479538, -0.1491370052099228]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.0047520860098302364]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.3796459138393402],
    +        [-0.9757304191589355],
    +        [0.9530885815620422],
    +        [-0.05134368687868118]
    +      ]
    +    >
    +  }
    +}

    Note that halting an epoch will fire a different event than completing an epoch. So if you implement a custom handler to halt the loop when an epoch completes, it will never fire if the epoch always halts prematurely:

    defmodule CustomEventHandler2 do
       alias Axon.Loop.State
     
    -  def always_halts_epoch(%State{} = state) do
    -    IO.puts("\nstopping epoch")
    -    {:halt_epoch, state}
    -  end
    -
    -  def always_halts_loop(%State{} = state) do
    -    IO.puts("stopping loop\n")
    -    {:halt_loop, state}
    -  end
    -end
    {:module, CustomEventHandler2, <<70, 79, 82, 49, 0, 0, 8, ...>>, {:always_halts_loop, 1}}

    If you run these handlers in conjunction, the loop will not terminate prematurely:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -|> Axon.Loop.handle_event(:iteration_completed, &CustomEventHandler2.always_halts_epoch/1)
    -|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler2.always_halts_loop/1)
    -|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)
    Epoch: 0, Batch: 0, loss: 0.0000000
    +  def always_halts_epoch(%State{} = state) do
    +    IO.puts("\nstopping epoch")
    +    {:halt_epoch, state}
    +  end
    +
    +  def always_halts_loop(%State{} = state) do
    +    IO.puts("stopping loop\n")
    +    {:halt_loop, state}
    +  end
    +end
    {:module, CustomEventHandler2, <<70, 79, 82, 49, 0, 0, 8, ...>>, {:always_halts_loop, 1}}

    If you run these handlers in conjunction, the loop will not terminate prematurely:

    model
    +|> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +|> Axon.Loop.handle_event(:iteration_completed, &CustomEventHandler2.always_halts_epoch/1)
    +|> Axon.Loop.handle_event(:epoch_completed, &CustomEventHandler2.always_halts_loop/1)
    +|> Axon.Loop.run(train_data, %{}, epochs: 5, iterations: 100)
    Epoch: 0, Batch: 0, loss: 0.0000000
     stopping epoch
     
     stopping epoch
    @@ -336,54 +336,54 @@ 

    stopping epoch -stopping epoch

    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.009215549565851688, -0.005282022058963776, -0.0023747326340526342, 0.002623362001031637, 0.003890525083988905, 6.010813522152603e-4, -0.0024882694706320763, 0.0029246946796774864]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.3484582304954529, -0.39938971400260925, 0.03963512182235718, -0.3549930155277252, 0.09539157152175903, 0.5987873077392578, -0.23635399341583252, 0.01850329153239727]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [-0.00194685033056885, 0.007812315598130226, 0.01710106059908867, 0.0080711729824543]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [-0.6497661471366882, -0.3379145562648773, 0.3343344032764435, 0.4334254860877991],
    -        [-0.37884217500686646, -0.41724908351898193, -0.19513007998466492, -0.22494879364967346],
    -        [-0.42438197135925293, -0.40400123596191406, 0.5355109572410583, 0.4295356869697571],
    -        [0.15086597204208374, 0.30529624223709106, 0.002222923096269369, 0.32834741473197937],
    -        [-0.09336567670106888, 0.471781849861145, -0.06567475199699402, -0.4361487627029419],
    -        [0.23664812743663788, 0.13572633266448975, -0.13837064802646637, -0.09471122920513153],
    -        [0.6461064219474792, -0.2435072958469391, -0.04861235246062279, -0.1969985067844391],
    -        [0.17856749892234802, 0.41614532470703125, -0.06008348613977432, -0.3271574079990387]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [-0.005317525006830692]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [-0.07891849428415298],
    -        [0.32653072476387024],
    -        [-0.5885495543479919],
    -        [-0.2781771719455719]
    -      ]
    -    >
    -  }
    -}

    You may access and update any portion of the loop state. Keep in mind that event handlers are not JIT-compiled, so you should be certain to manually JIT-compile any long-running or expensive operations.

    +
    stopping epoch
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.009215549565851688, -0.005282022058963776, -0.0023747326340526342, 0.002623362001031637, 0.003890525083988905, 6.010813522152603e-4, -0.0024882694706320763, 0.0029246946796774864]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.3484582304954529, -0.39938971400260925, 0.03963512182235718, -0.3549930155277252, 0.09539157152175903, 0.5987873077392578, -0.23635399341583252, 0.01850329153239727]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [-0.00194685033056885, 0.007812315598130226, 0.01710106059908867, 0.0080711729824543]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.6497661471366882, -0.3379145562648773, 0.3343344032764435, 0.4334254860877991],
    +        [-0.37884217500686646, -0.41724908351898193, -0.19513007998466492, -0.22494879364967346],
    +        [-0.42438197135925293, -0.40400123596191406, 0.5355109572410583, 0.4295356869697571],
    +        [0.15086597204208374, 0.30529624223709106, 0.002222923096269369, 0.32834741473197937],
    +        [-0.09336567670106888, 0.471781849861145, -0.06567475199699402, -0.4361487627029419],
    +        [0.23664812743663788, 0.13572633266448975, -0.13837064802646637, -0.09471122920513153],
    +        [0.6461064219474792, -0.2435072958469391, -0.04861235246062279, -0.1969985067844391],
    +        [0.17856749892234802, 0.41614532470703125, -0.06008348613977432, -0.3271574079990387]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [-0.005317525006830692]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.07891849428415298],
    +        [0.32653072476387024],
    +        [-0.5885495543479919],
    +        [-0.2781771719455719]
    +      ]
    +    >
    +  }
    +}

    You may access and update any portion of the loop state. Keep in mind that event handlers are not JIT-compiled, so you should be certain to manually JIT-compile any long-running or expensive operations.
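For example, here is a minimal sketch (reusing the model and train_data from above, with hypothetical names jit_kernel_mean and log_kernel_mean) that JIT-compiles an expensive tensor computation once with Nx.Defn.jit/1 and only calls the compiled function inside the handler:

# compile once, outside the handler, rather than recompiling on every event
jit_kernel_mean = Nx.Defn.jit(&Nx.mean/1)

log_kernel_mean = fn %Axon.Loop.State{step_state: %{model_state: model_state}} = state ->
  # the pre-compiled computation runs on the first dense layer's kernel
  mean = jit_kernel_mean.(model_state["dense_0"]["kernel"])
  IO.puts("dense_0 kernel mean: #{Nx.to_number(mean)}")
  {:continue, state}
end

model
|> Axon.Loop.trainer(:mean_squared_error, :sgd)
|> Axon.Loop.handle_event(:epoch_completed, log_kernel_mean)
|> Axon.Loop.run(train_data, %{}, epochs: 2, iterations: 100)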

diff --git a/writing_custom_metrics.html b/writing_custom_metrics.html
index c7047287..11ec7587 100644
--- a/writing_custom_metrics.html
+++ b/writing_custom_metrics.html
@@ -14,7 +14,7 @@
-
+
@@ -136,323 +136,323 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"}
    +])
    :ok

    Writing custom metrics

    -

    When passing an atom to Axon.Loop.metric/5, Axon dispatches the function to a built-in function in Axon.Metrics. If you find you'd like to use a metric that does not exist in Axon.Metrics, you can define a custom function:

    defmodule CustomMetric do
    +

    When passing an atom to Axon.Loop.metric/5, Axon dispatches the function to a built-in function in Axon.Metrics. If you find you'd like to use a metric that does not exist in Axon.Metrics, you can define a custom function:

    defmodule CustomMetric do
       import Nx.Defn
     
    -  defn my_weird_metric(y_true, y_pred) do
    -    Nx.atan2(y_true, y_pred) |> Nx.sum()
    -  end
    -end
    {:module, CustomMetric, <<70, 79, 82, 49, 0, 0, 8, ...>>, true}

    Then you can pass that directly to Axon.Loop.metric/5. You must provide a name for your custom metric:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    +  defn my_weird_metric(y_true, y_pred) do
    +    Nx.atan2(y_true, y_pred) |> Nx.sum()
    +  end
    +end
    {:module, CustomMetric, <<70, 79, 82, 49, 0, 0, 8, ...>>, true}

    Then you can pass that directly to Axon.Loop.metric/5. You must provide a name for your custom metric:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
     
     loop =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -  |> Axon.Loop.metric(&CustomMetric.my_weird_metric/2, "my weird metric")
    #Axon.Loop<
    -  metrics: %{
    -    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},
    -    "my weird metric" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     &CustomMetric.my_weird_metric/2}
    -  },
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +  |> Axon.Loop.metric(&CustomMetric.my_weird_metric/2, "my weird metric")
    #Axon.Loop<
    +  metrics: %{
    +    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>},
    +    "my weird metric" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     &CustomMetric.my_weird_metric/2}
    +  },
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    Then when running, Axon will invoke your custom metric function and accumulate it with the given aggregator:

    train_data =
    -  Stream.repeatedly(fn ->
    -    {xs, _next_key} =
    -      :random.uniform(9999)
    -      |> Nx.Random.key()
    -      |> Nx.Random.normal(shape: {8, 1})
    -
    -    ys = Nx.sin(xs)
    -    {xs, ys}
    -  end)
    -
    -Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0681635 my weird metric: -5.2842808
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.0866982489824295, 0.4234408140182495, 0.18205422163009644, 0.34029239416122437, -0.25770726799964905, -0.07117943465709686, 0.11470477283000946, -0.027526771649718285]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.7088809013366699, 0.4486531913280487, 0.4666421115398407, 0.4163222312927246, 0.5076444149017334, 0.10119977593421936, 0.6628422141075134, -0.024421442300081253]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.2924745976924896, 0.0065560233779251575, 0.0, -0.21106423437595367]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [-0.3407173752784729, -0.6905813217163086, -0.5984221696853638, -0.23955762386322021],
    -        [0.42608022689819336, 0.5949274301528931, -0.24687853455543518, -0.4948572516441345],
    -        [0.27617380023002625, -0.44326621294021606, -0.5848686099052429, 0.31592807173728943],
    -        [0.5401414632797241, -0.1041281446814537, -0.4072037935256958, 0.4387882947921753],
    -        [-0.5410752892494202, 0.4544697403907776, -0.6238576173782349, -0.2077195793390274],
    -        [-0.41753143072128296, -0.11599045991897583, -0.22447934746742249, -0.5805748701095581],
    -        [0.1651047021150589, -0.526184618473053, 0.34729963541030884, 0.3307822048664093],
    -        [0.6879482865333557, 0.27184563875198364, -0.4907835125923157, -0.3555335998535156]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [-0.8146252036094666]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [1.2187021970748901],
    -        [0.13001228868961334],
    -        [0.2703772783279419],
    -        [-0.3591017723083496]
    -      ]
    -    >
    -  }
    -}

While the metric defaults are designed with supervised training loops in mind, they can be used for much more flexible purposes. By default, metrics look for the fields :y_true and :y_pred in the given loop's step state. They then apply the given metric function on those inputs. You can also define metrics which work on other fields. For example, you can track the running average of a given parameter with a metric just by defining a custom output transform:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    -
    -output_transform = fn %{model_state: model_state} ->
    -  [model_state["dense_0"]["kernel"]]
    -end
    +>

    Then when running, Axon will invoke your custom metric function and accumulate it with the given aggregator:

    train_data =
    +  Stream.repeatedly(fn ->
    +    {xs, _next_key} =
    +      :random.uniform(9999)
    +      |> Nx.Random.key()
    +      |> Nx.Random.normal(shape: {8, 1})
    +
    +    ys = Nx.sin(xs)
    +    {xs, ys}
    +  end)
    +
    +Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0681635 my weird metric: -5.2842808
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.0866982489824295, 0.4234408140182495, 0.18205422163009644, 0.34029239416122437, -0.25770726799964905, -0.07117943465709686, 0.11470477283000946, -0.027526771649718285]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.7088809013366699, 0.4486531913280487, 0.4666421115398407, 0.4163222312927246, 0.5076444149017334, 0.10119977593421936, 0.6628422141075134, -0.024421442300081253]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.2924745976924896, 0.0065560233779251575, 0.0, -0.21106423437595367]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.3407173752784729, -0.6905813217163086, -0.5984221696853638, -0.23955762386322021],
    +        [0.42608022689819336, 0.5949274301528931, -0.24687853455543518, -0.4948572516441345],
    +        [0.27617380023002625, -0.44326621294021606, -0.5848686099052429, 0.31592807173728943],
    +        [0.5401414632797241, -0.1041281446814537, -0.4072037935256958, 0.4387882947921753],
    +        [-0.5410752892494202, 0.4544697403907776, -0.6238576173782349, -0.2077195793390274],
    +        [-0.41753143072128296, -0.11599045991897583, -0.22447934746742249, -0.5805748701095581],
    +        [0.1651047021150589, -0.526184618473053, 0.34729963541030884, 0.3307822048664093],
    +        [0.6879482865333557, 0.27184563875198364, -0.4907835125923157, -0.3555335998535156]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [-0.8146252036094666]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [1.2187021970748901],
    +        [0.13001228868961334],
    +        [0.2703772783279419],
    +        [-0.3591017723083496]
    +      ]
    +    >
    +  }
    +}

While the metric defaults are designed with supervised training loops in mind, they can be used for much more flexible purposes. By default, metrics look for the fields :y_true and :y_pred in the given loop's step state. They then apply the given metric function on those inputs. You can also define metrics which work on other fields. For example, you can track the running average of a given parameter with a metric just by defining a custom output transform:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
    +
    +output_transform = fn %{model_state: model_state} ->
    +  [model_state["dense_0"]["kernel"]]
    +end
     
     loop =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -  |> Axon.Loop.metric(&Nx.mean/1, "dense_0_kernel_mean", :running_average, output_transform)
    -  |> Axon.Loop.metric(&Nx.variance/1, "dense_0_kernel_var", :running_average, output_transform)
    #Axon.Loop<
    -  metrics: %{
    -    "dense_0_kernel_mean" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     &Nx.mean/1},
    -    "dense_0_kernel_var" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     &Nx.variance/1},
    -    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    -  },
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +  |> Axon.Loop.metric(&Nx.mean/1, "dense_0_kernel_mean", :running_average, output_transform)
    +  |> Axon.Loop.metric(&Nx.variance/1, "dense_0_kernel_var", :running_average, output_transform)
    #Axon.Loop<
    +  metrics: %{
    +    "dense_0_kernel_mean" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     &Nx.mean/1},
    +    "dense_0_kernel_var" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     &Nx.variance/1},
    +    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    +  },
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    Axon will apply your custom output transform to the loop's step state and forward the result to your custom metric function:

    train_data =
    -  Stream.repeatedly(fn ->
    -    {xs, _next_key} =
    -      :random.uniform(9999)
    -      |> Nx.Random.key()
    -      |> Nx.Random.normal(shape: {8, 1})
    -
    -    ys = Nx.sin(xs)
    -    {xs, ys}
    -  end)
    -
    -Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, dense_0_kernel_mean: -0.1978206 dense_0_kernel_var: 0.2699870 loss: 0.0605523
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.371105819940567, 0.26451945304870605, -0.048297226428985596, 0.14616385102272034, -0.19356133043766022, -0.2924956679344177, 0.08295489847660065, 0.25213995575904846]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.3888320028781891, -0.39463144540786743, 0.5427617430686951, -0.776488721370697, -0.2402891218662262, -0.6489362716674805, 0.772796094417572, -0.3739306926727295]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.0, -0.006653765682131052, 0.0, 0.3086839020252228]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [-0.5556576251983643, 0.5547546148300171, -0.2708005905151367, 0.7341570258140564],
    -        [-0.01800161600112915, 0.19749529659748077, -0.09523773193359375, 0.4989740252494812],
    -        [-0.19737857580184937, -0.2741832435131073, -0.3699955344200134, 0.21036939322948456],
    -        [-0.09787613153457642, -0.5631319284439087, 0.007957160472869873, 0.23681949079036713],
    -        [-0.469108909368515, 0.24062377214431763, -0.012939095497131348, -0.5055088400840759],
    -        [0.11229842901229858, -0.5476430058479309, 0.013744592666625977, -0.631401538848877],
    -        [-0.5834296941757202, -0.42305096983909607, 0.1393480896949768, -0.4647532105445862],
    -        [-0.3684111535549164, -0.5147689580917358, -0.3725535273551941, 0.46682292222976685]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.8305950164794922]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.7111979722976685],
    -        [-0.49341335892677307],
    -        [-0.32701319456100464],
    -        [-1.0638068914413452]
    -      ]
    -    >
    -  }
    -}

    You can also define custom accumulation functions. Axon has definitions for computing running averages and running sums; however, you might find you need something like an exponential moving average:

    defmodule CustomAccumulator do
    +>

    Axon will apply your custom output transform to the loop's step state and forward the result to your custom metric function:

    train_data =
    +  Stream.repeatedly(fn ->
    +    {xs, _next_key} =
    +      :random.uniform(9999)
    +      |> Nx.Random.key()
    +      |> Nx.Random.normal(shape: {8, 1})
    +
    +    ys = Nx.sin(xs)
    +    {xs, ys}
    +  end)
    +
    +Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, dense_0_kernel_mean: -0.1978206 dense_0_kernel_var: 0.2699870 loss: 0.0605523
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.371105819940567, 0.26451945304870605, -0.048297226428985596, 0.14616385102272034, -0.19356133043766022, -0.2924956679344177, 0.08295489847660065, 0.25213995575904846]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.3888320028781891, -0.39463144540786743, 0.5427617430686951, -0.776488721370697, -0.2402891218662262, -0.6489362716674805, 0.772796094417572, -0.3739306926727295]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.0, -0.006653765682131052, 0.0, 0.3086839020252228]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.5556576251983643, 0.5547546148300171, -0.2708005905151367, 0.7341570258140564],
    +        [-0.01800161600112915, 0.19749529659748077, -0.09523773193359375, 0.4989740252494812],
    +        [-0.19737857580184937, -0.2741832435131073, -0.3699955344200134, 0.21036939322948456],
    +        [-0.09787613153457642, -0.5631319284439087, 0.007957160472869873, 0.23681949079036713],
    +        [-0.469108909368515, 0.24062377214431763, -0.012939095497131348, -0.5055088400840759],
    +        [0.11229842901229858, -0.5476430058479309, 0.013744592666625977, -0.631401538848877],
    +        [-0.5834296941757202, -0.42305096983909607, 0.1393480896949768, -0.4647532105445862],
    +        [-0.3684111535549164, -0.5147689580917358, -0.3725535273551941, 0.46682292222976685]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.8305950164794922]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.7111979722976685],
    +        [-0.49341335892677307],
    +        [-0.32701319456100464],
    +        [-1.0638068914413452]
    +      ]
    +    >
    +  }
    +}

    You can also define custom accumulation functions. Axon has definitions for computing running averages and running sums; however, you might find you need something like an exponential moving average:

    defmodule CustomAccumulator do
       import Nx.Defn
     
    -  defn running_ema(acc, obs, _i, opts \\ []) do
    -    opts = keyword!(opts, alpha: 0.9)
    -    obs * opts[:alpha] + acc * (1 - opts[:alpha])
    -  end
    -end
    {:module, CustomAccumulator, <<70, 79, 82, 49, 0, 0, 11, ...>>, true}

Your accumulator must be an arity-3 function which accepts the current accumulated value, the current observation, and the current iteration, and returns the aggregated metric. You can pass a function directly as an accumulator in your metric:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    -
    -output_transform = fn %{model_state: model_state} ->
    -  [model_state["dense_0"]["kernel"]]
    -end
    +  defn running_ema(acc, obs, _i, opts \\ []) do
    +    opts = keyword!(opts, alpha: 0.9)
    +    obs * opts[:alpha] + acc * (1 - opts[:alpha])
    +  end
    +end
    {:module, CustomAccumulator, <<70, 79, 82, 49, 0, 0, 11, ...>>, true}
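In other words, the update implemented above is the usual exponential-moving-average recurrence $ema_t = \alpha \cdot x_t + (1 - \alpha) \cdot ema_{t-1}$, with the new observation weighted by $\alpha$ (0.9 by default).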

Your accumulator must be an arity-3 function which accepts the current accumulated value, the current observation, and the current iteration, and returns the aggregated metric. You can pass a function directly as an accumulator in your metric:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
    +
    +output_transform = fn %{model_state: model_state} ->
    +  [model_state["dense_0"]["kernel"]]
    +end
     
     loop =
       model
    -  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    -  |> Axon.Loop.metric(
    +  |> Axon.Loop.trainer(:mean_squared_error, :sgd)
    +  |> Axon.Loop.metric(
         &Nx.mean/1,
         "dense_0_kernel_ema_mean",
         &CustomAccumulator.running_ema/3,
         output_transform
    -  )
    #Axon.Loop<
    -  metrics: %{
    -    "dense_0_kernel_ema_mean" => {#Function<15.37390314/3 in Axon.Loop.build_metric_fn/3>,
    -     &Nx.mean/1},
    -    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    -  },
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +  )
    #Axon.Loop<
    +  metrics: %{
    +    "dense_0_kernel_ema_mean" => {#Function<15.37390314/3 in Axon.Loop.build_metric_fn/3>,
    +     &Nx.mean/1},
    +    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    +  },
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    Then when you run the loop, Axon will use your custom accumulator:

    train_data =
    -  Stream.repeatedly(fn ->
    -    {xs, _next_key} =
    -      :random.uniform(9999)
    -      |> Nx.Random.key()
    -      |> Nx.Random.normal(shape: {8, 1})
    -
    -    ys = Nx.sin(xs)
    -    {xs, ys}
    -  end)
    -
    -Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, dense_0_kernel_ema_mean: -0.0139760 loss: 0.0682910
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [-0.3344854414463043, -0.14519920945167542, 0.1061621680855751, 0.36911827325820923, 0.014146199449896812, 0.46089673042297363, -0.1707312911748886, -0.054649338126182556]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [0.6524605751037598, -0.3795280158519745, -0.2069108486175537, 0.6815686821937561, -0.5734748840332031, 0.5515486001968384, -0.13509605824947357, -0.711794912815094]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.3078235387802124, -0.24773009121418, -0.027328377589583397, 0.0769796073436737]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [-0.785156786441803, 0.07306647300720215, 0.339533269405365, -0.2188076674938202],
    -        [0.29139244556427, 0.15977036952972412, 0.6193944215774536, -0.4305708408355713],
    -        [-0.21063144505023956, -0.3738138973712921, -0.27965712547302246, 0.051842525601387024],
    -        [0.7297297716140747, -0.08164620399475098, 0.07651054859161377, -0.43577027320861816],
    -        [0.07917583733797073, -0.27750709652900696, 0.21028375625610352, -0.6430750489234924],
    -        [0.7177602648735046, -0.2743614912033081, -0.5894488096237183, 0.634209156036377],
    -        [0.4251592457294464, 0.6134526133537292, -0.35339266061782837, 0.4966743588447571],
    -        [-0.49672019481658936, 0.46769094467163086, -0.44432300329208374, -0.3249942660331726]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [-0.8245151042938232]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.9500011205673218],
    -        [0.9115968942642212],
    -        [0.39282673597335815],
    -        [0.19936752319335938]
    -      ]
    -    >
    -  }
    -}
    +
    >

    Then when you run the loop, Axon will use your custom accumulator:

    train_data =
    +  Stream.repeatedly(fn ->
    +    {xs, _next_key} =
    +      :random.uniform(9999)
    +      |> Nx.Random.key()
    +      |> Nx.Random.normal(shape: {8, 1})
    +
    +    ys = Nx.sin(xs)
    +    {xs, ys}
    +  end)
    +
    +Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, dense_0_kernel_ema_mean: -0.0139760 loss: 0.0682910
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [-0.3344854414463043, -0.14519920945167542, 0.1061621680855751, 0.36911827325820923, 0.014146199449896812, 0.46089673042297363, -0.1707312911748886, -0.054649338126182556]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [0.6524605751037598, -0.3795280158519745, -0.2069108486175537, 0.6815686821937561, -0.5734748840332031, 0.5515486001968384, -0.13509605824947357, -0.711794912815094]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.3078235387802124, -0.24773009121418, -0.027328377589583397, 0.0769796073436737]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [-0.785156786441803, 0.07306647300720215, 0.339533269405365, -0.2188076674938202],
    +        [0.29139244556427, 0.15977036952972412, 0.6193944215774536, -0.4305708408355713],
    +        [-0.21063144505023956, -0.3738138973712921, -0.27965712547302246, 0.051842525601387024],
    +        [0.7297297716140747, -0.08164620399475098, 0.07651054859161377, -0.43577027320861816],
    +        [0.07917583733797073, -0.27750709652900696, 0.21028375625610352, -0.6430750489234924],
    +        [0.7177602648735046, -0.2743614912033081, -0.5894488096237183, 0.634209156036377],
    +        [0.4251592457294464, 0.6134526133537292, -0.35339266061782837, 0.4966743588447571],
    +        [-0.49672019481658936, 0.46769094467163086, -0.44432300329208374, -0.3249942660331726]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [-0.8245151042938232]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.9500011205673218],
    +        [0.9115968942642212],
    +        [0.39282673597335815],
    +        [0.19936752319335938]
    +      ]
    +    >
    +  }
    +}
diff --git a/xor.html b/xor.html
index 6f8b184b..82d3a515 100644
--- a/xor.html
+++ b/xor.html
@@ -14,7 +14,7 @@
-
+
@@ -136,14 +136,14 @@

    -
    Mix.install([
    -  {:axon, "~> 0.3.0"},
    -  {:nx, "~> 0.4.0", override: true},
    -  {:exla, "~> 0.4.0"},
    -  {:kino_vega_lite, "~> 0.1.6"}
    -])
    +
    Mix.install([
    +  {:axon, "~> 0.3.0"},
    +  {:nx, "~> 0.4.0", override: true},
    +  {:exla, "~> 0.4.0"},
    +  {:kino_vega_lite, "~> 0.1.6"}
    +])
     
    -Nx.Defn.default_options(compiler: EXLA)
    +Nx.Defn.default_options(compiler: EXLA)
     
     alias VegaLite, as: Vl

    @@ -157,14 +157,14 @@

    The model

    -

    Let's start with the model. We need two inputs, since XOR has two operands. We then concatenate them into a single input vector with Axon.concatenate/3. Then we have one hidden layer and one output layer, both of them dense.

    Note: the model is a sequential neural network. In Axon, we can conveniently create such a model by using the pipe operator (|>) to add layers one by one.

    x1_input = Axon.input("x1", shape: {nil, 1})
    -x2_input = Axon.input("x2", shape: {nil, 1})
    +

    Let's start with the model. We need two inputs, since XOR has two operands. We then concatenate them into a single input vector with Axon.concatenate/3. Then we have one hidden layer and one output layer, both of them dense.

    Note: the model is a sequential neural network. In Axon, we can conveniently create such a model by using the pipe operator (|>) to add layers one by one.

    x1_input = Axon.input("x1", shape: {nil, 1})
    +x2_input = Axon.input("x2", shape: {nil, 1})
     
     model =
       x1_input
    -  |> Axon.concatenate(x2_input)
    -  |> Axon.dense(8, activation: :tanh)
    -  |> Axon.dense(1, activation: :sigmoid)

+  |> Axon.concatenate(x2_input)
+  |> Axon.dense(8, activation: :tanh)
+  |> Axon.dense(1, activation: :sigmoid)
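
As a hedged aside (not part of the original notebook), you can visualize this two-input graph with Axon.Display.as_graph/2, just like the other guides do; the template map below is an assumption, with one template per named input:

templates = %{
  "x1" => Nx.template({1, 1}, :f32),
  "x2" => Nx.template({1, 1}, :f32)
}

# Each input should appear as its own root node feeding the concatenate layer.
Axon.Display.as_graph(model, templates)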

    @@ -173,13 +173,13 @@

    The next step is to prepare training data. Since we are modeling a well-defined operation, we can just generate random operands and compute the expected XOR result for them.

    The training works with batches of examples, so we repeatedly generate a whole batch of inputs and the expected result.

    batch_size = 32
     
     data =
    -  Stream.repeatedly(fn ->
    -    x1 = Nx.random_uniform({batch_size, 1}, 0, 2)
    -    x2 = Nx.random_uniform({batch_size, 1}, 0, 2)
    -    y = Nx.logical_xor(x1, x2)
    +  Stream.repeatedly(fn ->
    +    x1 = Nx.random_uniform({batch_size, 1}, 0, 2)
    +    x2 = Nx.random_uniform({batch_size, 1}, 0, 2)
    +    y = Nx.logical_xor(x1, x2)
     
    -    {%{"x1" => x1, "x2" => x2}, y}
    -  end)

    Here's how a sample batch looks:

    Enum.at(data, 0)

    + {%{"x1" => x1, "x2" => x2}, y} + end)

    Here's how a sample batch looks:

    Enum.at(data, 0)

    @@ -189,17 +189,17 @@

params = model
-  |> Axon.Loop.trainer(:binary_cross_entropy, :sgd)
-  |> Axon.Loop.run(data, %{}, epochs: epochs, iterations: 1000)

+  |> Axon.Loop.trainer(:binary_cross_entropy, :sgd)
+  |> Axon.Loop.run(data, %{}, epochs: epochs, iterations: 1000)

    Trying the model

    -

    Finally, we can test our model on sample data.

    Axon.predict(model, params, %{
    -  "x1" => Nx.tensor([[0]]),
    -  "x2" => Nx.tensor([[1]])
    -})

    Try other combinations of $x_1$ and $x_2$ and see what the output is. To improve the model performance, you can increase the number of training epochs.

    +

    Finally, we can test our model on sample data.

    Axon.predict(model, params, %{
    +  "x1" => Nx.tensor([[0]]),
    +  "x2" => Nx.tensor([[1]])
    +})

    Try other combinations of $x_1$ and $x_2$ and see what the output is. To improve the model performance, you can increase the number of training epochs.
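
For example, a quick sketch of one more combination (both operands set to 1, which XOR should map to 0):

Axon.predict(model, params, %{
  "x1" => Nx.tensor([[1]]),
  "x2" => Nx.tensor([[1]])
})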

    @@ -209,22 +209,22 @@

n = 50

# We generate coordinates of inputs in the (n x n) grid
-x1 = Nx.iota({n, n}, axis: 0) |> Nx.divide(n) |> Nx.reshape({:auto, 1})
-x2 = Nx.iota({n, n}, axis: 1) |> Nx.divide(n) |> Nx.reshape({:auto, 1})
+x1 = Nx.iota({n, n}, axis: 0) |> Nx.divide(n) |> Nx.reshape({:auto, 1})
+x2 = Nx.iota({n, n}, axis: 1) |> Nx.divide(n) |> Nx.reshape({:auto, 1})

# The output is also a real number, but we round it into one of the two classes
-y = Axon.predict(model, params, %{"x1" => x1, "x2" => x2}) |> Nx.round()
-
-Vl.new(width: 300, height: 300)
-|> Vl.data_from_values(
-  x1: Nx.to_flat_list(x1),
-  x2: Nx.to_flat_list(x2),
-  y: Nx.to_flat_list(y)
-)
-|> Vl.mark(:circle)
-|> Vl.encode_field(:x, "x1", type: :quantitative)
-|> Vl.encode_field(:y, "x2", type: :quantitative)
-|> Vl.encode_field(:color, "y", type: :nominal)

    From the plot we can clearly see that during training our model learnt two clean boundaries to separate $(0,0)$, $(1,1)$ from $(0,1)$, $(1,0)$.

+y = Axon.predict(model, params, %{"x1" => x1, "x2" => x2}) |> Nx.round()
+
+Vl.new(width: 300, height: 300)
+|> Vl.data_from_values(
+  x1: Nx.to_flat_list(x1),
+  x2: Nx.to_flat_list(x2),
+  y: Nx.to_flat_list(y)
+)
+|> Vl.mark(:circle)
+|> Vl.encode_field(:x, "x1", type: :quantitative)
+|> Vl.encode_field(:y, "x2", type: :quantitative)
+|> Vl.encode_field(:color, "y", type: :nominal)

    From the plot we can clearly see that during training our model learnt two clean boundaries to separate $(0,0)$, $(1,1)$ from $(0,1)$, $(1,0)$.

    diff --git a/your_first_axon_model.html b/your_first_axon_model.html index daaed8db..d1dcc504 100644 --- a/your_first_axon_model.html +++ b/your_first_axon_model.html @@ -14,7 +14,7 @@ - + @@ -136,29 +136,29 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"},
    -  {:kino, ">= 0.9.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"},
    +  {:kino, ">= 0.9.0"}
    +])
    :ok

    Your first model

    -

    Axon is a library for creating and training neural networks in Elixir. Everything in Axon centers around the %Axon{} struct which represents an instance of an Axon model.

    Models are just graphs which represent the transformation and flow of input data to a desired output. Really, you can think of models as representing a single computation or function. An Axon model, when executed, takes data as input and returns transformed data as output.

    All Axon models start with a declaration of input nodes. These are the root nodes of your computation graph, and correspond to the actual input data you want to send to Axon:

    input = Axon.input("data")
    #Axon<
    -  inputs: %{"data" => nil}
    +

    Axon is a library for creating and training neural networks in Elixir. Everything in Axon centers around the %Axon{} struct which represents an instance of an Axon model.

    Models are just graphs which represent the transformation and flow of input data to a desired output. Really, you can think of models as representing a single computation or function. An Axon model, when executed, takes data as input and returns transformed data as output.

    All Axon models start with a declaration of input nodes. These are the root nodes of your computation graph, and correspond to the actual input data you want to send to Axon:

    input = Axon.input("data")
    #Axon<
    +  inputs: %{"data" => nil}
       outputs: "data"
       nodes: 1
    ->

    Technically speaking, input is now a valid Axon model which you can inspect, execute, and initialize. You can visualize how data flows through the graph using Axon.Display.as_graph/2:

    template = Nx.template({2, 8}, :f32)
    -Axon.Display.as_graph(input, template)
    graph TD;
    +>

    Technically speaking, input is now a valid Axon model which you can inspect, execute, and initialize. You can visualize how data flows through the graph using Axon.Display.as_graph/2:

    template = Nx.template({2, 8}, :f32)
    +Axon.Display.as_graph(input, template)
    graph TD;
     3[/"data (:input) {2, 8}"/];
    -;

    Notice the execution flow is just a single node, because your graph only consists of an input node! You pass data in and the model spits the same data back out, without any intermediate transformations.

You can see this in action by actually executing your model. You can build the %Axon{} struct into its initialization and forward functions by calling Axon.build/2. This pattern of "lowering" or transforming the %Axon{} data structure into other functions or representations is very common in Axon. By simply traversing the data structure, you can create useful functions, execution visualizations, and more!

    {init_fn, predict_fn} = Axon.build(input)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
    - #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}

    Notice that Axon.build/2 returns a tuple of {init_fn, predict_fn}. init_fn has the signature:

    init_fn.(template :: map(tensor) | tensor, initial_params :: map) :: map(tensor)

    while predict_fn has the signature:

    predict_fn.(params :: map(tensor), input :: map(tensor) | tensor)

init_fn returns all of your model's trainable parameters and state. You need to pass a template of the expected inputs because the shape of certain model parameters often depends on the shape of model inputs. You also need to pass any initial parameters you want your model to start with. This is useful for things like transfer learning, which you can read about in another guide.

    predict_fn returns transformed inputs from your model's trainable parameters and the given inputs.

    params = init_fn.(Nx.template({1, 8}, :f32), %{})
    %{}

    In this example, you use Nx.template/2 to create a template tensor, which is a placeholder that does not actually consume any memory. Templates are useful for initialization because you don't actually need to know anything about your inputs other than their shape and type.

    Notice init_fn returned an empty map because your model does not have any trainable parameters. This should make sense because it's just an input layer.

    Now you can pass these trainable parameters to predict_fn along with some input to actually execute your model:

    predict_fn.(params, Nx.iota({1, 8}, type: :f32))
    #Nx.Tensor<
    -  f32[1][8]
    -  [
    -    [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
    -  ]
    ->

    And your model just returned the given input, as expected!

    +;

    Notice the execution flow is just a single node, because your graph only consists of an input node! You pass data in and the model spits the same data back out, without any intermediate transformations.

You can see this in action by actually executing your model. You can build the %Axon{} struct into its initialization and forward functions by calling Axon.build/2. This pattern of "lowering" or transforming the %Axon{} data structure into other functions or representations is very common in Axon. By simply traversing the data structure, you can create useful functions, execution visualizations, and more!

    {init_fn, predict_fn} = Axon.build(input)
    {#Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>,
    + #Function<135.109794929/2 in Nx.Defn.Compiler.fun/2>}

    Notice that Axon.build/2 returns a tuple of {init_fn, predict_fn}. init_fn has the signature:

    init_fn.(template :: map(tensor) | tensor, initial_params :: map) :: map(tensor)

    while predict_fn has the signature:

    predict_fn.(params :: map(tensor), input :: map(tensor) | tensor)

init_fn returns all of your model's trainable parameters and state. You need to pass a template of the expected inputs because the shape of certain model parameters often depends on the shape of model inputs. You also need to pass any initial parameters you want your model to start with. This is useful for things like transfer learning, which you can read about in another guide.

    predict_fn returns transformed inputs from your model's trainable parameters and the given inputs.
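
As a hedged sketch of the initial_params argument (the small dense model and the names dense_model/dense_init_fn are assumptions, not part of this guide), you can thread previously obtained parameters back through init_fn, which is the mechanism transfer learning builds on:

dense_model = Axon.input("data") |> Axon.dense(4)
{dense_init_fn, _dense_predict_fn} = Axon.build(dense_model)

# Initialize from scratch, then re-initialize reusing those same parameters.
params = dense_init_fn.(Nx.template({1, 8}, :f32), %{})
reused_params = dense_init_fn.(Nx.template({1, 8}, :f32), params)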

    params = init_fn.(Nx.template({1, 8}, :f32), %{})
    %{}

    In this example, you use Nx.template/2 to create a template tensor, which is a placeholder that does not actually consume any memory. Templates are useful for initialization because you don't actually need to know anything about your inputs other than their shape and type.
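
A brief hedged illustration: inspecting a template shows only its shape and type, backed by Nx.TemplateBackend rather than real data, which is why it does not consume memory:

Nx.template({1, 8}, :f32)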

    Notice init_fn returned an empty map because your model does not have any trainable parameters. This should make sense because it's just an input layer.

    Now you can pass these trainable parameters to predict_fn along with some input to actually execute your model:

    predict_fn.(params, Nx.iota({1, 8}, type: :f32))
    #Nx.Tensor<
    +  f32[1][8]
    +  [
    +    [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
    +  ]
    +>

    And your model just returned the given input, as expected!

    diff --git a/your_first_evaluation_loop.html b/your_first_evaluation_loop.html index 08087d8c..c854eff5 100644 --- a/your_first_evaluation_loop.html +++ b/your_first_evaluation_loop.html @@ -14,7 +14,7 @@ - + @@ -136,125 +136,125 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"}
    +])
    :ok

    Creating an Axon evaluation loop

Once you have a trained model, it's necessary to test it on some test data. Axon's loop abstraction is general enough to work for both training and evaluating models. Just as Axon implements a canned Axon.Loop.trainer/3 factory, it also implements a canned Axon.Loop.evaluator/1 factory.

    Axon.Loop.evaluator/1 creates an evaluation loop which you can instrument with metrics to measure the performance of a trained model on test data. First, you need a trained model:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
     
    -train_loop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)
    +train_loop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)
     
     data =
    -  Stream.repeatedly(fn ->
    -    {xs, _next_key} =
    -      :random.uniform(9999)
    -      |> Nx.Random.key()
    -      |> Nx.Random.normal(shape: {8, 1})
    -
    -    ys = Nx.sin(xs)
    -    {xs, ys}
    -  end)
    -
    -trained_model_state = Axon.Loop.run(train_loop, data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.1285532
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [-0.06848274916410446, 0.037988610565662384, -0.199247345328331, 0.18008524179458618, 0.10976515710353851, -0.10479626059532166, 0.562850832939148, -0.030415315181016922]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.2839881181716919, 0.11133058369159698, -0.5213645100593567, -0.14406965672969818, 0.37532612681388855, -0.28965434432029724, -0.9048429131507874, -5.540614947676659e-4]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [-0.2961483597755432, 0.3721822202205658, -0.1726730614900589, -0.20648165047168732]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.602420449256897, 0.46551579236984253, 0.3295630216598511, 0.484800785779953],
    -        [0.05755739286541939, -0.2412092238664627, 0.27874955534935, 0.13457047939300537],
    -        [-0.26997247338294983, -0.4479314386844635, 0.4976465106010437, -0.05715075880289078],
    -        [-0.7245721220970154, 0.1187945082783699, 0.14330074191093445, 0.3257679343223572],
    -        [-0.032964885234832764, -0.625235915184021, -0.05669135972857475, -0.7016372680664062],
    -        [-0.08433973789215088, -0.07334757596254349, 0.08273869007825851, 0.46893611550331116],
    -        [0.4123252332210541, 0.9876810312271118, -0.3525731563568115, 0.030163511633872986],
    -        [0.6962482333183289, 0.5394620299339294, 0.6907036304473877, -0.5448697209358215]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [0.7519291043281555]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [0.7839917540550232],
    -        [-0.8586246967315674],
    -        [0.8599083423614502],
    -        [0.29766184091567993]
    -      ]
    -    >
    -  }
    -}

    Running loops with Axon.Loop.trainer/3 returns a trained model state which you can use to evaluate your model. To construct an evaluation loop, you just call Axon.Loop.evaluator/1 with your pre-trained model:

    test_loop = Axon.Loop.evaluator(model)
    #Axon.Loop<
    -  metrics: %{},
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +  Stream.repeatedly(fn ->
    +    {xs, _next_key} =
    +      :random.uniform(9999)
    +      |> Nx.Random.key()
    +      |> Nx.Random.normal(shape: {8, 1})
    +
    +    ys = Nx.sin(xs)
    +    {xs, ys}
    +  end)
    +
    +trained_model_state = Axon.Loop.run(train_loop, data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.1285532
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [-0.06848274916410446, 0.037988610565662384, -0.199247345328331, 0.18008524179458618, 0.10976515710353851, -0.10479626059532166, 0.562850832939148, -0.030415315181016922]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.2839881181716919, 0.11133058369159698, -0.5213645100593567, -0.14406965672969818, 0.37532612681388855, -0.28965434432029724, -0.9048429131507874, -5.540614947676659e-4]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [-0.2961483597755432, 0.3721822202205658, -0.1726730614900589, -0.20648165047168732]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.602420449256897, 0.46551579236984253, 0.3295630216598511, 0.484800785779953],
    +        [0.05755739286541939, -0.2412092238664627, 0.27874955534935, 0.13457047939300537],
    +        [-0.26997247338294983, -0.4479314386844635, 0.4976465106010437, -0.05715075880289078],
    +        [-0.7245721220970154, 0.1187945082783699, 0.14330074191093445, 0.3257679343223572],
    +        [-0.032964885234832764, -0.625235915184021, -0.05669135972857475, -0.7016372680664062],
    +        [-0.08433973789215088, -0.07334757596254349, 0.08273869007825851, 0.46893611550331116],
    +        [0.4123252332210541, 0.9876810312271118, -0.3525731563568115, 0.030163511633872986],
    +        [0.6962482333183289, 0.5394620299339294, 0.6907036304473877, -0.5448697209358215]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [0.7519291043281555]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [0.7839917540550232],
    +        [-0.8586246967315674],
    +        [0.8599083423614502],
    +        [0.29766184091567993]
    +      ]
    +    >
    +  }
    +}

    Running loops with Axon.Loop.trainer/3 returns a trained model state which you can use to evaluate your model. To construct an evaluation loop, you just call Axon.Loop.evaluator/1 with your pre-trained model:

    test_loop = Axon.Loop.evaluator(model)
    #Axon.Loop<
    +  metrics: %{},
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    Next, you'll need to instrument your test loop with the metrics you'd like to aggregate:

    test_loop = test_loop |> Axon.Loop.metric(:mean_absolute_error)
    #Axon.Loop<
    -  metrics: %{
    -    "mean_absolute_error" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     :mean_absolute_error}
    -  },
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +>

    Next, you'll need to instrument your test loop with the metrics you'd like to aggregate:

    test_loop = test_loop |> Axon.Loop.metric(:mean_absolute_error)
    #Axon.Loop<
    +  metrics: %{
    +    "mean_absolute_error" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     :mean_absolute_error}
    +  },
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    Finally, you can run your loop on test data. Because you want to test your trained model, you need to provide your model's initial state to the test loop:

    Axon.Loop.run(test_loop, data, trained_model_state, iterations: 1000)
    Batch: 999, mean_absolute_error: 0.0856894
    %{
    -  0 => %{
    -    "mean_absolute_error" => #Nx.Tensor<
    +>

    Finally, you can run your loop on test data. Because you want to test your trained model, you need to provide your model's initial state to the test loop:

    Axon.Loop.run(test_loop, data, trained_model_state, iterations: 1000)
    Batch: 999, mean_absolute_error: 0.0856894
    %{
    +  0 => %{
    +    "mean_absolute_error" => #Nx.Tensor<
           f32
           0.08568935841321945
    -    >
    -  }
    -}
+    >
+  }
+}
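
As a hedged follow-up sketch (not part of the original guide), you can attach several metrics to the same evaluation loop before running it; alongside the built-in atoms, any 2-arity function works, such as one built from Axon.Losses:

model
|> Axon.Loop.evaluator()
|> Axon.Loop.metric(:mean_absolute_error)
|> Axon.Loop.metric(
  &Axon.Losses.mean_squared_error(&1, &2, reduction: :mean),
  "mean_squared_error"
)
|> Axon.Loop.run(data, trained_model_state, iterations: 1000)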
    diff --git a/your_first_training_loop.html b/your_first_training_loop.html index b2941a3f..49a7bac4 100644 --- a/your_first_training_loop.html +++ b/your_first_training_loop.html @@ -14,7 +14,7 @@ - + @@ -136,201 +136,201 @@

    -
    Mix.install([
    -  {:axon, ">= 0.5.0"}
    -])
    :ok

    +
    Mix.install([
    +  {:axon, ">= 0.5.0"}
    +])
    :ok

    Creating an Axon training loop

Axon generalizes the concept of training, evaluation, hyperparameter optimization, and more into the Axon.Loop API. Axon loops are instrumented reductions over Elixir Streams; that basically means you can accumulate some state over an Elixir Stream and control different points in the loop's execution.
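
To make the reduction analogy concrete, here is a rough, self-contained sketch in plain Elixir (step, data_stream, and the map-based state are stand-ins, not Axon API):

# A stand-in step function: fold one {inputs, labels} batch into the state.
step = fn {xs, ys}, state ->
  state
  |> Map.update(:batches_seen, 1, &(&1 + 1))
  |> Map.put(:last_batch, {xs, ys})
end

data_stream = Stream.repeatedly(fn -> {Nx.iota({8, 1}), Nx.iota({8, 1})} end)

final_state =
  data_stream
  |> Stream.take(5)
  |> Enum.reduce(%{}, step)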

    With Axon, you'll most commonly implement and work with supervised training loops. Because supervised training loops are so common in deep learning, Axon has a loop factory function which takes care of most of the boilerplate of creating a supervised training loop for you. In the beginning of your deep learning journey, you'll almost exclusively use Axon's loop factories to create and run loops.

    Axon's supervised training loop assumes you have an input stream of data with entries that look like:

    {batch_inputs, batch_labels}

    Each entry is a batch of input data with a corresponding batch of labels. You can simulate some real training data by constructing an Elixir stream:

    train_data =
    -  Stream.repeatedly(fn ->
    -    {xs, _next_key} =
    -      :random.uniform(9999)
    -      |> Nx.Random.key()
    -      |> Nx.Random.normal(shape: {8, 1})
    -
    -    ys = Nx.sin(xs)
    -    {xs, ys}
    -  end)
    #Function<51.6935098/2 in Stream.repeatedly/1>

    The most basic supervised training loop in Axon requires 3 things:

    1. An Axon model
    2. A loss function
    3. An optimizer

    You can construct an Axon model using the knowledge you've gained from going through the model creation guides:

    model =
    -  Axon.input("data")
    -  |> Axon.dense(8)
    -  |> Axon.relu()
    -  |> Axon.dense(4)
    -  |> Axon.relu()
    -  |> Axon.dense(1)
    #Axon<
    -  inputs: %{"data" => nil}
    +  Stream.repeatedly(fn ->
    +    {xs, _next_key} =
    +      :random.uniform(9999)
    +      |> Nx.Random.key()
    +      |> Nx.Random.normal(shape: {8, 1})
    +
    +    ys = Nx.sin(xs)
    +    {xs, ys}
    +  end)
    #Function<51.6935098/2 in Stream.repeatedly/1>
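
As a hedged aside (not in the original guide), you can sanity-check the stream by pulling a single {xs, ys} batch out of it:

Enum.take(train_data, 1)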

    The most basic supervised training loop in Axon requires 3 things:

    1. An Axon model
    2. A loss function
    3. An optimizer

    You can construct an Axon model using the knowledge you've gained from going through the model creation guides:

    model =
    +  Axon.input("data")
    +  |> Axon.dense(8)
    +  |> Axon.relu()
    +  |> Axon.dense(4)
    +  |> Axon.relu()
    +  |> Axon.dense(1)
    #Axon<
    +  inputs: %{"data" => nil}
       outputs: "dense_2"
       nodes: 6
    ->

    Axon comes with built-in loss functions and optimizers which you can use directly when constructing your training loop. To construct your training loop, you use Axon.Loop.trainer/3:

    loop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)
    #Axon.Loop<
    -  metrics: %{
    -    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    -     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    -  },
    -  handlers: %{
    -    completed: [],
    -    epoch_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    epoch_halted: [],
    -    epoch_started: [],
    -    halted: [],
    -    iteration_completed: [
    -      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    -       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    -    ],
    -    iteration_started: [],
    -    started: []
    -  },
    +>

    Axon comes with built-in loss functions and optimizers which you can use directly when constructing your training loop. To construct your training loop, you use Axon.Loop.trainer/3:

    loop = Axon.Loop.trainer(model, :mean_squared_error, :sgd)
    #Axon.Loop<
    +  metrics: %{
    +    "loss" => {#Function<11.133813849/3 in Axon.Metrics.running_average/1>,
    +     #Function<9.37390314/2 in Axon.Loop.build_loss_fn/1>}
    +  },
    +  handlers: %{
    +    completed: [],
    +    epoch_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<6.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    epoch_halted: [],
    +    epoch_started: [],
    +    halted: [],
    +    iteration_completed: [
    +      {#Function<27.37390314/1 in Axon.Loop.log/3>,
    +       #Function<64.37390314/2 in Axon.Loop.build_filter_fn/1>}
    +    ],
    +    iteration_started: [],
    +    started: []
    +  },
       ...
    ->

    You'll notice that Axon.Loop.trainer/3 returns an %Axon.Loop{} data structure. This data structure contains information which Axon uses to control the execution of the loop. In order to run the loop, you need to explicitly pass it to Axon.Loop.run/4:

    Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0563023
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [-0.038592107594013214, 0.19925688207149506, -0.08018972724676132, -0.11267539858818054, 0.35166260600090027, -0.0794963389635086, 0.20298318564891815, 0.3049686849117279]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.06691190600395203, -0.32860732078552246, 0.22386932373046875, 0.16137443482875824, 0.23626506328582764, 0.2438151240348816, 0.2662005126476288, 0.32266947627067566]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.03138260543346405, 0.2621246576309204, 0.021843062713742256, -0.07498764991760254]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.541576087474823, 0.4923045039176941, 0.5933979749679565, -0.5083895921707153],
    -        [0.5120893120765686, -0.6925638318061829, 0.36635661125183105, -0.05748361349105835],
    -        [0.26158788800239563, -0.1788359135389328, -0.14064575731754303, -0.08323567360639572],
    -        [0.6685130596160889, -0.4880330264568329, 0.5104460120201111, -0.3399733006954193],
    -        [-0.6356683969497681, 0.770803689956665, -0.3876360058784485, -0.5178110599517822],
    -        [0.4476216733455658, -0.21042484045028687, -0.4300518333911896, -0.2693784534931183],
    -        [0.08789066225290298, 0.47043612599372864, 0.02871485985815525, 0.6908602714538574],
    -        [0.45776790380477905, 0.6735268235206604, 0.40828803181648254, 0.19558420777320862]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [-0.748963475227356]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [-0.22219088673591614],
    -        [1.1391150951385498],
    -        [-0.13221295177936554],
    -        [-0.27904900908470154]
    -      ]
    -    >
    -  }
    -}

    Axon.Loop.run/4 expects a loop to execute, some data to loop over, and any initial state you explicitly want your loop to start with. Axon.Loop.run/4 will then iterate over your data, executing a step function on each batch, and accumulating some generic loop state. In the case of a supervised training loop, this generic loop state actually represents training state including your model's trained parameters.

Axon.Loop.run/4 also accepts options which control the loop's execution. These include :iterations, which controls the number of iterations per epoch a loop should execute for, and :epochs, which controls the number of epochs a loop should execute for:

    Axon.Loop.run(loop, train_data, %{}, epochs: 3, iterations: 500)
    Epoch: 0, Batch: 450, loss: 0.0935063
    +>

    You'll notice that Axon.Loop.trainer/3 returns an %Axon.Loop{} data structure. This data structure contains information which Axon uses to control the execution of the loop. In order to run the loop, you need to explicitly pass it to Axon.Loop.run/4:

    Axon.Loop.run(loop, train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 950, loss: 0.0563023
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [-0.038592107594013214, 0.19925688207149506, -0.08018972724676132, -0.11267539858818054, 0.35166260600090027, -0.0794963389635086, 0.20298318564891815, 0.3049686849117279]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.06691190600395203, -0.32860732078552246, 0.22386932373046875, 0.16137443482875824, 0.23626506328582764, 0.2438151240348816, 0.2662005126476288, 0.32266947627067566]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.03138260543346405, 0.2621246576309204, 0.021843062713742256, -0.07498764991760254]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.541576087474823, 0.4923045039176941, 0.5933979749679565, -0.5083895921707153],
    +        [0.5120893120765686, -0.6925638318061829, 0.36635661125183105, -0.05748361349105835],
    +        [0.26158788800239563, -0.1788359135389328, -0.14064575731754303, -0.08323567360639572],
    +        [0.6685130596160889, -0.4880330264568329, 0.5104460120201111, -0.3399733006954193],
    +        [-0.6356683969497681, 0.770803689956665, -0.3876360058784485, -0.5178110599517822],
    +        [0.4476216733455658, -0.21042484045028687, -0.4300518333911896, -0.2693784534931183],
    +        [0.08789066225290298, 0.47043612599372864, 0.02871485985815525, 0.6908602714538574],
    +        [0.45776790380477905, 0.6735268235206604, 0.40828803181648254, 0.19558420777320862]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [-0.748963475227356]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.22219088673591614],
    +        [1.1391150951385498],
    +        [-0.13221295177936554],
    +        [-0.27904900908470154]
    +      ]
    +    >
    +  }
    +}

    Axon.Loop.run/4 expects a loop to execute, some data to loop over, and any initial state you explicitly want your loop to start with. Axon.Loop.run/4 will then iterate over your data, executing a step function on each batch, and accumulating some generic loop state. In the case of a supervised training loop, this generic loop state actually represents training state including your model's trained parameters.

Axon.Loop.run/4 also accepts options which control the loop's execution. These include :iterations, which controls the number of iterations per epoch a loop should execute for, and :epochs, which controls the number of epochs a loop should execute for:

    Axon.Loop.run(loop, train_data, %{}, epochs: 3, iterations: 500)
    Epoch: 0, Batch: 450, loss: 0.0935063
     Epoch: 1, Batch: 450, loss: 0.0576384
    -Epoch: 2, Batch: 450, loss: 0.0428323
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [-0.035534460097551346, 0.2604885697364807, -0.10573504120111465, -0.16461455821990967, 0.3610309064388275, -0.10921606421470642, 0.2061888873577118, 0.3162775933742523]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [-0.05344606190919876, -0.3463115096092224, 0.23782028257846832, 0.20592278242111206, 0.2195105254650116, 0.2618684470653534, 0.2559347450733185, 0.3006669282913208]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [0.03086121939122677, 0.28601887822151184, 0.02634759061038494, -0.08197703212499619]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.5404174327850342, 0.49248307943344116, 0.5927202701568604, -0.5083895921707153],
    -        [0.5133915543556213, -0.7197086811065674, 0.3669036030769348, -0.057483553886413574],
    -        [0.26609811186790466, -0.20234307646751404, -0.14102067053318024, -0.08141336590051651],
    -        [0.673393964767456, -0.512398362159729, 0.5106634497642517, -0.3384905159473419],
    -        [-0.6347945928573608, 0.7695014476776123, -0.3877493143081665, -0.5186421275138855],
    -        [0.45236992835998535, -0.2351287305355072, -0.4305106997489929, -0.2674770951271057],
    -        [0.08871842920780182, 0.46521952748298645, 0.02729635499417782, 0.691332221031189],
    -        [0.4584391117095947, 0.6687410473823547, 0.4068295657634735, 0.19576647877693176]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [-0.7425869703292847]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [-0.24965399503707886],
    -        [1.1746525764465332],
    -        [-0.12984804809093475],
    -        [-0.2796761095523834]
    -      ]
    -    >
    -  }
    -}

    You may have noticed that by default Axon.Loop.trainer/3 configures your loop to log information about training progress every 50 iterations. You can control this when constructing your supervised training loop with the :log option:

    model
    -|> Axon.Loop.trainer(:mean_squared_error, :sgd, log: 100)
    -|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 900, loss: 0.1492715
    %{
    -  "dense_0" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[8]
    -      [0.09267199039459229, 0.5775123834609985, -0.07691138982772827, 0.04283804073929787, -0.015639742836356163, -0.0725373700261116, -0.10598818212747574, 0.021243896335363388]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[1][8]
    -      [
    -        [0.07886508852243423, 0.826379120349884, 0.1022031158208847, -0.5164816975593567, 0.390212744474411, 0.2709604799747467, -0.05409134551882744, -0.6204537749290466]
    -      ]
    -    >
    -  },
    -  "dense_1" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[4]
    -      [-0.09577611088752747, 0.3303026556968689, -0.25102874636650085, -0.3312375247478485]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[8][4]
    -      [
    -        [0.5508446097373962, -0.03904113546013832, 0.382876992225647, -0.6273598670959473],
    -        [0.13289013504981995, 0.947068452835083, -0.27359727025032043, 0.4073275923728943],
    -        [-0.10011858493089676, -0.32976964116096497, -0.3160743713378906, -0.3586210012435913],
    -        [-0.628970205783844, -0.19567319750785828, -0.07241304218769073, -0.43270331621170044],
    -        [-0.6155693531036377, -0.020595157518982887, -0.3254905045032501, 0.18614870309829712],
    -        [-0.07561944425106049, -0.34477049112319946, -0.30149057507514954, -0.6603768467903137],
    -        [-0.17559891939163208, -0.2768605649471283, 0.5830116868019104, 0.11386138200759888],
    -        [-0.6376093626022339, -0.31125709414482117, 0.2749727964401245, -0.6777774691581726]
    -      ]
    -    >
    -  },
    -  "dense_2" => %{
    -    "bias" => #Nx.Tensor<
    -      f32[1]
    -      [-0.767456591129303]
    -    >,
    -    "kernel" => #Nx.Tensor<
    -      f32[4][1]
    -      [
    -        [-0.3530634641647339],
    -        [0.9497018456459045],
    -        [0.31334763765335083],
    -        [-0.624195396900177]
    -      ]
    -    >
    -  }
    -}
    +Epoch: 2, Batch: 450, loss: 0.0428323
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [-0.035534460097551346, 0.2604885697364807, -0.10573504120111465, -0.16461455821990967, 0.3610309064388275, -0.10921606421470642, 0.2061888873577118, 0.3162775933742523]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [-0.05344606190919876, -0.3463115096092224, 0.23782028257846832, 0.20592278242111206, 0.2195105254650116, 0.2618684470653534, 0.2559347450733185, 0.3006669282913208]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [0.03086121939122677, 0.28601887822151184, 0.02634759061038494, -0.08197703212499619]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.5404174327850342, 0.49248307943344116, 0.5927202701568604, -0.5083895921707153],
    +        [0.5133915543556213, -0.7197086811065674, 0.3669036030769348, -0.057483553886413574],
    +        [0.26609811186790466, -0.20234307646751404, -0.14102067053318024, -0.08141336590051651],
    +        [0.673393964767456, -0.512398362159729, 0.5106634497642517, -0.3384905159473419],
    +        [-0.6347945928573608, 0.7695014476776123, -0.3877493143081665, -0.5186421275138855],
    +        [0.45236992835998535, -0.2351287305355072, -0.4305106997489929, -0.2674770951271057],
    +        [0.08871842920780182, 0.46521952748298645, 0.02729635499417782, 0.691332221031189],
    +        [0.4584391117095947, 0.6687410473823547, 0.4068295657634735, 0.19576647877693176]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [-0.7425869703292847]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.24965399503707886],
    +        [1.1746525764465332],
    +        [-0.12984804809093475],
    +        [-0.2796761095523834]
    +      ]
    +    >
    +  }
    +}

    You may have noticed that by default Axon.Loop.trainer/3 configures your loop to log information about training progress every 50 iterations. You can control this when constructing your supervised training loop with the :log option:

    model
    +|> Axon.Loop.trainer(:mean_squared_error, :sgd, log: 100)
    +|> Axon.Loop.run(train_data, %{}, iterations: 1000)
    Epoch: 0, Batch: 900, loss: 0.1492715
    %{
    +  "dense_0" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[8]
    +      [0.09267199039459229, 0.5775123834609985, -0.07691138982772827, 0.04283804073929787, -0.015639742836356163, -0.0725373700261116, -0.10598818212747574, 0.021243896335363388]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[1][8]
    +      [
    +        [0.07886508852243423, 0.826379120349884, 0.1022031158208847, -0.5164816975593567, 0.390212744474411, 0.2709604799747467, -0.05409134551882744, -0.6204537749290466]
    +      ]
    +    >
    +  },
    +  "dense_1" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[4]
    +      [-0.09577611088752747, 0.3303026556968689, -0.25102874636650085, -0.3312375247478485]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[8][4]
    +      [
    +        [0.5508446097373962, -0.03904113546013832, 0.382876992225647, -0.6273598670959473],
    +        [0.13289013504981995, 0.947068452835083, -0.27359727025032043, 0.4073275923728943],
    +        [-0.10011858493089676, -0.32976964116096497, -0.3160743713378906, -0.3586210012435913],
    +        [-0.628970205783844, -0.19567319750785828, -0.07241304218769073, -0.43270331621170044],
    +        [-0.6155693531036377, -0.020595157518982887, -0.3254905045032501, 0.18614870309829712],
    +        [-0.07561944425106049, -0.34477049112319946, -0.30149057507514954, -0.6603768467903137],
    +        [-0.17559891939163208, -0.2768605649471283, 0.5830116868019104, 0.11386138200759888],
    +        [-0.6376093626022339, -0.31125709414482117, 0.2749727964401245, -0.6777774691581726]
    +      ]
    +    >
    +  },
    +  "dense_2" => %{
    +    "bias" => #Nx.Tensor<
    +      f32[1]
    +      [-0.767456591129303]
    +    >,
    +    "kernel" => #Nx.Tensor<
    +      f32[4][1]
    +      [
    +        [-0.3530634641647339],
    +        [0.9497018456459045],
    +        [0.31334763765335083],
    +        [-0.624195396900177]
    +      ]
    +    >
    +  }
    +}