diff --git a/README.md b/README.md
index bc4c72a2..8702c8e4 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ Grab some data and split into features and target:
```julia
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To optimise for GPUs
```
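The `unpack` change above recurs throughout this diff: the trailing `colname -> true` catch-all is dropped everywhere. A minimal sketch of why the two forms are equivalent, assuming the newer MLJBase behavior in which columns unmatched by earlier selectors fall through to the last return value:

```julia
using MLJ
import RDatasets

iris = RDatasets.dataset("datasets", "iris")

# Old form: an explicit catch-all predicate collects the feature columns.
y, X = unpack(iris, ==(:Species), colname -> true, rng=123)

# New form: :Species goes to `y`; all remaining columns go to `X`.
y, X = unpack(iris, ==(:Species), rng=123)
```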
@@ -83,17 +83,16 @@ Train the wrapped model:
julia> mach = machine(iterated_model, X, y)
julia> fit!(mach)
-[ Info: Training machine(ProbabilisticIteratedModel(model = NeuralNetworkClassifier(builder = MLP(hidden = (5, 4), …), …), …), …).
-[ Info: No iteration parameter specified. Using `iteration_parameter=:(epochs)`.
-[ Info: final loss: 0.10431026246922499
-[ Info: final training loss: 0.046286315
-[ Info: Stop triggered by Patience(4) stopping criterion.
-[ Info: Total of 349 iterations.
+[ Info: No iteration parameter specified. Using `iteration_parameter=:(epochs)`.
+[ Info: final loss: 0.1284184007796247
+[ Info: final training loss: 0.055630706
+[ Info: Stop triggered by NumberSinceBest(5) stopping criterion.
+[ Info: Total of 811 iterations.
```
Inspect results:
```julia-repl
-julia> plot(train_losses, label="Validation Loss", linewidth=2, size=(800,400))
+julia> plot(train_losses, label="Training Loss")
julia> plot!(validation_losses, label="Validation Loss", linewidth=2, size=(800,400))
```
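For context on the vectors plotted above: a hedged sketch of how `train_losses` and `validation_losses` can be collected with MLJ's iteration controls (callback names from MLJIteration; `clf` stands in for the classifier being wrapped and is an assumption here, not code from this diff):

```julia
using MLJ

validation_losses = Float64[]
train_losses = Float64[]

iterated_model = IteratedModel(
    model=clf,                           # the underlying NeuralNetworkClassifier
    resampling=Holdout(fraction_train=0.7),
    measures=log_loss,
    controls=[
        Step(1),                                       # one epoch per control cycle
        NumberSinceBest(5),                            # the criterion in the log above
        NumberLimit(1000),                             # safety cap on iterations
        WithLossDo(l -> push!(validation_losses, l)),  # record out-of-sample loss
        WithTrainingLossesDo(ls -> push!(train_losses, last(ls))),
    ],
)
```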
diff --git a/docs/src/common_workflows/architecture_search/notebook.ipynb b/docs/src/common_workflows/architecture_search/notebook.ipynb
index 958109de..286491d1 100644
--- a/docs/src/common_workflows/architecture_search/notebook.ipynb
+++ b/docs/src/common_workflows/architecture_search/notebook.ipynb
@@ -95,7 +95,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng = 123);\n",
+ "y, X = unpack(iris, ==(:Species), rng = 123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters\n",
"first(X, 5)"
],
@@ -130,7 +130,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (1, 1, 1), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (1, 1, 1), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 4
@@ -306,7 +306,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (21, 57, 25), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (45, 49, 21), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 8
@@ -341,9 +341,9 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "\u001b[1m10×2 DataFrame\u001b[0m\n\u001b[1m Row \u001b[0m│\u001b[1m mlp \u001b[0m\u001b[1m measurement \u001b[0m\n │\u001b[90m MLP… \u001b[0m\u001b[90m Float64 \u001b[0m\n─────┼────────────────────────────────────────────\n 1 │ MLP(hidden = (21, 57, 25), …) 0.0867019\n 2 │ MLP(hidden = (45, 17, 13), …) 0.0929803\n 3 │ MLP(hidden = (33, 13, 49), …) 0.0973896\n 4 │ MLP(hidden = (21, 41, 61), …) 0.0981502\n 5 │ MLP(hidden = (57, 49, 61), …) 0.100331\n 6 │ MLP(hidden = (25, 25, 29), …) 0.101083\n 7 │ MLP(hidden = (29, 61, 21), …) 0.101466\n 8 │ MLP(hidden = (29, 61, 5), …) 0.107513\n 9 │ MLP(hidden = (21, 61, 17), …) 0.107874\n 10 │ MLP(hidden = (45, 49, 61), …) 0.111292",
+ "text/plain": "\u001b[1m10×2 DataFrame\u001b[0m\n\u001b[1m Row \u001b[0m│\u001b[1m mlp \u001b[0m\u001b[1m measurement \u001b[0m\n │\u001b[90m MLP… \u001b[0m\u001b[90m Float64 \u001b[0m\n─────┼────────────────────────────────────────────\n 1 │ MLP(hidden = (45, 49, 21), …) 0.0860875\n 2 │ MLP(hidden = (25, 45, 33), …) 0.0877367\n 3 │ MLP(hidden = (29, 17, 53), …) 0.0970372\n 4 │ MLP(hidden = (61, 9, 29), …) 0.0970978\n 5 │ MLP(hidden = (49, 49, 9), …) 0.0971594\n 6 │ MLP(hidden = (21, 33, 61), …) 0.0984172\n 7 │ MLP(hidden = (57, 61, 61), …) 0.099232\n 8 │ MLP(hidden = (41, 13, 25), …) 0.101498\n 9 │ MLP(hidden = (53, 29, 21), …) 0.105323\n 10 │ MLP(hidden = (57, 33, 45), …) 0.110168",
"text/html": [
- "
1 | MLP(hidden = (21, 57, 25), …) | 0.0867019 |
2 | MLP(hidden = (45, 17, 13), …) | 0.0929803 |
3 | MLP(hidden = (33, 13, 49), …) | 0.0973896 |
4 | MLP(hidden = (21, 41, 61), …) | 0.0981502 |
5 | MLP(hidden = (57, 49, 61), …) | 0.100331 |
6 | MLP(hidden = (25, 25, 29), …) | 0.101083 |
7 | MLP(hidden = (29, 61, 21), …) | 0.101466 |
8 | MLP(hidden = (29, 61, 5), …) | 0.107513 |
9 | MLP(hidden = (21, 61, 17), …) | 0.107874 |
10 | MLP(hidden = (45, 49, 61), …) | 0.111292 |
"
+ "1 | MLP(hidden = (45, 49, 21), …) | 0.0860875 |
2 | MLP(hidden = (25, 45, 33), …) | 0.0877367 |
3 | MLP(hidden = (29, 17, 53), …) | 0.0970372 |
4 | MLP(hidden = (61, 9, 29), …) | 0.0970978 |
5 | MLP(hidden = (49, 49, 9), …) | 0.0971594 |
6 | MLP(hidden = (21, 33, 61), …) | 0.0984172 |
7 | MLP(hidden = (57, 61, 61), …) | 0.099232 |
8 | MLP(hidden = (41, 13, 25), …) | 0.101498 |
9 | MLP(hidden = (53, 29, 21), …) | 0.105323 |
10 | MLP(hidden = (57, 33, 45), …) | 0.110168 |
"
]
},
"metadata": {},
diff --git a/docs/src/common_workflows/architecture_search/notebook.jl b/docs/src/common_workflows/architecture_search/notebook.jl
index 61ba5d49..a5e4a15a 100644
--- a/docs/src/common_workflows/architecture_search/notebook.jl
+++ b/docs/src/common_workflows/architecture_search/notebook.jl
@@ -25,7 +25,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
# ### Loading and Splitting the Data
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng = 123);
+y, X = unpack(iris, ==(:Species), rng = 123);
X = Float32.(X); # To be compatible with the type of network parameters
first(X, 5)
diff --git a/docs/src/common_workflows/architecture_search/notebook.md b/docs/src/common_workflows/architecture_search/notebook.md
index e995c68f..b355247a 100644
--- a/docs/src/common_workflows/architecture_search/notebook.md
+++ b/docs/src/common_workflows/architecture_search/notebook.md
@@ -28,7 +28,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
````@example architecture_search
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng = 123);
+y, X = unpack(iris, ==(:Species), rng = 123);
X = Float32.(X); # To be compatible with the type of network parameters
first(X, 5)
````
diff --git a/docs/src/common_workflows/architecture_search/notebook.unexecuted.ipynb b/docs/src/common_workflows/architecture_search/notebook.unexecuted.ipynb
index 85b68135..6093c80e 100644
--- a/docs/src/common_workflows/architecture_search/notebook.unexecuted.ipynb
+++ b/docs/src/common_workflows/architecture_search/notebook.unexecuted.ipynb
@@ -75,7 +75,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng = 123);\n",
+ "y, X = unpack(iris, ==(:Species), rng = 123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters\n",
"first(X, 5)"
],
diff --git a/docs/src/common_workflows/comparison/notebook.ipynb b/docs/src/common_workflows/comparison/notebook.ipynb
index 8163b302..d968843e 100644
--- a/docs/src/common_workflows/comparison/notebook.ipynb
+++ b/docs/src/common_workflows/comparison/notebook.ipynb
@@ -81,7 +81,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);"
+ "y, X = unpack(iris, ==(:Species), rng=123);"
],
"metadata": {},
"execution_count": 3
@@ -107,7 +107,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 50, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 50, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 4
@@ -271,9 +271,9 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "\u001b[1m4×2 DataFrame\u001b[0m\n\u001b[1m Row \u001b[0m│\u001b[1m mlp \u001b[0m\u001b[1m measurement \u001b[0m\n │\u001b[90m Probabil… \u001b[0m\u001b[90m Float64 \u001b[0m\n─────┼────────────────────────────────────────────────\n 1 │ BayesianLDA(method = gevd, …) 0.0610826\n 2 │ NeuralNetworkClassifier(builder … 0.0857014\n 3 │ RandomForestClassifier(max_depth… 0.102881\n 4 │ ProbabilisticTunedModel(model = … 0.221056",
+ "text/plain": "\u001b[1m4×2 DataFrame\u001b[0m\n\u001b[1m Row \u001b[0m│\u001b[1m mlp \u001b[0m\u001b[1m measurement \u001b[0m\n │\u001b[90m Probabil… \u001b[0m\u001b[90m Float64 \u001b[0m\n─────┼────────────────────────────────────────────────\n 1 │ BayesianLDA(method = gevd, …) 0.0610826\n 2 │ NeuralNetworkClassifier(builder … 0.0857014\n 3 │ RandomForestClassifier(max_depth… 0.107885\n 4 │ ProbabilisticTunedModel(model = … 0.221056",
"text/html": [
- "1 | BayesianLDA(method = gevd, …) | 0.0610826 |
2 | NeuralNetworkClassifier(builder = MLP(hidden = (5, 4), …), …) | 0.0857014 |
3 | RandomForestClassifier(max_depth = -1, …) | 0.102881 |
4 | ProbabilisticTunedModel(model = XGBoostClassifier(test = 1, …), …) | 0.221056 |
"
+ "1 | BayesianLDA(method = gevd, …) | 0.0610826 |
2 | NeuralNetworkClassifier(builder = MLP(hidden = (5, 4), …), …) | 0.0857014 |
3 | RandomForestClassifier(max_depth = -1, …) | 0.107885 |
4 | ProbabilisticTunedModel(model = XGBoostClassifier(test = 1, …), …) | 0.221056 |
"
]
},
"metadata": {},
diff --git a/docs/src/common_workflows/comparison/notebook.jl b/docs/src/common_workflows/comparison/notebook.jl
index 4d75c49d..6716ec52 100644
--- a/docs/src/common_workflows/comparison/notebook.jl
+++ b/docs/src/common_workflows/comparison/notebook.jl
@@ -23,7 +23,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
# ### Loading and Splitting the Data
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
# ### Instantiating the models
# Now let's construct our model. This follows a similar setup
diff --git a/docs/src/common_workflows/comparison/notebook.md b/docs/src/common_workflows/comparison/notebook.md
index 1419ab55..8d689eb1 100644
--- a/docs/src/common_workflows/comparison/notebook.md
+++ b/docs/src/common_workflows/comparison/notebook.md
@@ -26,7 +26,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
````@example comparison
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
nothing #hide
````
diff --git a/docs/src/common_workflows/comparison/notebook.unexecuted.ipynb b/docs/src/common_workflows/comparison/notebook.unexecuted.ipynb
index b8517a90..65e472ff 100644
--- a/docs/src/common_workflows/comparison/notebook.unexecuted.ipynb
+++ b/docs/src/common_workflows/comparison/notebook.unexecuted.ipynb
@@ -73,7 +73,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);"
+ "y, X = unpack(iris, ==(:Species), rng=123);"
],
"metadata": {},
"execution_count": null
diff --git a/docs/src/common_workflows/composition/notebook.ipynb b/docs/src/common_workflows/composition/notebook.ipynb
index ced33e3c..306a24c6 100644
--- a/docs/src/common_workflows/composition/notebook.ipynb
+++ b/docs/src/common_workflows/composition/notebook.ipynb
@@ -10,7 +10,7 @@
{
"cell_type": "markdown",
"source": [
- "This tutorial is available as a Jupyter notebook or julia script\n",
+ "This demonstration is available as a Jupyter notebook or julia script\n",
"[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/composition)."
],
"metadata": {}
@@ -83,7 +83,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters"
],
"metadata": {},
@@ -146,7 +146,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "MLJFlux.NeuralNetworkClassifier"
+ "text/plain": "NeuralNetworkClassifier"
},
"metadata": {},
"execution_count": 5
@@ -173,7 +173,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 50, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 50, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 6
@@ -284,7 +284,7 @@
"\rProgress: 13%|███████▏ | ETA: 0:00:01\u001b[K\rProgress: 100%|█████████████████████████████████████████████████████| Time: 0:00:00\u001b[K\n",
"\rProgress: 67%|███████████████████████████████████▍ | ETA: 0:00:01\u001b[K\r\n",
" class: virginica\u001b[K\r\u001b[A[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 1, \"versicolor\" => 2).\n",
- "\rOptimising neural net: 4%[> ] ETA: 0:05:10\u001b[K\rOptimising neural net: 6%[=> ] ETA: 0:03:22\u001b[K\rOptimising neural net: 8%[=> ] ETA: 0:02:29\u001b[K\rOptimising neural net: 10%[==> ] ETA: 0:01:56\u001b[K\rOptimising neural net: 12%[==> ] ETA: 0:01:35\u001b[K\rOptimising neural net: 14%[===> ] ETA: 0:01:20\u001b[K\rOptimising neural net: 16%[===> ] ETA: 0:01:08\u001b[K\rOptimising neural net: 18%[====> ] ETA: 0:00:59\u001b[K\rOptimising neural net: 20%[====> ] ETA: 0:00:52\u001b[K\rOptimising neural net: 22%[=====> ] ETA: 0:00:46\u001b[K\rOptimising neural net: 24%[=====> ] ETA: 0:00:41\u001b[K\rOptimising neural net: 25%[======> ] ETA: 0:00:37\u001b[K\rOptimising neural net: 27%[======> ] ETA: 0:00:33\u001b[K\rOptimising neural net: 29%[=======> ] ETA: 0:00:30\u001b[K\rOptimising neural net: 31%[=======> ] ETA: 0:00:28\u001b[K\rOptimising neural net: 33%[========> ] ETA: 0:00:25\u001b[K\rOptimising neural net: 35%[========> ] ETA: 0:00:23\u001b[K\rOptimising neural net: 37%[=========> ] ETA: 0:00:21\u001b[K\rOptimising neural net: 39%[=========> ] ETA: 0:00:20\u001b[K\rOptimising neural net: 41%[==========> ] ETA: 0:00:18\u001b[K\rOptimising neural net: 43%[==========> ] ETA: 0:00:17\u001b[K\rOptimising neural net: 45%[===========> ] ETA: 0:00:15\u001b[K\rOptimising neural net: 47%[===========> ] ETA: 0:00:14\u001b[K\rOptimising neural net: 49%[============> ] ETA: 0:00:13\u001b[K\rOptimising neural net: 51%[============> ] ETA: 0:00:12\u001b[K\rOptimising neural net: 53%[=============> ] ETA: 0:00:11\u001b[K\rOptimising neural net: 55%[=============> ] ETA: 0:00:10\u001b[K\rOptimising neural net: 57%[==============> ] ETA: 0:00:10\u001b[K\rOptimising neural net: 59%[==============> ] ETA: 0:00:09\u001b[K\rOptimising neural net: 61%[===============> ] ETA: 0:00:08\u001b[K\rOptimising neural net: 63%[===============> ] ETA: 0:00:08\u001b[K\rOptimising neural net: 82%[====================> ] ETA: 0:00:03\u001b[K\rOptimising neural net: 84%[=====================> ] ETA: 0:00:02\u001b[K\rOptimising neural net: 86%[=====================> ] ETA: 0:00:02\u001b[K\rOptimising neural net: 88%[======================> ] ETA: 0:00:02\u001b[K\rOptimising neural net: 90%[======================> ] ETA: 0:00:01\u001b[K\rOptimising neural net: 92%[=======================> ] ETA: 0:00:01\u001b[K\rOptimising neural net: 94%[=======================> ] ETA: 0:00:01\u001b[K\rOptimising neural net: 96%[========================>] ETA: 0:00:01\u001b[K\rOptimising neural net: 98%[========================>] ETA: 0:00:00\u001b[K\rOptimising neural net: 100%[=========================] Time: 0:00:12\u001b[K\n",
+ "\rOptimising neural net: 4%[> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 6%[=> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 8%[=> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 10%[==> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 12%[==> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 14%[===> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 16%[===> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 18%[====> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 20%[====> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 22%[=====> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 24%[=====> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 25%[======> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 27%[======> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 29%[=======> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 31%[=======> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 33%[========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 35%[========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 37%[=========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 39%[=========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 41%[==========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 43%[==========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 45%[===========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 47%[===========> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 49%[============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 51%[============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 53%[=============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 55%[=============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 57%[==============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 59%[==============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 61%[===============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 63%[===============> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 65%[================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 67%[================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 69%[=================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 71%[=================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 73%[==================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 75%[==================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 76%[===================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 78%[===================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 80%[====================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 82%[====================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 84%[=====================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 86%[=====================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 88%[======================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 90%[======================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 92%[=======================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 94%[=======================> ] ETA: 0:00:00\u001b[K\rOptimising neural net: 96%[========================>] ETA: 0:00:00\u001b[K\rOptimising neural net: 98%[========================>] ETA: 0:00:00\u001b[K\rOptimising neural net: 100%[=========================] Time: 0:00:00\u001b[K\n",
"[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 3, \"versicolor\" => 1).\n",
"[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 3, \"versicolor\" => 1).\n",
"[ Info: After filtering, the mapping from each class to number of borderline points is (\"versicolor\" => 2).\n",
@@ -298,18 +298,18 @@
"│ layer = Dense(4 => 5, relu) # 25 parameters\n",
"│ summary(x) = \"4×8 Matrix{Float64}\"\n",
"└ @ Flux ~/.julia/packages/Flux/Wz6D4/src/layers/stateless.jl:60\n",
- "\rEvaluating over 5 folds: 40%[==========> ] ETA: 0:00:16\u001b[K[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 1, \"versicolor\" => 2).\n",
+ "\rEvaluating over 5 folds: 40%[==========> ] ETA: 0:00:10\u001b[K[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 1, \"versicolor\" => 2).\n",
"[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 1, \"versicolor\" => 2).\n",
- "\rEvaluating over 5 folds: 60%[===============> ] ETA: 0:00:07\u001b[K[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 1).\n",
+ "\rEvaluating over 5 folds: 60%[===============> ] ETA: 0:00:05\u001b[K[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 1).\n",
"┌ Warning: Cannot oversample a class with no borderline points. Skipping.\n",
"└ @ Imbalance ~/.julia/packages/Imbalance/knJL1/src/oversampling_methods/borderline_smote1/borderline_smote1.jl:67\n",
"\rProgress: 67%|███████████████████████████████████▍ | ETA: 0:00:00\u001b[K\r\n",
" class: virginica\u001b[K\r\u001b[A[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 1).\n",
"┌ Warning: Cannot oversample a class with no borderline points. Skipping.\n",
"└ @ Imbalance ~/.julia/packages/Imbalance/knJL1/src/oversampling_methods/borderline_smote1/borderline_smote1.jl:67\n",
- "\rEvaluating over 5 folds: 80%[====================> ] ETA: 0:00:03\u001b[K[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 3, \"versicolor\" => 3).\n",
+ "\rEvaluating over 5 folds: 80%[====================> ] ETA: 0:00:02\u001b[K[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 3, \"versicolor\" => 3).\n",
"[ Info: After filtering, the mapping from each class to number of borderline points is (\"virginica\" => 3, \"versicolor\" => 3).\n",
- "\rEvaluating over 5 folds: 100%[=========================] Time: 0:00:11\u001b[K\n"
+ "\rEvaluating over 5 folds: 100%[=========================] Time: 0:00:07\u001b[K\n"
]
},
{
diff --git a/docs/src/common_workflows/composition/notebook.jl b/docs/src/common_workflows/composition/notebook.jl
index 182021eb..b617a4b6 100644
--- a/docs/src/common_workflows/composition/notebook.jl
+++ b/docs/src/common_workflows/composition/notebook.jl
@@ -26,7 +26,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
# ### Loading and Splitting the Data
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
# To simulate an imbalanced dataset, we will take a random sample:
diff --git a/docs/src/common_workflows/composition/notebook.md b/docs/src/common_workflows/composition/notebook.md
index 0ef30b3b..949d5322 100644
--- a/docs/src/common_workflows/composition/notebook.md
+++ b/docs/src/common_workflows/composition/notebook.md
@@ -4,7 +4,7 @@ EditURL = "notebook.jl"
# Model Composition with MLJFlux
-This tutorial is available as a Jupyter notebook or julia script
+This demonstration is available as a Jupyter notebook or julia script
[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/composition).
In this workflow example, we see how MLJFlux enables composing MLJ models with MLJFlux
@@ -28,7 +28,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
````@example composition
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
nothing #hide
````
diff --git a/docs/src/common_workflows/composition/notebook.unexecuted.ipynb b/docs/src/common_workflows/composition/notebook.unexecuted.ipynb
index 54b2439a..ef75b9ab 100644
--- a/docs/src/common_workflows/composition/notebook.unexecuted.ipynb
+++ b/docs/src/common_workflows/composition/notebook.unexecuted.ipynb
@@ -10,7 +10,7 @@
{
"cell_type": "markdown",
"source": [
- "This tutorial is available as a Jupyter notebook or julia script\n",
+ "This demonstration is available as a Jupyter notebook or julia script\n",
"[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/composition)."
],
"metadata": {}
@@ -75,7 +75,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters"
],
"metadata": {},
diff --git a/docs/src/common_workflows/early_stopping/notebook.ipynb b/docs/src/common_workflows/early_stopping/notebook.ipynb
index bbdda628..9f136402 100644
--- a/docs/src/common_workflows/early_stopping/notebook.ipynb
+++ b/docs/src/common_workflows/early_stopping/notebook.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "# Early Stopping with MLJFlux"
+ "# Early Stopping with MLJ"
],
"metadata": {}
},
@@ -81,7 +81,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters"
],
"metadata": {},
@@ -108,7 +108,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 50, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 50, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 4
@@ -148,7 +148,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "5-element Vector{Any}:\n IterationControl.Step(1)\n EarlyStopping.NumberLimit(100)\n EarlyStopping.Patience(5)\n EarlyStopping.NumberSinceBest(9)\n EarlyStopping.TimeLimit(Dates.Millisecond(1800000))"
+ "text/plain": "5-element Vector{Any}:\n Step(1)\n NumberLimit(100)\n Patience(5)\n NumberSinceBest(9)\n TimeLimit(Dates.Millisecond(1800000))"
},
"metadata": {},
"execution_count": 5
@@ -179,7 +179,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "1-element Vector{IterationControl.WithLossDo{Main.var\"##351\".var\"#3#4\"}}:\n IterationControl.WithLossDo{Main.var\"##351\".var\"#3#4\"}(Main.var\"##351\".var\"#3#4\"(), false, nothing)"
+ "text/plain": "1-element Vector{WithLossDo{Main.var\"##267\".var\"#1#2\"}}:\n WithLossDo{Main.var\"##267\".var\"#1#2\"}(Main.var\"##267\".var\"#1#2\"(), false, nothing)"
},
"metadata": {},
"execution_count": 6
@@ -250,7 +250,7 @@
"[ Info: Training machine(ProbabilisticIteratedModel(model = NeuralNetworkClassifier(builder = MLP(hidden = (5, 4), …), …), …), …).\n",
"[ Info: final loss: 0.05287897645527522\n",
"[ Info: final training loss: 0.045833383\n",
- "[ Info: Stop triggered by EarlyStopping.NumberLimit(100) stopping criterion. \n",
+ "[ Info: Stop triggered by NumberLimit(100) stopping criterion. \n",
"[ Info: Total of 100 iterations. \n"
]
}
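The five controls echoed in the `Vector{Any}` output earlier can be read as follows; a sketch using the EarlyStopping.jl constructors (the `TimeLimit` matches the `Millisecond(1800000)`, i.e. 30 minutes, printed above):

```julia
using MLJ

stop_conditions = [
    Step(1),            # repeatedly train for one epoch at a time
    NumberLimit(100),   # at most 100 cycles (the criterion that fired above)
    Patience(5),        # stop after 5 consecutive increases in validation loss
    NumberSinceBest(9), # or if the best loss occurred 9 cycles ago
    TimeLimit(30/60),   # or after 30 minutes of wall-clock time
]
```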
@@ -290,101 +290,101 @@
"…(SVG plot output; image data stripped in extraction)…",
+ "…(regenerated SVG plot output; image data stripped in extraction)…",
],
"image/svg+xml": [
"…(SVG plot output; image data stripped in extraction)…",
+ "…(regenerated SVG plot output; image data stripped in extraction)…",
]
},
"metadata": {},
diff --git a/docs/src/common_workflows/early_stopping/notebook.jl b/docs/src/common_workflows/early_stopping/notebook.jl
index a6c59da3..adcf39f7 100644
--- a/docs/src/common_workflows/early_stopping/notebook.jl
+++ b/docs/src/common_workflows/early_stopping/notebook.jl
@@ -10,7 +10,7 @@ using Pkg #!md
Pkg.activate(@__DIR__); #!md
Pkg.instantiate(); #!md
-# **Julia version** is assumed to be 1.10.*
+# **Julia version** is assumed to be 1.10.*
# ### Basic Imports
@@ -24,7 +24,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
# ### Loading and Splitting the Data
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
diff --git a/docs/src/common_workflows/early_stopping/notebook.md b/docs/src/common_workflows/early_stopping/notebook.md
index e6738259..076b7007 100644
--- a/docs/src/common_workflows/early_stopping/notebook.md
+++ b/docs/src/common_workflows/early_stopping/notebook.md
@@ -2,7 +2,7 @@
EditURL = "notebook.jl"
```
-# Early Stopping with MLJFlux
+# Early Stopping with MLJ
This demonstration is available as a Jupyter notebook or julia script
[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/early_stopping).
@@ -26,7 +26,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
````@example early_stopping
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
nothing #hide
````
diff --git a/docs/src/common_workflows/early_stopping/notebook.unexecuted.ipynb b/docs/src/common_workflows/early_stopping/notebook.unexecuted.ipynb
index 5effdb73..4441ab52 100644
--- a/docs/src/common_workflows/early_stopping/notebook.unexecuted.ipynb
+++ b/docs/src/common_workflows/early_stopping/notebook.unexecuted.ipynb
@@ -3,7 +3,7 @@
{
"cell_type": "markdown",
"source": [
- "# Early Stopping with MLJFlux"
+ "# Early Stopping with MLJ"
],
"metadata": {}
},
@@ -73,7 +73,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters"
],
"metadata": {},
diff --git a/docs/src/common_workflows/hyperparameter_tuning/notebook.ipynb b/docs/src/common_workflows/hyperparameter_tuning/notebook.ipynb
new file mode 100644
index 00000000..18a49f77
--- /dev/null
+++ b/docs/src/common_workflows/hyperparameter_tuning/notebook.ipynb
@@ -0,0 +1,444 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "source": [
+ "# Hyperparameter Tuning with MLJFlux"
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "This demonstration is available as a Jupyter notebook or julia script\n",
+ "[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/hyperparameter_tuning)."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "In this workflow example we learn how to tune different hyperparameters of MLJFlux\n",
+ "models with emphasis on training hyperparameters."
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ " Activating project at `~/GoogleDrive/Julia/MLJ/MLJFlux/docs/src/common_workflows/hyperparameter_tuning`\n"
+ ]
+ }
+ ],
+ "cell_type": "code",
+ "source": [
+ "using Pkg\n",
+ "Pkg.activate(@__DIR__);\n",
+ "Pkg.instantiate();"
+ ],
+ "metadata": {},
+ "execution_count": 1
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "**Julia version** is assumed to be 1.10.*"
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Basic Imports"
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [],
+ "cell_type": "code",
+ "source": [
+ "using MLJ # Has MLJFlux models\n",
+ "using Flux # For more flexibility\n",
+ "import RDatasets # Dataset source\n",
+ "using Plots # To plot tuning results\n",
+ "import Optimisers # native Flux.jl optimisers no longer supported"
+ ],
+ "metadata": {},
+ "execution_count": 2
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Loading and Splitting the Data"
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [],
+ "cell_type": "code",
+ "source": [
+ "iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
+ "X = Float32.(X); # To be compatible with type of network network parameters"
+ ],
+ "metadata": {},
+ "execution_count": 3
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Instantiating the model"
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Now let's construct our model. This follows a similar setup the one followed in the\n",
+ "[Quick Start](../../index.md#Quick-Start)."
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ Info: For silent loading, specify `verbosity=0`. \n",
+ "import MLJFlux ✔\n"
+ ]
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
+ },
+ "metadata": {},
+ "execution_count": 4
+ }
+ ],
+ "cell_type": "code",
+ "source": [
+ "NeuralNetworkClassifier = @load NeuralNetworkClassifier pkg=MLJFlux\n",
+ "clf = NeuralNetworkClassifier(\n",
+ " builder=MLJFlux.MLP(; hidden=(5,4), σ=Flux.relu),\n",
+ " optimiser=Optimisers.Adam(0.01),\n",
+ " batch_size=8,\n",
+ " epochs=10,\n",
+ " rng=42,\n",
+ ")"
+ ],
+ "metadata": {},
+ "execution_count": 4
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Hyperparameter Tuning Example"
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Let's tune the batch size and the learning rate. We will use grid search and 5-fold\n",
+ "cross-validation."
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "We start by defining the hyperparameter ranges"
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": "NominalRange(optimiser = Adam(0.0001, (0.9, 0.999), 1.0e-8), Adam(0.00215443, (0.9, 0.999), 1.0e-8), Adam(0.0464159, (0.9, 0.999), 1.0e-8), ...)"
+ },
+ "metadata": {},
+ "execution_count": 5
+ }
+ ],
+ "cell_type": "code",
+ "source": [
+ "r1 = range(clf, :batch_size, lower=1, upper=64)\n",
+ "etas = [10^x for x in range(-4, stop=0, length=4)]\n",
+ "optimisers = [Optimisers.Adam(eta) for eta in etas]\n",
+ "r2 = range(clf, :optimiser, values=optimisers)"
+ ],
+ "metadata": {},
+ "execution_count": 5
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Then passing the ranges along with the model and other arguments to the `TunedModel`\n",
+ "constructor."
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [],
+ "cell_type": "code",
+ "source": [
+ "tuned_model = TunedModel(\n",
+ " model=clf,\n",
+ " tuning=Grid(goal=25),\n",
+ " resampling=CV(nfolds=5, rng=42),\n",
+ " range=[r1, r2],\n",
+ " measure=cross_entropy,\n",
+ ");"
+ ],
+ "metadata": {},
+ "execution_count": 6
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Then wrapping our tuned model in a machine and fitting it."
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [],
+ "cell_type": "code",
+ "source": [
+ "mach = machine(tuned_model, X, y);\n",
+ "fit!(mach, verbosity=0);"
+ ],
+ "metadata": {},
+ "execution_count": 7
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Let's check out the best performing model:"
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.0464159, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 1, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
+ },
+ "metadata": {},
+ "execution_count": 8
+ }
+ ],
+ "cell_type": "code",
+ "source": [
+ "fitted_params(mach).best_model"
+ ],
+ "metadata": {},
+ "execution_count": 8
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "### Learning Curves"
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "With learning curves, it's possible to center our focus on the effects of a single\n",
+ "hyperparameter of the model"
+ ],
+ "metadata": {}
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "First define the range and wrap it in a learning curve"
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "[ Info: Training machine(ProbabilisticTunedModel(model = NeuralNetworkClassifier(builder = MLP(hidden = (5, 4), …), …), …), …).\n",
+ "[ Info: Attempting to evaluate 25 models.\n",
+ "\rEvaluating over 25 metamodels: 0%[> ] ETA: N/A\u001b[K\rEvaluating over 25 metamodels: 4%[=> ] ETA: 0:00:03\u001b[K\rEvaluating over 25 metamodels: 8%[==> ] ETA: 0:00:02\u001b[K\rEvaluating over 25 metamodels: 12%[===> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 16%[====> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 20%[=====> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 24%[======> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 28%[=======> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 32%[========> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 36%[=========> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 40%[==========> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 44%[===========> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 48%[============> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 52%[=============> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 56%[==============> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 60%[===============> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 64%[================> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 68%[=================> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 72%[==================> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 76%[===================> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 80%[====================> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 84%[=====================> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 88%[======================> ] ETA: 0:00:01\u001b[K\rEvaluating over 25 metamodels: 92%[=======================> ] ETA: 0:00:00\u001b[K\rEvaluating over 25 metamodels: 96%[========================>] ETA: 0:00:00\u001b[K\rEvaluating over 25 metamodels: 100%[=========================] Time: 0:00:06\u001b[K\n"
+ ]
+ },
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": "(parameter_name = \"epochs\",\n parameter_scale = :log10,\n parameter_values = [1, 2, 3, 4, 5, 6, 7, 9, 11, 13 … 39, 46, 56, 67, 80, 96, 116, 139, 167, 200],\n measurements = [0.9231712033780419, 0.7672938542047157, 0.6736075721456418, 0.6064130950372606, 0.5595521804926612, 0.5270759259385482, 0.5048969423979114, 0.47993815474701584, 0.46130985568830307, 0.4449225600160762 … 0.1621185148276446, 0.12283639917434747, 0.09543014842693512, 0.07850181447968614, 0.06950203807005066, 0.063248279208185, 0.060053521895940286, 0.05921442672620914, 0.05921052970422136, 0.060379476300399186],)"
+ },
+ "metadata": {},
+ "execution_count": 9
+ }
+ ],
+ "cell_type": "code",
+ "source": [
+ "r = range(clf, :epochs, lower=1, upper=200, scale=:log10)\n",
+ "curve = learning_curve(\n",
+ " clf,\n",
+ " X,\n",
+ " y,\n",
+ " range=r,\n",
+ " resampling=CV(nfolds=4, rng=42),\n",
+ " measure=cross_entropy,\n",
+ ")"
+ ],
+ "metadata": {},
+ "execution_count": 9
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "Then plot the curve"
+ ],
+ "metadata": {}
+ },
+ {
+ "outputs": [
+ {
+ "output_type": "execute_result",
+ "data": {
+ "text/plain": "Plot{Plots.GRBackend() n=1}",
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAlgAAAGQCAIAAAD9V4nPAAAABmJLR0QA/wD/AP+gvaeTAAAgAElEQVR4nO3dZ0AU194G8DNl6SAgHQEBwQqoiCgqIE0EMfaaaIyKJYkmlsDNm6j3em8ssSSaxIhKLLFrbKiIWLBhw65oFLtIV0D6zM77YQ1BXBQNu7Pl+X3amR2GPzrw7Jk5hRIEgQAAAGgrWuwCAAAAxIQgBAAArYYgBAAArYYgBAAArYYgBAAArYYgBAAArYYgBAAArYYgBAAArYYgBAAArYYgBAAAraYGQbhnz54jR47U82CpVMrzvELrAai/qqoqsUsA+BsuSLnUIAhPnjx59uzZeh7M8zz+p0F1lJeXi10CwN9wQcqlBkEIAACgOAhCAADQaghCAADQaghCAADQaghCAADQaghCAADQaghCAADQaghCAADQapoWhDsekMBEOg9jRgEAoH5YsQtoYH2cyJUCodNubl8Pxr0RJXY5AAAie/z4cWpqqux1WVmZvr6+uPW8N2tra39/f0WcWdOCkCLkG0/B1ZT2T+C2h7BdrJGFAKDV4uLiduzY0bJlS0KIIAgUpZZ/FSsqKs6cOZOVlaWIk2taEMqMcqftDam+B7lfujADnDXt9i8AQP0JgjB48OBvvvlG7EL+kZycHA8PDwWdXGNDIsye2hfOTk6V/nxDKnYtAACgujQ2CAkhHSyoE1HMTzekk1N5qSB2NQAAoJI0OQgJIc7G1Mko9mK+MPgwX45lCgEA4DUaHoSEEHNdcrAny1AkIpF7Xil2NQAAoGI0PwgJIboM2RjEdLCkuu7hHr7ATVIAAPibVgQhIYQiZH5HZmxzusse/mI+shAAAF7SliCUmdyG/rEz3WM/l/gYWQgAoFoqKyuvXr368OFDJX9f7QpCQki/pvSuMHb0Mf73OxhWAQCgKqZNm2ZsbOzr6zt//nwlf2utC0JCSGcr6mAE822adNYFdCQFAFAJY8eOzcrKGj16tPK/tTYGISGklSmV2pvd81AYfYzn0DIEAFCWuLi4nj17Vm8OGzbsu+++I4Q0b97czMxMlJI0c4q1+rDRJymR7ODDXOQBblsIaywRuyAAAKX4s1C4XKCkfhKmOlSo/Suzmw4bNiwmJiYjI8PV1TU/P3/Pnj0LFixQTjF10d4gJIQYSciuUPbTU3zwPm5PGGutrnOyAwC8g/Tnwta7SgpCByMh1J6pucfIyGj48OG//fbbf//739WrV/fo0cPOzk45xdRFq4OQEMLSZHlXZt5lqd9ubl840xwrNwGApvvAif7AScwCPv300+Dg4JkzZ8bHx//www9ilkII0dpnhLXEeNHftqMDErgTWRhWAQCgWC1btnRzc5s+fXp5eXlwcLDY5Wh9i7Dax+50E0OqXzL3kx8zyAWfDwAAFGj8+PHDhg2bM2cOTb/8e3v48OGkpKRTp04RQmJjY8PCwoKCgpRTDILwbyH2VHIEG5XE55STz1ohCwEAFMXf319HR2fUqFHVe/T09MzMzAYNGlS9qbRiEISv8DSnUiKZiAP87UJhcSeGxhNDAICGxnHcnDlzBg8ebG1tXb3Tz8/Pz89PlHrQ7qmtqTF1Moq9lC8MwspNAAANraKiwtra+tKlS3PmzBG7lpcQhHKY6ZKknqyEJsH7uPwKsasBANAgurq6+fn5J06csLe3F7uWlxCE8ukyZEN3JtSe8sfKTQAAGg1BWCeKkFntmegWtN8e/kIeshAAQDMhCN9icht6aWc6PJHb/whZCACggRCEb9e3Kb07jP3kGLfiJubnBgDQNBg+US+drKjjUWzEAf5JqTCrPfP2LwAAUBnnz5+Pi4sTu4p/pLi4WHEnRxDWVzMT6lgvttcB7uELfnlXRoK2NACog9DQ0JycnLS0NEJIVVWVRKKuS+1MmjRJQWdGEL4DG32S0osdfIiLPMBtx8pNAKAO/P39/f39Za+Li4uNjY3FrUcFoV3zbgxZsiuMdTWhuu7hnpSg+wwAgNpDEL4zhiLLujDDXOluCfzN58hCAAD1hiB8TzFe9Mz2dOBe7jhWbgIAUGcIwvc30o1e350dcIjbfBfDKgAA1BWC8B8JtqOSe7LTz0jnX0EWAgCoJQThP+VhTqX2ZjbckU5O5aW4SwoAoG4QhA3A3pA6EsleLhAGHuLLOLGrAQCAd4EgbBhmuuRAOKvHkOB9XF652NUAAEC9IQgbjC5Dfu/OhDWhAhK4B1i5CQBATSAIG5Js5abxLekue/g0rNwEAKAOEIQN7/PW9E9+dM9Ebh9WbgIAUHkKDEKe5+/du1dYWFjXAYIgZGZmZmZmCoKmBUYfJ3pPGDvmOLccKzcBAKg2RQXh9evX3dzcIiIimjZtunDhwtcPyMjI8PT0DAwMDAwM9PT0zMjIUFAlYvG1oo73Yhdelcae4zUt5wEANIiignDy5MkjR45MT08/e/bsrFmz7t69W+uAWbNmdejQ4c8///zzzz99fHxmzpypoEpE5GpCpfZmj2cJnxzjq9AyBABQSQoJwqysrCNHjkycOJEQ4ubmFhQUtHnz5lrHlJSUuLm5yV67u7uXlpYqohLRNdYlST3ZvHIh4gBXVCV2NQAA8BqFrEf48OFDExMTS0tL2aabm9v9+/drHfPtt99+8sknUqmUELJ9+/b4+Pi6zlZRUfHkyRPZqpKEEH19/VatWimibAUxZMnOUPbzU3zXPdy+HkwTQ0rsigAA4G8KCcLi4mJ9ff3qTQMDgydPntQ6huM4hmHu3btHURRN01VVdTaX7ty5c+HChRMnTsg2aZresmWLlZWV3IOrqqp4nuc4lZvfZb4X+eUW03W3dHtAVXMTPDTUFiUlJRSFjz6gKrTwgtTT02PZtySdQoLQ2tr6+fPn1ZsFBQXW1ta1jhk7duynn346duxYQsjKlSujo6MvXbok92ytW7fu2rVrTExMfb61LAj19PTet3YF+sqb2DaSRh6htgSz/jbadS1qLUEQjIyMxK4C4CVckHIp5Bmhi4uLRCK5fPmybPPMmTPt2rWrdUxOTo6Tk5PsddOmTXNychRRiar5qBm9oTs78BC3KQOdZwAAVIJCgtDAwGDUqFGTJ08+d+7c3LlzHz9+PHDgQEJIamqqt7e37JiIiIiZM2eePn36zJkzM2bMiIiIUEQlKijIjjoUwcack866wItdCwAAKObWKCFk3rx5//vf/7788ksHB4dDhw4ZGBgQQkxMTNq3by87YMmSJYsWLZoxYwYhJCIiYsqUKQqqRAW1MaNSezORB/hnFfziTgyNu6QAAOKhVH9Wl9jYWDMzMw14RlhLcRUZkMwZSqj1gYy+oj6QgMiKi4uNjY3FrgLgJVyQcmGuUdEYS8ieHqwBS4L2cblYuQkAQCQIQjHp0GRdINOjCeW3m7tdqOpNcwAAjYQgFJls5aYYL7r7PqzcBAAgAgShShjTnP7Zj+6ZyCU8RBYCACgVglBVfOBEJ/Rgo09wv6ZjiCEAgPIgCFVIR0vqRBS7+Jp0cipWbgIAUBIEoWpxMaaO92JP5wh
jjvEcWoYAAIqHIFQ5VvrkcCSbVSb0S+ZLVW7ycAAATYMgVEWGLNkVylrpk6B9XB6GGAIAKBKCUEWxNFnRjQlvQgUkcI9K8MQQAEBREISqSzbEMLoF7bebv1qALAQAUAgEoaqb3Iae60OH7udOZiMLAQAaHoJQDQxvRq/vzg5I5vY+QhYCADQwBKF6CLajdoexY49zK29hUAUAQENCEKoNH0vqeC923mWs6AsA0JAQhOrE1YQ61ovd9UCYnMpLcZcUAKAhIAjVjK0BORLJXsoXPjrKV+IuKQDAP4YgVD+mOiSpJ1spJT0TuaIqsasBAFBzCEK1pMuQTUGMmwkVvJfLKRO7GgAAdYYgVFcMRX7tygxwpjvv5u4U4YEhAMB7QhCqtxgveooHHZDAX8pHFgIAvA8Eodr7tBX9cxc6bD938AmyEADgnSEINUEfJ3pLMPvhUW7LXXQkBQB4NwhCDRFoSx2KYKeflf6ajiwEAHgHCELN0caMOhrJLL4mjT2HqWcAAOoLQahRnI2pY73Y5CfCJ8d4Di1DAIB6QBBqGmt9cjSSzSwVBh7iyzixqwEAUHkIQg1kJCG7w1g9lvQ8wBVWil0NAIBqQxBqJh2abOjOdLCguu7hnpRgWAUAQJ0QhBqLImSBLzOmOd0tgb9ViCwEAJCPFbsAUKzJbWgzXRKQwO0IZTtbUWKXAwCgctAi1Hwj3OgV3ZgPkrjEx2gXAgDUhiDUClGO9J4w9pNj3MYMDKoAAHgFglBb+FpRyRFs7Dnp91eQhQAAf0MQapFWplRqb2bdHenkVB43SQEAZBCE2sXOgEqJZNPyhI9T+Cq0DAEAEIRayEyXJPVk88uFfslcKaaeAQCthyDURgYs2RXG2uhT3fdyeeViVwMAICoEoZZiKBLXjenpQPkncA9f4IkhAGgvBKH2ogiZ1Z4Z35L228NfKUAWAoCWQhBqu0mt6Xk+dOh+7kQWshAAtBGCEMjwZvTaALb/IW7fI2QhAGgdBCEQQkiPJtTeHuzoY1z8nxhUAQDaBZNuw0sdLKgjkWx4Il9QQaZ54BMSAGgL/L2Dv7UwpVJ7s2tvS2PPYeoZANAWCEJ4ha0BSYlkj2cJo1J4DndJAUALIAihNtnUMznlwoBDfBmmngEATYcgBDkMWbI7lDXTJT0PcIWVYlcDAKBICEKQj6VJvD/TwYLqlsBlluKJIQBoLAQh1IkiZIEvM9yV7raHv1OELAQAzYQghLeI8aK/bUd338tfykcWAoAGQhDC233sTi/1o3smcscxDRsAaBwEIdRLHyd6QxDbP5n74z4GVQCARpEfhCtWrMjLy1NyKaDiuttS+8PZz07xq24hCwFAc8gPwhkzZjRp0mT48OEpKSlKLghUmbcFdSKKnXtZOv8KshAANIT8IExLS/v3v/996tSpwMDA5s2bz5s3Dw1EkHExpo71YjfckU5OxTRsAKAJ5AehnZ1dTExMRkbGwYMHvby8vvnmmyZNmgwaNCg5OVkQ8NdP29kakCORbFqe8DGmYQMA9femzjI0TYeEhGzZsuXevXtjxozZunVraGioh4fHqlWrKisx3YhWk03Dllcu9EvGNGwAoN7e0mtUEIQjR45Mnz595cqVRkZGY8aMcXJyio6ODggIqKioUE6JoJoMWLIrlLXUI+GJmIYNANRYnUGYm5u7YMGCFi1aBAUFXblyZcGCBY8fP16xYsXevXtPnTp14cKFpKQkZRYKKoilyUp/pqMl1XUP96QE98wBQC3JX5h39OjR69evFwShX79+cXFxAQEBNd/19fV1dnbOyclRSoWg0ihCvvdlLPSk3RL4A+GMWyNK7IoAAN6N/CC8du3ajBkzRo8ebW1tLfeA3377zdHRUZGFgTqJ8aJtDEj3fXxCGNO2MbIQANSJ/CA8ffo0Rb3pz1nnzp0VUw+oq5FutKkO6ZnIbQ5m/W2QhQCgNuQHIUVRgiCcPHny4sWLmZmZVlZWHh4e3bt3ZxhGyfWBGvnAiW6kQw08xP3ix/R3xux9AKAe5Adhbm7ugAEDjh07RghhWZbjOEJI27Ztd+zY0bRp0/qct7Ky8ocffkhNTXVycoqNjbWxsXn9mKysrMWLF//5559WVlaff/55mzZt3v/nANUQaEsl92QjD/AFFWRsC2QhAKgB+X+qPv7440uXLi1fvjwvL6+qqur58+cbNmzIzs7u169fPQfUT5s2bffu3dHR0eXl5WFhYVJp7XHXWVlZHTt2LCoqGjlyZPv27XNzc//pjwKqwcOcOtqL+f6qdNYFXuxaAADejno92J4/f25ubr527doPP/yw5v4jR44EBQWlp6e3aNHizSctLCy0s7O7cOFC8+bNBUFo2rRpXFxcjx49ah4zceLE4uLidevWvbXE2NhYMzOzmJiY+vw8VVVVPM/r6enV52BQnMxSoWci72dNLenMSLS4ZVhcXGxsbCx2FQAv4YKUS86fqKqqKkEQfHx8au2X7anPOPpr164ZGRk1b96cEEJRVJcuXc6ePVvrmJSUlNDQ0Llz537xxRf79+9/z/JBVdkZUCej2KxSErSPyykTuxoAgLrJeUZoaWnZpk2b5ORkWZJVS05OtrS0bNmy5VtPmp2dbW5uXr3ZuHHjrKysWsfcv39/5syZkyZNatOmzZgxY2bOnBkdHS33bLdu3bp69eqBAwdkmwzDrFixwsLCQu7Bshah7KEmiG5tZzLnGtNhp7ChS2Vbc20ccV9SUvLmDtgAyqSFF6Senh7Lyu8NU03+20uXLh02bJjsoaCNjU1eXt6+fft+/PHHZcuWlZSUlJSUEEIMDAx0dXXlfrmBgUHNhmN5eXnNXJTR19cfMmTIl19+SQjR1dWdO3duXUHo4OBgYWExePBg2SbDMI6OjjQt/3Ybbo2qmu86ES8raf/jOr92Yfo21bqbpIIgGBkZiV0FwEu4IOWSH4RDhgzJzs6ePXv27Nmza+4fMGBA9esVK1aMGTNG7pc7ODhkZWWVl5fLAun+/fuenp61jnF0dHRwcKh+/YbOMgYGBvb29iEhIfX4cUAVDXahmzei+hzkLxcIM9sz2vVxFABUnvwg/P7778vK3vJgp2vXrnW91bp1a2dn540bN44aNerOnTunTp367bffCCH3798/ffr0kCFDCCFDhgxJTEycMGECRVF79+59/ZEkaJK2janU3mzfZC79Of+bP2PwlhsVAADKI/8P0kcfffQPz7tkyZKhQ4euXbv2+vXrsvXuCSHnz5+fOnWqLAgnTpyYkJDg5eVlYmKSm5u7e/fuf/gdQcXZGpCUSHbcCb7LHm5XKONohJYhAKiEt3wyz8vLe/z4sa2tbV2TjtYlODg4IyPj+vXrjo6OdnZ2sp29e/cOCgqSvTYyMjp27Nj169elUmmLFi0kEsl7VA/qRZchqwOYH69Ju+zhtwUzvlbIQgAQX52dF1asWOHg4GBpadmuXTsbGxsrK6v58+e/Pi7+DYyNjTt16lSdgoQQHR2dWr1mWrdu7eHhgRTUKpPb0Cu6MR8c5NbcxvL2ACA++S3CZcuWTZw40dvbe/LkybJeo7
t27YqJiSkuLq7VfQbgPYQ3oY71Ynsn8RfyhEWdGPSfAQARyZlZRiqV2tvbh4WFrV69uuaIk6+//nrx4sV5eXmGhobKLBEzy2iqggoy6BAnocmmILaRjtjVKAYm8gCVggtSLjm3RnNycrKysiZNmlRr3OWkSZPKy8tv3bqlrNpAw5nrksRw1qsx1XEXd6tQG4fbA4AqkBOEOjo6hJCioqJa+2V76hpED/AeWJrM9WGmedCBCdyhTGQhAIhAThCam5t7eXlNnz49Ozu7emdRUdEXX3xha2v71hm3Ad7V2Bb0thB2xFF+3mV0nwEAZatzirWwsDBnZ+fAwEBbW9ucnJzjx4+/ePFiy5YtWJsXFKGLNXX6A6bPQT6jWPjJj9HRurnYAEA08v/edOvWLS0tbdCgQbdu3dqxY8eVK1fCw8NPnTrVr18/JdcH2sPBkEqJZPPKSdBeLFgBAMojp0VYWlq6dOnSqKio1atXK70e0GpGErI9hPn3Bb7DTm5nKNPeAuMqAEDh5LQIi4qKYmNji4uLlV8NAEXIrPbMvI50zwPcpgw8MgQAhZMThFZWVtbW1vfu3VN+NQAyQ13pxHD2Pxelw4/wzyvFrgYANJqcIKRpeu7cud9+++3169eVXxCATLvG1IW+rIMRafsHd+QpRlYAgKLI7zW6b9++oqIiT09PV1fXJk2a1OwpevDgQWXVBtpOjyFzfZgQO2HkUb6nA7W4E9ZvAoCGV2cvdU9Pz6CgICcnJ4yXAHGF2FNX+rMlHOm4i7uYj6YhADQw+R+wt2zZouQ6AN7AVIf8HshsvScNT+QmtKS/bYd5ugGgwchvEa5bt67mtDIy2dnZcXFxii8JQL6BzvS5D9iUp4J/ApdRhKYhADQM+UE4ffr0jIyMWjvv3r07btw4xZcEUCdHI+pwJDvIme68m4u7icEVANAA3mEmqxcvXhgZGSmuFID6oAiZ3IY+EskuS5cOPMTnV4hdEACouVeeEV69ejU1NZUQUlZWtmvXrmvXrlW/VV5evmHDBsy4DSqitRmV2puddYH33M6t6MZEOOCZIQC8p1eCMDk5ecqUKbLX8+fPr3Wou7s7nhGC6pANrgizF0Yd48ObYHAFALynV26NTpgwoaCgoKCgwNLSMjExsaCGsrKyW7duBQQEiFUogFxBdtSVfmwZRzrs5C7koQcNALyzVz5C6+np6enpEULOnTtnbW0tew2g4hrpkLWBzIYMac8D3GetmGketD6ahgBQb/I7yzg5OSEFQb0Mc6XP92GvFggtt3EbM6RoGwJAPckPwvz8/M8//1w2uRr1KiXXB1B/DobUlmBmSzDz0w2p7y7ueBbSEADeTv4tpEGDBp08eXLo0KHu7u40jcXCQZ10tKRORLHb7klHpPBtzMiPnRkXY3yAA4A6yQnCsrKylJSUZcuWjR07VvkFAfxzFCEDneleDvSS61LfXdwwV/o/3kwjHbHLAgCVJKe1V1JSwvN8hw4dlF8NQAPSZ0mMF50+QEIIcd9a9eM1KYe5aADgNXKC0MLCon379rKR9QDqzkKP/NiZORLJHngi9fiD2/sIDw4B4BXynxEuWbJkxIgRenp6PXr0MDAwqPmWmZmZUgoDaEitTKl9PdjkJ8IXp/kfrpFFvoyHOR4cAgAhdQVh//79s7OzR48e/fpbgoAP1KCuQuypi33ZX25IQ/ZzA5zpf3nRTQwRhwDaTn4QfvPNNyUlJUouBUAJJDSZ3Ib+yI3+7hLf9g/O14oa3ZyOcqQl6BwNoK3kB+Fnn32m5DoAlMlclyzwZf7Xgdn9ULosXTr2OD/Amf60Fe2J+6UA2ucdPgbzPF9aWqq4UgCUTJchA53pgz3ZS/1YF2MqKonvsJOLuyl9USV2ZQCgRK8EYceOHX/66SfZa0EQhg0bVrPv6ObNmw0NDZVaHYBSOBhSMV70vcHsXB8m+YngtKlqxFE++QkehwNohVeCMDs7u7i4WPZaEISNGzc+ePBAjKoAREBTJMSe2hLMXB8gaW1GTTjJt9rGzbsszSsXuzIAUCT0EACozUafxHjRtwex6wKZu8WC+9aqQYf45CfoMA2gmRCEAHXytqCWd2XuDZaE2FMx53jHjVzsOf7BCwQigEZBEAK8RSMdEt2CTuvDJoYzhBCfnVzofm7rPWkVJmwD0AgIQoD6am1GzfVhHg6VRLeg425KnTZVTU7lrz1DAxFAvVE1Z4pxcnJ69OhR9aYgCLUWIBQEQfkzy8TGxpqZmcXExNTn4KqqKp7nsaowKMGfhcKqW9K1t6UuJtRHzehBLrS5bu1jiouLjY2NxagOQA5ckHK9MqB+yJAheXl5YpUCoF7cG1HzOjLf+TBHMoW1t6X/d77Kz5oa6EwPdKb15c9UAQCq6JXf13nz5olVB4CaYigSYk+F2DNFVczO+9Kt96RTTvMRDvQINzrYHvPUAKgBfHAFaBgmEjLCjR7hRj8pEbbdE6af5QsqSH8HdkIbwa0REhFAdaGzDEADszekJrehL/Zl9/VgCCH+CVyHndyP16S5GJgPoJIQhACK0tqM+rcn93iYZK4Pk5YntNhaFZXEbb0nrcS4CwBVglujAIpV/RCxsJLZfl/68w3pxJP8IBf6w2Z0ZyvcMgUQH4IQQEka6ZBP3OlP3OmHL4Tf7wijj/ECIZ+40x+50Tb6YhcHoMVwaxRA2RyNqK/b0jcGsL8HMneKhJZbqzBVDYCI5AfhyZMnqxdgKisrmzZtmr+//5QpU7AeIUADks1l+nT4K1PVXC3AVDUASiU/CD/88MPz58/LXs+YMWPx4sUURcXHx48dO1aJtQFoBb2/1gc+Gsma6ZJeWB8YQLnkBOGLFy/u37/fpUsXQgjP82vWrPniiy9SUlK2bdu2efPmwsJCpRcJoBXcG1Gz2jPV6wPbb8DyTwDKIKezTFFRESGkcePGhJALFy7k5uYOHDiQENKtWzee5+/fv+/l5aXkKgG0B/1XL9OCCmbbPem0M3w5T0a50x+709boUwOgAHJahJaWljRN3759mxCybdu2Ro0aeXt7E0Jki9czDKPkEgG0k7kuiW5BX+rHxvszt4uEVtuqRqZgsQuAhienRSiRSCIiIsaNG9e/f/+4uLj+/ftLJBJCyJUrV2iadnR0VHqRAFrNz5rys2YWd2Lib0nDE/mmRiTGi45yRJdvgIYh/3dp+fLlbdq02bBhQ0BAwJw5c2Q7V69e3bZtWxMTEyWWBwAvGUvI5Db03cFsdAv6q7PSDju5tbelPNqHAP8Ypfz1Bd8V1iME9aWg5d+kAtn7SPrdJWleOfmsFT2uJa2HRxZQD1iPUK563V15+vRpUlLS06dPFV0NANQHTZEoRzq1N/ubP5OcKXXeVDXrAl9YKXZZAOpJfhAOGTJk5syZstfHjh1r1qxZjx49XFxcdu/ercTaAOAtutpQe8LYxJ7s3SLisrlqciqfWarq93gAVI2cIOQ4bufOnbJxhISQ2NhYV1fXlJSUwYMHT5o0ied55VYIAG/hZU6tDWTO9WGrpMRjOzftDF/KiV0TgPqQE4QFBQUVFRUuLi6Ek
...(remainder of base64-encoded PNG output elided: plot of cross entropy versus the tuned hyperparameter values)...",
+ "text/html": [
+ "\n",
+ "\n"
+ ],
+ "image/svg+xml": [
+ "\n",
+ "\n"
+ ]
+ },
+ "metadata": {},
+ "execution_count": 10
+ }
+ ],
+ "cell_type": "code",
+ "source": [
+ "plot(\n",
+ " curve.parameter_values,\n",
+ " curve.measurements,\n",
+ " xlab=curve.parameter_name,\n",
+ " xscale=curve.parameter_scale,\n",
+ " ylab = \"Cross Entropy\",\n",
+ ")"
+ ],
+ "metadata": {},
+ "execution_count": 10
+ },
+ {
+ "cell_type": "markdown",
+ "source": [
+ "---\n",
+ "\n",
+ "*This notebook was generated using [Literate.jl](https://github.com/fredrikekre/Literate.jl).*"
+ ],
+ "metadata": {}
+ }
+ ],
+ "nbformat_minor": 3,
+ "metadata": {
+ "language_info": {
+ "file_extension": ".jl",
+ "mimetype": "application/julia",
+ "name": "julia",
+ "version": "1.10.3"
+ },
+ "kernelspec": {
+ "name": "julia-1.10",
+ "display_name": "Julia 1.10.3",
+ "language": "julia"
+ }
+ },
+ "nbformat": 4
+}
diff --git a/docs/src/common_workflows/hyperparameter_tuning/notebook.jl b/docs/src/common_workflows/hyperparameter_tuning/notebook.jl
index aa39830d..3c85ec16 100644
--- a/docs/src/common_workflows/hyperparameter_tuning/notebook.jl
+++ b/docs/src/common_workflows/hyperparameter_tuning/notebook.jl
@@ -24,7 +24,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
# ### Loading and Splitting the Data
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
diff --git a/docs/src/common_workflows/hyperparameter_tuning/notebook.md b/docs/src/common_workflows/hyperparameter_tuning/notebook.md
index ae50dd14..d6649fe0 100644
--- a/docs/src/common_workflows/hyperparameter_tuning/notebook.md
+++ b/docs/src/common_workflows/hyperparameter_tuning/notebook.md
@@ -26,7 +26,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
````@example hyperparameter_tuning
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
nothing #hide
````
diff --git a/docs/src/common_workflows/hyperparameter_tuning/notebook.unexecuted.ipynb b/docs/src/common_workflows/hyperparameter_tuning/notebook.unexecuted.ipynb
index 2060f391..bbb6280a 100644
--- a/docs/src/common_workflows/hyperparameter_tuning/notebook.unexecuted.ipynb
+++ b/docs/src/common_workflows/hyperparameter_tuning/notebook.unexecuted.ipynb
@@ -73,7 +73,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters"
],
"metadata": {},
diff --git a/docs/src/common_workflows/incremental_training/notebook.ipynb b/docs/src/common_workflows/incremental_training/notebook.ipynb
index b85e848b..e3b44f52 100644
--- a/docs/src/common_workflows/incremental_training/notebook.ipynb
+++ b/docs/src/common_workflows/incremental_training/notebook.ipynb
@@ -7,6 +7,14 @@
],
"metadata": {}
},
+ {
+ "cell_type": "markdown",
+ "source": [
+ "This demonstration is available as a Jupyter notebook or julia script\n",
+ "[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/incremental_training)."
+ ],
+ "metadata": {}
+ },
{
"cell_type": "markdown",
"source": [
@@ -36,9 +44,7 @@
{
"cell_type": "markdown",
"source": [
- "**Julia version** is assumed to be 1.10.* This tutorial is available as a Jupyter\n",
- "notebook or julia script\n",
- "[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/incremental_training)."
+ "**Julia version** is assumed to be 1.10.*"
],
"metadata": {}
},
@@ -73,7 +79,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X) # To be compatible with type of network network parameters\n",
"(X_train, X_test), (y_train, y_test) = partition(\n",
" (X, y), 0.8,\n",
@@ -113,7 +119,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
+ "text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (5, 4), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 4
@@ -161,7 +167,7 @@
{
"output_type": "execute_result",
"data": {
- "text/plain": "trained Machine; caches model-specific representations of data\n model: NeuralNetworkClassifier(builder = MLP(hidden = (5, 4), …), …)\n args: \n 1:\tSource @068 ⏎ ScientificTypesBase.Table{AbstractVector{ScientificTypesBase.Continuous}}\n 2:\tSource @767 ⏎ AbstractVector{ScientificTypesBase.Multiclass{3}}\n"
+ "text/plain": "trained Machine; caches model-specific representations of data\n model: NeuralNetworkClassifier(builder = MLP(hidden = (5, 4), …), …)\n args: \n 1:\tSource @547 ⏎ Table{AbstractVector{Continuous}}\n 2:\tSource @645 ⏎ AbstractVector{Multiclass{3}}\n"
},
"metadata": {},
"execution_count": 5
diff --git a/docs/src/common_workflows/incremental_training/notebook.jl b/docs/src/common_workflows/incremental_training/notebook.jl
index 20d38b53..6d44c046 100644
--- a/docs/src/common_workflows/incremental_training/notebook.jl
+++ b/docs/src/common_workflows/incremental_training/notebook.jl
@@ -22,7 +22,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
# ### Loading and Splitting the Data
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X) # To be compatible with the type of network parameters
(X_train, X_test), (y_train, y_test) = partition(
(X, y), 0.8,
diff --git a/docs/src/common_workflows/incremental_training/notebook.md b/docs/src/common_workflows/incremental_training/notebook.md
index 94be1207..3810f90c 100644
--- a/docs/src/common_workflows/incremental_training/notebook.md
+++ b/docs/src/common_workflows/incremental_training/notebook.md
@@ -4,11 +4,12 @@ EditURL = "notebook.jl"
# Incremental Training with MLJFlux
+This demonstration is available as a Jupyter notebook or julia script
+[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/incremental_training).
+
In this workflow example we explore how to incrementally train MLJFlux models.
-**Julia version** is assumed to be 1.10.* This tutorial is available as a Jupyter
-notebook or julia script
-[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/incremental_training).
+**Julia version** is assumed to be 1.10.*
### Basic Imports
@@ -23,7 +24,7 @@ import Optimisers # native Flux.jl optimisers no longer supported
````@example incremental_training
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X) # To be compatible with the type of network parameters
(X_train, X_test), (y_train, y_test) = partition(
(X, y), 0.8,
diff --git a/docs/src/common_workflows/incremental_training/notebook.unexecuted.ipynb b/docs/src/common_workflows/incremental_training/notebook.unexecuted.ipynb
index 4d12d4d7..b9227430 100644
--- a/docs/src/common_workflows/incremental_training/notebook.unexecuted.ipynb
+++ b/docs/src/common_workflows/incremental_training/notebook.unexecuted.ipynb
@@ -7,6 +7,14 @@
],
"metadata": {}
},
+ {
+ "cell_type": "markdown",
+ "source": [
+ "This demonstration is available as a Jupyter notebook or julia script\n",
+ "[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/incremental_training)."
+ ],
+ "metadata": {}
+ },
{
"cell_type": "markdown",
"source": [
@@ -28,9 +36,7 @@
{
"cell_type": "markdown",
"source": [
- "**Julia version** is assumed to be 1.10.* This tutorial is available as a Jupyter\n",
- "notebook or julia script\n",
- "[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/incremental_training)."
+ "**Julia version** is assumed to be 1.10.*"
],
"metadata": {}
},
@@ -65,7 +71,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X) # To be compatible with type of network network parameters\n",
"(X_train, X_test), (y_train, y_test) = partition(\n",
" (X, y), 0.8,\n",
diff --git a/docs/src/common_workflows/live_training/notebook.jl b/docs/src/common_workflows/live_training/notebook.jl
index 16bae98a..de1a6fb8 100644
--- a/docs/src/common_workflows/live_training/notebook.jl
+++ b/docs/src/common_workflows/live_training/notebook.jl
@@ -23,7 +23,7 @@ using Plots
# ### Loading and Splitting the Data
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
diff --git a/docs/src/common_workflows/live_training/notebook.md b/docs/src/common_workflows/live_training/notebook.md
index edc1b140..14b77358 100644
--- a/docs/src/common_workflows/live_training/notebook.md
+++ b/docs/src/common_workflows/live_training/notebook.md
@@ -4,7 +4,7 @@ EditURL = "notebook.jl"
# Live Training with MLJFlux
-This tutorial is available as a Jupyter notebook or julia script
+This demonstration is available as a Jupyter notebook or julia script
[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/live_training).
**Julia version** is assumed to be 1.10.*
@@ -26,7 +26,7 @@ using Plots
````@example live_training
iris = RDatasets.dataset("datasets", "iris");
-y, X = unpack(iris, ==(:Species), colname -> true, rng=123);
+y, X = unpack(iris, ==(:Species), rng=123);
X = Float32.(X); # To be compatible with the type of network parameters
nothing #hide
````
diff --git a/docs/src/common_workflows/live_training/notebook.unexecuted.ipynb b/docs/src/common_workflows/live_training/notebook.unexecuted.ipynb
index a647a39a..fb86f8e7 100644
--- a/docs/src/common_workflows/live_training/notebook.unexecuted.ipynb
+++ b/docs/src/common_workflows/live_training/notebook.unexecuted.ipynb
@@ -10,7 +10,7 @@
{
"cell_type": "markdown",
"source": [
- "This tutorial is available as a Jupyter notebook or julia script\n",
+ "This demonstration is available as a Jupyter notebook or julia script\n",
"[here](https://github.com/FluxML/MLJFlux.jl/tree/dev/docs/src/common_workflows/live_training)."
],
"metadata": {}
@@ -73,7 +73,7 @@
"cell_type": "code",
"source": [
"iris = RDatasets.dataset(\"datasets\", \"iris\");\n",
- "y, X = unpack(iris, ==(:Species), colname -> true, rng=123);\n",
+ "y, X = unpack(iris, ==(:Species), rng=123);\n",
"X = Float32.(X); # To be compatible with type of network network parameters"
],
"metadata": {},
diff --git a/docs/src/index.md b/docs/src/index.md
index aba818d5..b24a0537 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -72,8 +72,9 @@ As in the example above, any MLJFlux model has a `builder` hyperparameter, an ob
encoding instructions for creating a neural network given the data that the model
eventually sees (e.g., the number of classes in a classification problem). While each MLJ
model has a simple default builder, users may need to define custom builders to get
-optimal results, and this will require familiarity with the [Flux
-API](https://fluxml.ai/Flux.jl/stable/) for defining a neural network chain.
+optimal results (see [Defining Custom Builders](@ref)) and this will require familiarity
+with the [Flux API](https://fluxml.ai/Flux.jl/stable/) for defining a neural network
+chain.
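+
+For example, a custom builder for a two-hidden-layer network might look like the
+following (a minimal sketch of the builder interface described at [Defining Custom
+Builders](@ref); the name `MyBuilder` and the layer widths are illustrative only):
+
+```julia
+import MLJFlux
+import Flux
+
+mutable struct MyBuilder <: MLJFlux.Builder
+    n1::Int  # width of first hidden layer
+    n2::Int  # width of second hidden layer
+end
+
+function MLJFlux.build(b::MyBuilder, rng, n_in, n_out)
+    init = Flux.glorot_uniform(rng)
+    return Flux.Chain(
+        Flux.Dense(n_in, b.n1, Flux.relu, init=init),
+        Flux.Dense(b.n1, b.n2, Flux.relu, init=init),
+        Flux.Dense(b.n2, n_out, init=init),
+    )
+end
+```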
## Flux or MLJFlux?
diff --git a/docs/src/interface/Custom Builders.md b/docs/src/interface/Custom Builders.md
index 42543ed2..cc9fd698 100644
--- a/docs/src/interface/Custom Builders.md
+++ b/docs/src/interface/Custom Builders.md
@@ -23,7 +23,7 @@ end
```
Note here that `n_in` and `n_out` depend on the size of the data (see
-[Table 1](@ref Models).
+[Table 1](@ref Models)).
For a concrete image classification example, see [Using MLJ to classify the MNIST image
dataset](@ref).
@@ -41,9 +41,8 @@ This method must return a `Flux.Chain` instance, `chain`, subject to the
following conditions:
- `chain(x)` must make sense:
- - for any `x <: Array{<:AbstractFloat, 2}` of size `(n_in,
- batch_size)` where `batch_size` is any integer (for use with one
- of the first three model types); or
+ - for any `x <: Array{<:AbstractFloat, 2}` of size `(n_in, batch_size)` where
+ `batch_size` is any integer (for all models except `ImageClassifier`); or
- for any `x <: Array{<:Float32, 4}` of size `(W, H, n_channels,
batch_size)`, where `(W, H) = n_in`, `n_channels` is 1 or 3, and
`batch_size` is any integer (for use with `ImageClassifier`)
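+
+As a quick sanity check of the first condition, one can build a chain by hand and apply
+it to a dummy batch (a minimal sketch, here using the built-in `MLP` builder):
+
+```julia
+import MLJFlux
+using Random
+
+builder = MLJFlux.MLP(hidden=(5, 4))
+chain = MLJFlux.build(builder, Random.default_rng(), 4, 3)  # n_in = 4, n_out = 3
+x = rand(Float32, 4, 16)   # size (n_in, batch_size)
+size(chain(x)) == (3, 16)  # true: output has size (n_out, batch_size)
+```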
diff --git a/docs/src/interface/Summary.md b/docs/src/interface/Summary.md
index cc607e53..6f5f0aec 100644
--- a/docs/src/interface/Summary.md
+++ b/docs/src/interface/Summary.md
@@ -1,19 +1,17 @@
## Models
-MLJFlux provides four model types, for use with input features `X` and
-targets `y` of the [scientific
-type](https://alan-turing-institute.github.io/MLJScientificTypes.jl/dev/)
-indicated in the table below. The parameters `n_in`, `n_out` and `n_channels`
-refer to information passed to the builder, as described under
-[Defining Custom Builders](@ref).
-
-| Model Type | Prediction type | `scitype(X) <: _` | `scitype(y) <: _` |
-|---------------------------------------------|-----------------|-----------------------------------------------------|-------------------------------------------------|
-| [`NeuralNetworkRegressor`](@ref) | `Deterministic` | `Table(Continuous)` with `n_in` columns | `AbstractVector{<:Continuous)` (`n_out = 1`) |
-| [`MultitargetNeuralNetworkRegressor`](@ref) | `Deterministic` | `Table(Continuous)` with `n_in` columns | `<: Table(Continuous)` with `n_out` columns |
-| [`NeuralNetworkClassifier`](@ref) | `Probabilistic` | `<:Table(Continuous)` with `n_in` columns | `AbstractVector{<:Finite}` with `n_out` classes |
-| [`NeuralNetworkBinaryClassifier`](@ref) | `Probabilistic` | `<:Table(Continuous)` with `n_in` columns | `AbstractVector{<:Finite{2}}` (`n_out = 2`) |
-| [`ImageClassifier`](@ref) | `Probabilistic` | `AbstractVector(<:Image{W,H})` with `n_in = (W, H)` | `AbstractVector{<:Finite}` with `n_out` classes |
+MLJFlux provides the following model types, for use with input features `X` and targets `y` of
+the [scientific type](https://juliaai.github.io/ScientificTypes.jl/dev/)
+indicated in the table below. The parameters `n_in`, `n_out` and `n_channels` refer to
+information passed to the builder, as described under [Defining Custom Builders](@ref).
+
+| Model Type | Prediction type | `scitype(X) <: _` | `scitype(y) <: _` |
+|---------------------------------------------|-----------------|-------------------------------------------------------------------------|-------------------------------------------------|
+| [`NeuralNetworkRegressor`](@ref)            | `Deterministic` | `AbstractMatrix{Continuous}` or `Table(Continuous)` with `n_in` columns  | `AbstractVector{<:Continuous}` (`n_out = 1`)     |
+| [`MultitargetNeuralNetworkRegressor`](@ref) | `Deterministic` | `AbstractMatrix{Continuous}` or `Table(Continuous)` with `n_in` columns | `<: Table(Continuous)` with `n_out` columns |
+| [`NeuralNetworkClassifier`](@ref) | `Probabilistic` | `AbstractMatrix{Continuous}` or `Table(Continuous)` with `n_in` columns | `AbstractVector{<:Finite}` with `n_out` classes |
+| [`NeuralNetworkBinaryClassifier`](@ref) | `Probabilistic` | `AbstractMatrix{Continuous}` or `Table(Continuous)` with `n_in` columns | `AbstractVector{<:Finite{2}}` (but `n_out = 1`) |
+| [`ImageClassifier`](@ref)                   | `Probabilistic` | `AbstractVector{<:Image{W,H}}` with `n_in = (W, H)`                      | `AbstractVector{<:Finite}` with `n_out` classes |
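+
+For example, one can check that the Iris setup used elsewhere in this documentation
+matches the `NeuralNetworkClassifier` row of the table (a minimal sketch):
+
+```julia
+using MLJ, RDatasets
+
+iris = RDatasets.dataset("datasets", "iris");
+y, X = unpack(iris, ==(:Species), rng=123);
+scitype(X) <: Table(Continuous)         # true
+scitype(y) <: AbstractVector{<:Finite}  # true
+```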
```@raw html
@@ -33,23 +31,24 @@ particular, an MLJ model does not store learned parameters.
```
```@raw html
-Dealing with non-tabular input
+Are observations rows or columns?
```
-Any `AbstractMatrix{<:AbstractFloat}` object `Xmat` can be forced to
-have scitype `Table(Continuous)` by replacing it with ` X =
-MLJ.table(Xmat)`. Furthermore, this wrapping, and subsequent
-unwrapping under the hood, will compile to a no-op. At present this
-includes support for sparse matrix data, but the implementation has
-not been optimized for sparse data at this time and so should be used
-with caution.
-
-Instructions for coercing common image formats into some
-`AbstractVector{<:Image}` are
-[here](https://juliaai.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data).
+
+In MLJ the convention for two-dimensional data (tables and matrices) is
+**rows=observations**. For matrices, Flux has the opposite convention. If your data is a
+matrix whose columns index the observations, then the most efficient option is to
+present the `adjoint` or `transpose` of your matrix to MLJFlux models. Otherwise, you can
+use the matrix as is, or transform it once with `permutedims` and thereafter present the
+`adjoint` or `transpose` for efficient MLJFlux training.
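+
+For example, given a hypothetical matrix `Xmat` stored in the Flux convention (columns
+index the observations), a minimal sketch:
+
+```julia
+Xmat = rand(Float32, 4, 150)  # 4 features × 150 observations (Flux convention)
+X = Xmat'                     # lazy adjoint: rows now index observations (MLJ convention)
+```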
+
```@raw html
```
+Instructions for coercing common image formats into some `AbstractVector{<:Image}` are
+[here](https://juliaai.github.io/ScientificTypes.jl/dev/#Type-coercion-for-image-data).
+
+
```@raw html
Fitting and warm restarts
```
diff --git a/readme_figure.png b/readme_figure.png
index c5ad0267..77554bfd 100644
Binary files a/readme_figure.png and b/readme_figure.png differ
diff --git a/src/types.jl b/src/types.jl
index b6e9af9b..e7bb880d 100644
--- a/src/types.jl
+++ b/src/types.jl
@@ -950,7 +950,7 @@ We arrange for standardization of the target by wrapping our model
in a pipeline:
```julia
-pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer)
+pipe = Standardizer |> TransformedTargetModel(model, transformer=Standardizer)
```
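+
+For instance, a minimal sketch of building and fitting the machine (assuming `X` and `y`
+as constructed earlier in this docstring):
+
+```julia
+mach = machine(pipe, X, y)
+fit!(mach, verbosity=2)
+```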
If we fit with a high verbosity (>1), we will see the losses during training. We can also
@@ -1166,7 +1166,7 @@ We will arrange for standardization of the target by wrapping our model
in a pipeline:
```julia
-pipe = Standardizer |> TransformedTargetModel(model, target=Standardizer)
+pipe = Standardizer |> TransformedTargetModel(model, transformer=Standardizer)
```
If we fit with a high verbosity (>1), we will see the losses during training. We can also