Commit
✏️ Fix indentations
EssamWisam committed Jun 7, 2024
1 parent 2698413 commit 958b9fe
Showing 3 changed files with 36 additions and 25 deletions.
First changed file:
@@ -31,7 +31,18 @@
"metadata": {}
},
{
"outputs": [],
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"┌ Warning: The project dependencies or compat requirements have changed since the manifest was last resolved.\n",
"│ It is recommended to `Pkg.resolve()` or consider `Pkg.update()` if necessary.\n",
"└ @ Pkg.API /Applications/Julia-1.10.app/Contents/Resources/julia/share/julia/stdlib/v1.10/Pkg/src/API.jl:1800\n",
"[ Info: Precompiling RDatasets [ce6b1742-4840-55fa-b093-852dadbb1d8b]\n"
]
}
],
"cell_type": "code",
"source": [
"using MLJ # Has MLJFlux models\n",
@@ -100,7 +111,7 @@
{
"output_type": "execute_result",
"data": {
"text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (1, 1, 1), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any, Any}()), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
"text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (1, 1, 1), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Flux.Optimise.Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any, Any}()), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 3
@@ -149,25 +160,25 @@
"\tneuron_step::Int,\n",
"\tnum_layers::Int,\n",
")\n",
" # Define the range of neurons\n",
"\t# Define the range of neurons\n",
"\tneuron_range = min_neurons:neuron_step:max_neurons\n",
"\n",
" # Empty list to store the network configurations\n",
"\t# Empty list to store the network configurations\n",
"\tnetworks = Vector{Tuple{Vararg{Int, num_layers}}}()\n",
"\n",
"\t# Recursive helper function to generate all combinations of tuples\n",
"\tfunction generate_tuple(current_layers, remaining_layers)\n",
"\t\tif remaining_layers > 0\n",
"\t\t\tfor n in neuron_range\n",
"\t\t\t\t# current_layers =[] then current_layers=[(min_neurons)],\n",
" # [(min_neurons+neuron_step)], [(min_neurons+2*neuron_step)],...\n",
"\t\t\t\t# [(min_neurons+neuron_step)], [(min_neurons+2*neuron_step)],...\n",
"\t\t\t\t# for each of these we call generate_layers again which appends\n",
" # the n combinations for each one of them\n",
"\t\t\t\t# the n combinations for each one of them\n",
"\t\t\t\tgenerate_tuple(vcat(current_layers, [n]), remaining_layers - 1)\n",
"\t\t\tend\n",
"\t\telse\n",
"\t\t\t# in the base case, no more layers to \"recurse on\"\n",
" # and we just append the current_layers as a tuple\n",
"\t\t\t# and we just append the current_layers as a tuple\n",
"\t\t\tpush!(networks, tuple(current_layers...))\n",
"\t\tend\n",
"\tend\n",
@@ -260,7 +271,7 @@
{
"output_type": "execute_result",
"data": {
"text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (33, 25, 37), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any, Any}()), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = CPU1{Nothing}(nothing))"
"text/plain": "NeuralNetworkClassifier(\n builder = MLP(\n hidden = (25, 53, 45), \n σ = NNlib.relu), \n finaliser = NNlib.softmax, \n optimiser = Flux.Optimise.Adam(0.01, (0.9, 0.999), 1.0e-8, IdDict{Any, Any}()), \n loss = Flux.Losses.crossentropy, \n epochs = 10, \n batch_size = 8, \n lambda = 0.0, \n alpha = 0.0, \n rng = 42, \n optimiser_changes_trigger_retraining = false, \n acceleration = ComputationalResources.CPU1{Nothing}(nothing))"
},
"metadata": {},
"execution_count": 7
@@ -294,9 +305,9 @@
{
"output_type": "execute_result",
"data": {
"text/plain": "\u001b[1m10×2 DataFrame\u001b[0m\n\u001b[1m Row \u001b[0m│\u001b[1m mlp \u001b[0m\u001b[1m measurement \u001b[0m\n\u001b[90m MLP… \u001b[0m\u001b[90m Float64 \u001b[0m\n─────┼────────────────────────────────────────────\n 1 │ MLP(hidden = (33, 25, 37), …) 0.0830329\n 2 │ MLP(hidden = (45, 9, 29), …) 0.0835569\n 3 │ MLP(hidden = (37, 37, 49), …) 0.0904466\n 4 │ MLP(hidden = (25, 17, 57), …) 0.091456\n 5 │ MLP(hidden = (33, 45, 57), …) 0.0975553\n 6 │ MLP(hidden = (13, 9, 45), …) 0.097908\n 7 │ MLP(hidden = (37, 37, 13), …) 0.0985847\n 8 │ MLP(hidden = (49, 21, 21), …) 0.102274\n 9 │ MLP(hidden = (29, 17, 53), …) 0.103724\n 10 │ MLP(hidden = (13, 53, 49), …) 0.105756",
"text/plain": "\u001b[1m10×2 DataFrame\u001b[0m\n\u001b[1m Row \u001b[0m│\u001b[1m mlp \u001b[0m\u001b[1m measurement \u001b[0m\n\u001b[90m MLP… \u001b[0m\u001b[90m Float64 \u001b[0m\n─────┼────────────────────────────────────────────\n 1 │ MLP(hidden = (25, 53, 45), …) 0.0865692\n 2 │ MLP(hidden = (49, 41, 49), …) 0.0870145\n 3 │ MLP(hidden = (25, 61, 21), …) 0.0870776\n 4 │ MLP(hidden = (45, 21, 41), …) 0.0921284\n 5 │ MLP(hidden = (49, 13, 33), …) 0.0941658\n 6 │ MLP(hidden = (21, 49, 53), …) 0.100384\n 7 │ MLP(hidden = (33, 57, 61), …) 0.101213\n 8 │ MLP(hidden = (33, 49, 9), …) 0.10241\n 9 │ MLP(hidden = (17, 37, 17), …) 0.10542\n 10 │ MLP(hidden = (29, 49, 17), …) 0.108438",
"text/html": [
"<div><div style = \"float: left;\"><span>10×2 DataFrame</span></div><div style = \"clear: both;\"></div></div><div class = \"data-frame\" style = \"overflow-x: scroll;\"><table class = \"data-frame\" style = \"margin-bottom: 6px;\"><thead><tr class = \"header\"><th class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">Row</th><th style = \"text-align: left;\">mlp</th><th style = \"text-align: left;\">measurement</th></tr><tr class = \"subheader headerLastRow\"><th class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\"></th><th title = \"MLJFlux.MLP{3}\" style = \"text-align: left;\">MLP…</th><th title = \"Float64\" style = \"text-align: left;\">Float64</th></tr></thead><tbody><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">1</td><td style = \"text-align: left;\">MLP(hidden = (33, 25, 37), …)</td><td style = \"text-align: right;\">0.0830329</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">2</td><td style = \"text-align: left;\">MLP(hidden = (45, 9, 29), …)</td><td style = \"text-align: right;\">0.0835569</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">3</td><td style = \"text-align: left;\">MLP(hidden = (37, 37, 49), …)</td><td style = \"text-align: right;\">0.0904466</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">4</td><td style = \"text-align: left;\">MLP(hidden = (25, 17, 57), …)</td><td style = \"text-align: right;\">0.091456</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">5</td><td style = \"text-align: left;\">MLP(hidden = (33, 45, 57), …)</td><td style = \"text-align: right;\">0.0975553</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">6</td><td style = \"text-align: left;\">MLP(hidden = (13, 9, 45), …)</td><td style = \"text-align: right;\">0.097908</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">7</td><td style = \"text-align: left;\">MLP(hidden = (37, 37, 13), …)</td><td style = \"text-align: right;\">0.0985847</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">8</td><td style = \"text-align: left;\">MLP(hidden = (49, 21, 21), …)</td><td style = \"text-align: right;\">0.102274</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">9</td><td style = \"text-align: left;\">MLP(hidden = (29, 17, 53), …)</td><td style = \"text-align: right;\">0.103724</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">10</td><td style = \"text-align: left;\">MLP(hidden = (13, 53, 49), …)</td><td style = \"text-align: right;\">0.105756</td></tr></tbody></table></div>"
"<div><div style = \"float: left;\"><span>10×2 DataFrame</span></div><div style = \"clear: both;\"></div></div><div class = \"data-frame\" style = \"overflow-x: scroll;\"><table class = \"data-frame\" style = \"margin-bottom: 6px;\"><thead><tr class = \"header\"><th class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">Row</th><th style = \"text-align: left;\">mlp</th><th style = \"text-align: left;\">measurement</th></tr><tr class = \"subheader headerLastRow\"><th class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\"></th><th title = \"MLJFlux.MLP{3}\" style = \"text-align: left;\">MLP…</th><th title = \"Float64\" style = \"text-align: left;\">Float64</th></tr></thead><tbody><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">1</td><td style = \"text-align: left;\">MLP(hidden = (25, 53, 45), …)</td><td style = \"text-align: right;\">0.0865692</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">2</td><td style = \"text-align: left;\">MLP(hidden = (49, 41, 49), …)</td><td style = \"text-align: right;\">0.0870145</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">3</td><td style = \"text-align: left;\">MLP(hidden = (25, 61, 21), …)</td><td style = \"text-align: right;\">0.0870776</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">4</td><td style = \"text-align: left;\">MLP(hidden = (45, 21, 41), …)</td><td style = \"text-align: right;\">0.0921284</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">5</td><td style = \"text-align: left;\">MLP(hidden = (49, 13, 33), …)</td><td style = \"text-align: right;\">0.0941658</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">6</td><td style = \"text-align: left;\">MLP(hidden = (21, 49, 53), …)</td><td style = \"text-align: right;\">0.100384</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">7</td><td style = \"text-align: left;\">MLP(hidden = (33, 57, 61), …)</td><td style = \"text-align: right;\">0.101213</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">8</td><td style = \"text-align: left;\">MLP(hidden = (33, 49, 9), …)</td><td style = \"text-align: right;\">0.10241</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">9</td><td style = \"text-align: left;\">MLP(hidden = (17, 37, 17), …)</td><td style = \"text-align: right;\">0.10542</td></tr><tr><td class = \"rowNumber\" style = \"font-weight: bold; text-align: right;\">10</td><td style = \"text-align: left;\">MLP(hidden = (29, 49, 17), …)</td><td style = \"text-align: right;\">0.108438</td></tr></tbody></table></div>"
]
},
"metadata": {},
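The 10×2 DataFrame above ranks candidate MLP builders by out-of-sample cross-entropy. Below is a sketch of how such a ranking is typically assembled with MLJ tuning, reusing the `clf` sketch earlier; the names `X` and `y`, the choice of RandomSearch, and `n = 100` are assumptions, not lines from the notebook:

using MLJ, DataFrames

r = range(clf, :(builder.hidden); values = networks)  # nominal range over the generated architectures
tuned_clf = TunedModel(
    model = clf,
    tuning = RandomSearch(),  # assumed; an exhaustive Grid() would also work
    range = r,
    measure = cross_entropy,
    n = 100,                  # number of sampled architectures (assumed)
)
mach = machine(tuned_clf, X, y)
fit!(mach)

history = report(mach).history
df = DataFrame(
    mlp = [entry.model.builder for entry in history],
    measurement = [entry.measurement[1] for entry in history],
)
first(sort(df, :measurement), 10)  # the ten best architectures, as in the table above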
Second changed file:
@@ -50,25 +50,25 @@ function generate_networks(;
neuron_step::Int,
num_layers::Int,
)
-## Define the range of neurons
-neuron_range = min_neurons:neuron_step:max_neurons
+## Define the range of neurons
+neuron_range = min_neurons:neuron_step:max_neurons

-## Empty list to store the network configurations
-networks = Vector{Tuple{Vararg{Int, num_layers}}}()
+## Empty list to store the network configurations
+networks = Vector{Tuple{Vararg{Int, num_layers}}}()

## Recursive helper function to generate all combinations of tuples
function generate_tuple(current_layers, remaining_layers)
if remaining_layers > 0
for n in neuron_range
-## current_layers =[] then current_layers=[(min_neurons)],
-## [(min_neurons+neuron_step)], [(min_neurons+2*neuron_step)],...
-## for each of these we call generate_tuple again, which appends
-## the n combinations for each one of them
+## current_layers =[] then current_layers=[(min_neurons)],
+## [(min_neurons+neuron_step)], [(min_neurons+2*neuron_step)],...
+## for each of these we call generate_tuple again, which appends
+## the n combinations for each one of them
generate_tuple(vcat(current_layers, [n]), remaining_layers - 1)
end
else
-## in the base case, no more layers to "recurse on"
-## and we just append the current_layers as a tuple
+## in the base case, no more layers to "recurse on"
+## and we just append the current_layers as a tuple
push!(networks, tuple(current_layers...))
end
end
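If, as the doubled `##` suggests, this second file is a Literate.jl source, the comment style is deliberate: Literate treats a line beginning with a single `#` as markdown, while `##` marks a comment that survives as an ordinary code comment in the generated script and notebook. A minimal sketch of the convention, under that assumption:

# # A section heading: a single `#` line becomes markdown in the generated notebook
## this line stays a code comment in the generated script
x = 1 + 1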
Third changed file:
@@ -55,25 +55,25 @@ function generate_networks(;
neuron_step::Int,
num_layers::Int,
)
-# Define the range of neurons
+# Define the range of neurons
neuron_range = min_neurons:neuron_step:max_neurons
-# Empty list to store the network configurations
+# Empty list to store the network configurations
networks = Vector{Tuple{Vararg{Int, num_layers}}}()
# Recursive helper function to generate all combinations of tuples
function generate_tuple(current_layers, remaining_layers)
if remaining_layers > 0
for n in neuron_range
# current_layers =[] then current_layers=[(min_neurons)],
-# [(min_neurons+neuron_step)], [(min_neurons+2*neuron_step)],...
+# [(min_neurons+neuron_step)], [(min_neurons+2*neuron_step)],...
# for each of these we call generate_tuple again, which appends
-# the n combinations for each one of them
+# the n combinations for each one of them
generate_tuple(vcat(current_layers, [n]), remaining_layers - 1)
end
else
# in the base case, no more layers to "recurse on"
-# and we just append the current_layers as a tuple
+# and we just append the current_layers as a tuple
push!(networks, tuple(current_layers...))
end
end
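A practical note on the function these files share: the number of generated architectures is length(neuron_range)^num_layers, which grows quickly with num_layers. Judging from the hidden sizes printed earlier (all drawn from 9, 13, ..., 61), the notebook's grid appears to be 9:4:61 with three layers; that reading is an assumption, but it gives a feel for the scale:

neuron_range = 9:4:61  # assumed grid, inferred from the printed hidden sizes
num_layers = 3
length(neuron_range)^num_layers  # 14^3 = 2744 candidate architectures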
