Merge pull request #20 from Fraunhofer-IIS/simplify_examples_for_docs
Format and simplify loss calculation
bknico-iis authored Jan 10, 2024
2 parents 2516399 + 774b049 commit 3163a12
Showing 7 changed files with 61 additions and 63 deletions.
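
The diffs below make two kinds of changes: black-style formatting (multi-line call layout, spaces around operators) and a simplification of the loss calculation, where per-timestep loops over MSELoss are collapsed into a single call on the full error tensor against a matching zero-target tensor. The two formulations agree because torch.nn.MSELoss defaults to reduction="mean", and averaging per-step means over equally sized slices is the same as taking one mean over the stacked tensor; in the CRCNN and DHCNN examples the zero targets therefore gain a leading branch/depth axis to match the stacked errors. A minimal sketch of the equivalence (shapes chosen for illustration, not taken from the repository):

import torch

past_horizon, batchsize, n_features_Y = 15, 5, 2
loss_function = torch.nn.MSELoss()  # reduction="mean" by default

past_error = torch.randn(past_horizon, batchsize, n_features_Y)
targets = torch.zeros(past_horizon, batchsize, n_features_Y)

# Old style: average of per-timestep MSE values.
loss_old = sum(
    loss_function(past_error[i], targets[i]) for i in range(past_horizon)
) / past_horizon
# New style: one call over the whole tensor.
loss_new = loss_function(past_error, targets)

assert torch.isclose(loss_old, loss_new)
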
15 changes: 8 additions & 7 deletions docs/source/examples_for_doc/crcnn_example.py
@@ -14,14 +14,16 @@
 batchsize = 5
 
 # Initialise Causal-Retro-Causal Neural Network
-crcnn_model = CRCNN(n_state_neurons, n_features_Y, past_horizon, forecast_horizon, n_branches)
-
+crcnn_model = CRCNN(
+    n_state_neurons, n_features_Y, past_horizon, forecast_horizon, n_branches
+)
+
 # Generate data with "unknown" variables U
-Y, U = gtsd.sample_data(n_data, n_features_Y=n_features_Y-1, n_features_U=1)
+Y, U = gtsd.sample_data(n_data, n_features_Y=n_features_Y - 1, n_features_U=1)
 Y = torch.cat((Y, U), 1)
 Y_batches = ci.create_input(Y, past_horizon, batchsize)
 
-targets = torch.zeros((past_horizon, batchsize, n_features_Y))
+targets = torch.zeros((n_branches - 1, past_horizon, batchsize, n_features_Y))
 
 # Train model
 optimizer = torch.optim.Adam(crcnn_model.parameters())
@@ -34,7 +36,6 @@
         past_errors, forecasts = torch.split(model_output, past_horizon, dim=1)
 
         crcnn_model.zero_grad()
-        loss = sum([loss_function(past_errors[k, i], targets[i]) for i in range(past_horizon)
-                    for k in range(n_branches - 1)]) / (past_horizon * n_branches)
+        loss = loss_function(past_errors, targets)
         loss.backward()
-        optimizer.step()
+        optimizer.step()
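
A side effect of the CRCNN change worth flagging (an observation, not something stated in the diff): the removed expression divided by past_horizon * n_branches although the sum ran over only n_branches - 1 branch outputs, so the new single-call mean is larger by the constant factor n_branches / (n_branches - 1). A constant factor only rescales the gradient, so training behaviour is essentially unchanged; a quick check with assumed shapes:

import torch

n_branches, past_horizon, batchsize, n_features_Y = 3, 15, 5, 2
loss_function = torch.nn.MSELoss()

past_errors = torch.randn(n_branches - 1, past_horizon, batchsize, n_features_Y)
targets = torch.zeros_like(past_errors)  # zero targets with the extra branch axis

# Removed formulation, including its n_branches (not n_branches - 1) divisor.
loss_old = sum(
    loss_function(past_errors[k, i], targets[k, i])
    for i in range(past_horizon)
    for k in range(n_branches - 1)
) / (past_horizon * n_branches)
# New formulation: plain mean over all elements.
loss_new = loss_function(past_errors, targets)

assert torch.isclose(loss_new, loss_old * n_branches / (n_branches - 1))
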
9 changes: 4 additions & 5 deletions docs/source/examples_for_doc/deepff_example.py
@@ -10,10 +10,9 @@
 deepness = 3
 
 # Initialise Deep Feedforward Neural Network
-deepff_model = DeepFeedForward(input_dim=input_dim,
-                               hidden_dim=hidden_dim,
-                               output_dim=output_dim,
-                               deepness=deepness)
+deepff_model = DeepFeedForward(
+    input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim, deepness=deepness
+)
 
 X = torch.randn([n_batches, batchsize, input_dim])
 Y = torch.randn([n_batches, batchsize, output_dim])
@@ -29,4 +28,4 @@
         deepff_model.zero_grad()
         loss = sum([loss_function(output[i], y) for i in range(deepness)]) / deepness
         loss.backward()
-        optimizer.step()
+        optimizer.step()
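
The DeepFeedForward example keeps its Python-level sum because each of the deepness intermediate outputs is compared against the same target y. If output is a single stacked tensor of shape (deepness, batchsize, output_dim) (the indexing output[i] suggests this, though the diff does not say so), the same collapse would work here by broadcasting the target; a sketch under that shape assumption:

import torch

deepness, batchsize, output_dim = 3, 5, 2
loss_function = torch.nn.MSELoss()

output = torch.randn(deepness, batchsize, output_dim)  # assumed stacked outputs
y = torch.randn(batchsize, output_dim)

loss_loop = sum(loss_function(output[i], y) for i in range(deepness)) / deepness
loss_flat = loss_function(output, y.expand(deepness, -1, -1))  # same mean

assert torch.isclose(loss_loop, loss_flat)
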
15 changes: 8 additions & 7 deletions docs/source/examples_for_doc/dhcnn_example.py
@@ -14,14 +14,16 @@
 batchsize = 5
 
 # Initialise Deep Historical Consistent Neural Network
-dhcnn_model = DHCNN(n_state_neurons, n_features_Y, past_horizon, forecast_horizon, deepness)
-
+dhcnn_model = DHCNN(
+    n_state_neurons, n_features_Y, past_horizon, forecast_horizon, deepness
+)
+
 # Generate data with "unknown" variables U
-Y, U = gtsd.sample_data(n_data, n_features_Y=n_features_Y-1, n_features_U=1)
+Y, U = gtsd.sample_data(n_data, n_features_Y=n_features_Y - 1, n_features_U=1)
 Y = torch.cat((Y, U), 1)
 Y_batches = ci.create_input(Y, past_horizon, batchsize)
 
-targets = torch.zeros((past_horizon, batchsize, n_features_Y))
+targets = torch.zeros((deepness, past_horizon, batchsize, n_features_Y))
 
 # Train model
 optimizer = torch.optim.Adam(dhcnn_model.parameters())
@@ -34,7 +36,6 @@
         past_errors, forecast = torch.split(model_output, past_horizon, dim=1)
 
         dhcnn_model.zero_grad()
-        loss = sum([loss_function(past_errors[k, i], targets[i]) for i in range(past_horizon)
-                    for k in range(deepness)]) / (past_horizon * deepness)
+        loss = loss_function(past_errors, targets)
         loss.backward()
-        optimizer.step()
+        optimizer.step()
35 changes: 19 additions & 16 deletions docs/source/examples_for_doc/ecnn_example.py
@@ -14,35 +14,38 @@
 batchsize = 1
 
 # Initialise Error Correction Neural Network
-ecnn_model = ECNN(n_features_U,
-                  n_state_neurons,
-                  past_horizon,
-                  forecast_horizon,
-                  n_features_Y=n_features_Y)
+ecnn = ECNN(
+    n_features_U,
+    n_state_neurons,
+    past_horizon,
+    forecast_horizon,
+    n_features_Y=n_features_Y,
+)
 
 # Generate data
 Y, U = gtsd.sample_data(n_data, n_features_Y, n_features_U)
-Y_batches, U_batches = ci.create_input(Y=Y,
-                                       past_horizon=past_horizon,
-                                       batchsize=batchsize,
-                                       U=U,
-                                       forecast_horizon=forecast_horizon)
+Y_batches, U_batches = ci.create_input(
+    Y=Y,
+    past_horizon=past_horizon,
+    batchsize=batchsize,
+    U=U,
+    forecast_horizon=forecast_horizon,
+)
 
 targets = torch.zeros((past_horizon, batchsize, n_features_Y))
 
 # Train model
-optimizer = torch.optim.Adam(ecnn_model.parameters())
+optimizer = torch.optim.Adam(ecnn.parameters())
 loss_function = torch.nn.MSELoss()
 
 for epoch in range(10):
     for batch_index in range(0, U_batches.shape[0]):
         U_batch = U_batches[batch_index]
         Y_batch = Y_batches[batch_index]
-        model_output = ecnn_model(U_batch, Y_batch)
+        model_output = ecnn(U_batch, Y_batch)
         past_error, forecast = torch.split(model_output, past_horizon)
 
-        ecnn_model.zero_grad()
-        loss = sum([loss_function(past_error[i], targets[i])
-                    for i in range(past_horizon)]) / past_horizon
+        ecnn.zero_grad()
+        loss = loss_function(past_error, targets)
         loss.backward()
-        optimizer.step()
+        optimizer.step()
6 changes: 2 additions & 4 deletions docs/source/examples_for_doc/ff_example.py
@@ -9,9 +9,7 @@
 batchsize = 5
 
 # Initialise Feedforward Neural Network
-ff_model = FFNN(input_dim=input_dim,
-                hidden_dim=hidden_dim,
-                output_dim=output_dim)
+ff_model = FFNN(input_dim=input_dim, hidden_dim=hidden_dim, output_dim=output_dim)
 
 X = torch.randn([n_batches, batchsize, input_dim])
 Y = torch.randn([n_batches, batchsize, output_dim])
@@ -27,4 +25,4 @@
         ff_model.zero_grad()
         loss = loss_function(output, y)
         loss.backward()
-        optimizer.step()
+        optimizer.step()
9 changes: 4 additions & 5 deletions docs/source/examples_for_doc/hcnn_example.py
@@ -14,9 +14,9 @@
 
 # Initialise Historical Consistent Neural Network
 hcnn_model = HCNN(n_state_neurons, n_features_Y, past_horizon, forecast_horizon)
 
 # Generate data with "unknown" variables U
-Y, U = gtsd.sample_data(n_data, n_features_Y=n_features_Y-1, n_features_U=1)
+Y, U = gtsd.sample_data(n_data, n_features_Y=n_features_Y - 1, n_features_U=1)
 Y = torch.cat((Y, U), 1)
 Y_batches = ci.create_input(Y, past_horizon, batchsize)
 
@@ -33,7 +33,6 @@
         past_error, forecast = torch.split(model_output, past_horizon)
 
         hcnn_model.zero_grad()
-        loss = sum([loss_function(past_error[i], targets[i])
-                    for i in range(past_horizon)]) / past_horizon
+        loss = loss_function(past_error, targets)
         loss.backward()
-        optimizer.step()
+        optimizer.step()
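
For orientation in the HCNN and ECNN examples: torch.split(model_output, past_horizon) chunks along dim 0 in blocks of past_horizon, so as long as forecast_horizon <= past_horizon it returns exactly the two tensors unpacked above. A shape sketch with assumed values (the real ones sit in the collapsed part of the hunk):

import torch

past_horizon, forecast_horizon, batchsize, n_features_Y = 15, 5, 1, 2
model_output = torch.randn(past_horizon + forecast_horizon, batchsize, n_features_Y)

# One block of size past_horizon, then the remaining forecast_horizon steps.
past_error, forecast = torch.split(model_output, past_horizon)

assert past_error.shape == (past_horizon, batchsize, n_features_Y)
assert forecast.shape == (forecast_horizon, batchsize, n_features_Y)
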
35 changes: 16 additions & 19 deletions docs/source/examples_for_doc/hcnn_known_u_example.py
@@ -1,31 +1,30 @@
 import prosper_nn.utils.generate_time_series_data as gtsd
 import prosper_nn.utils.create_input_ecnn_hcnn as ci
-import prosper_nn.utils.neuron_correlation_hidden_layers as nchl
-from prosper_nn.utils import visualize_forecasts
 from prosper_nn.models.hcnn_known_u import hcnn_known_u
 import torch
 
 # Define network parameters
-n_features_U = 10 #setting this to zero reverts to vanilla HCNN with tf
+n_features_U = 10  # setting this to zero reverts to vanilla HCNN with tf
 batchsize = 5
 past_horizon = 15
 forecast_horizon = 5
-future_U = True #Has to be true for Hcnn_known_U
 n_state_neurons = 20
-n_data=50
+n_data = 50
 n_features_Y = 5
 sparsity = 0
 teacher_forcing = 1
 decrease_teacher_forcing = 0.0001
 
 # Generate data
 Y, U = gtsd.sample_data(n_data, n_features_Y, n_features_U)
-Y_batches, U_batches = ci.create_input(Y,
-                                       past_horizon,
-                                       batchsize,
-                                       U,
-                                       future_U,
-                                       forecast_horizon)
+Y_batches, U_batches = ci.create_input(
+    Y,
+    past_horizon,
+    batchsize,
+    U,
+    True,  # Has to be true for Hcnn_known_U
+    forecast_horizon,
+)
 
 Y_batches.shape, U_batches.shape
 
@@ -37,8 +36,9 @@
     past_horizon,
     forecast_horizon,
     sparsity,
-    teacher_forcing = teacher_forcing,
-    decrease_teacher_forcing = decrease_teacher_forcing )
+    teacher_forcing=teacher_forcing,
+    decrease_teacher_forcing=decrease_teacher_forcing,
+)
 
 # setting the optimizer, loss and targets
 optimizer = torch.optim.Adam(hcnn_known_u_model.parameters(), lr=0.01)
@@ -47,16 +47,13 @@
 
 # Train model
 epochs = 150
-total_loss = epochs * [0]
 for epoch in range(epochs):
     for batch_index in range(0, U_batches.shape[0]):
         hcnn_known_u_model.zero_grad()
         U_batch = U_batches[batch_index]
         Y_batch = Y_batches[batch_index]
-        model_out = hcnn_known_u_model(U_batch,Y_batch)
-        past_error , forecast = torch.split(model_out,past_horizon)
-        losses = [loss_function(past_error[i], targets[i]) for i in range(past_horizon)]
-        loss = sum(losses)
+        model_out = hcnn_known_u_model(U_batch, Y_batch)
+        past_error, forecast = torch.split(model_out, past_horizon)
+        loss = loss_function(past_error, targets)
         loss.backward()
         optimizer.step()
-        total_loss[epoch] += loss.detach()
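
The last hunk also drops the total_loss bookkeeping. If per-epoch loss curves are still wanted (say, for plotting or early stopping), they can be restored without reverting to the loop-based loss; a sketch reusing the names from the example above:

epoch_losses = []
for epoch in range(epochs):
    running_loss = 0.0
    for batch_index in range(U_batches.shape[0]):
        hcnn_known_u_model.zero_grad()
        U_batch = U_batches[batch_index]
        Y_batch = Y_batches[batch_index]
        model_out = hcnn_known_u_model(U_batch, Y_batch)
        past_error, forecast = torch.split(model_out, past_horizon)
        loss = loss_function(past_error, targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()  # plain float for logging
    epoch_losses.append(running_loss)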
