fix(dbn): Fixes adding missing DBN layers when they are not supplied.
gugarosa committed Jan 8, 2023
1 parent 4582dc2 commit 34681ae
Showing 23 changed files with 244 additions and 233 deletions.
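Note: the DBN constructor change implementing the fix sits in files not expanded on this page. As a loose illustration of the behavior the commit title describes (a hypothetical sketch, not learnergy's actual implementation), a deep model can pad a per-layer hyperparameter tuple with defaults when fewer values are supplied than there are layers:

# Hypothetical sketch, not learnergy's actual code: pad per-layer
# hyperparameters with a default when fewer values than layers are given.
def fill_missing(values, n_layers, default):
    values = tuple(values)
    return values + (default,) * (n_layers - len(values))

# Three layers, but only one learning rate supplied:
print(fill_missing((0.1,), 3, 0.1))  # -> (0.1, 0.1, 0.1)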
4 changes: 2 additions & 2 deletions docs/conf.py
@@ -27,10 +27,10 @@
author = "Mateus Roder and Gustavo de Rosa"

# The short X.Y version
version = "1.1.3"
version = "1.1.4"

# The full version, including alpha/beta/rc tags
release = "1.1.3"
release = "1.1.4"


# -- General configuration ---------------------------------------------------
10 changes: 4 additions & 6 deletions examples/applications/bernoulli/conv_rbm_classification.py
@@ -52,10 +52,10 @@
nf = model.n_filters

if model.maxpooling:
-    input_fc = nf * (h1//2 + 1) * (h2//2 + 1)
+    input_fc = nf * (h1 // 2 + 1) * (h2 // 2 + 1)
else:
    input_fc = nf * h1 * h2
-fc = nn.Linear(input_fc , n_classes)
+fc = nn.Linear(input_fc, n_classes)

# Check if model uses GPU
if model.device == "cuda":
@@ -99,8 +99,7 @@
y = model(x_batch)

# Reshaping the outputs
-y = y.reshape(
-    x_batch.size(0), input_fc)
+y = y.reshape(x_batch.size(0), input_fc)

# Calculating the fully-connected outputs
y = fc(y)
@@ -131,8 +130,7 @@
y = model(x_batch)

# Reshaping the outputs
-y = y.reshape(
-    x_batch.size(0), input_fc)
+y = y.reshape(x_batch.size(0), input_fc)

# Calculating the fully-connected outputs
y = fc(y)
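Note on the input_fc arithmetic in this example: nn.MaxPool2d(kernel_size=2, stride=2, padding=1) maps a side of length h to floor((h + 2 - 2) / 2) + 1 = h // 2 + 1, which is where nf * (h1 // 2 + 1) * (h2 // 2 + 1) comes from. A quick self-contained check (the shape numbers below are hypothetical, assuming a pooling kernel of 2):

import torch
import torch.nn as nn

# Verify that MaxPool2d(kernel_size=2, stride=2, padding=1) maps a side
# of length h to h // 2 + 1, as the input_fc computation above assumes.
h1, h2, nf = 23, 23, 16  # hypothetical hidden shape and filter count
pool = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
out = pool(torch.zeros(1, nf, h1, h2))
assert out.shape == (1, nf, h1 // 2 + 1, h2 // 2 + 1)
print(nf * (h1 // 2 + 1) * (h2 // 2 + 1))  # in_features for nn.Linear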
32 changes: 15 additions & 17 deletions examples/applications/deep/conv_dbn_classification.py
@@ -1,10 +1,10 @@
import torch
-import torchvision
import torch.nn as nn
import torch.optim as optim

+import torchvision
from torch.utils.data import DataLoader
from tqdm import tqdm

from learnergy.models.deep import ConvDBN

# Creating training and testing dataset
@@ -33,7 +33,7 @@
momentum=(0, 0),
decay=(0, 0),
maxpooling=(True, False),
-#pooling_kernel=(2, 0), # WORKING ON ...
+# pooling_kernel=(2, 0), # WORKING ON ...
use_gpu=True,
)

@@ -46,22 +46,22 @@
model.fit(train, batch_size=batch_size, epochs=epochs)

# Reconstructing test set
-#rec_mse, v = model.reconstruct(test)
+# rec_mse, v = model.reconstruct(test)

# Saving model
torch.save(model, "model.pth")

# Creating the Fully Connected layer to append on top of DBN
-h1 = model.models[len(model.models)-1].hidden_shape[0]
-h2 = model.models[len(model.models)-1].hidden_shape[1]
-nf = model.models[len(model.models)-1].n_filters
+h1 = model.models[len(model.models) - 1].hidden_shape[0]
+h2 = model.models[len(model.models) - 1].hidden_shape[1]
+nf = model.models[len(model.models) - 1].n_filters

-if model.models[len(model.models)-1].maxpooling:
-    input_fc = nf * (h1//2 + 1) * (h2//2 + 1)
-    print('pooling', input_fc)
+if model.models[len(model.models) - 1].maxpooling:
+    input_fc = nf * (h1 // 2 + 1) * (h2 // 2 + 1)
+    print("pooling", input_fc)
else:
    input_fc = nf * h1 * h2
-fc = nn.Linear(input_fc , n_classes)
+fc = nn.Linear(input_fc, n_classes)

# Check if model uses GPU
if model.device == "cuda":
@@ -101,10 +101,9 @@

# Passing the batch down the model
y = model(x_batch)

# Reshaping the outputs
-y = y.reshape(
-    x_batch.size(0), input_fc)
+y = y.reshape(x_batch.size(0), input_fc)

# Calculating the fully-connected outputs
y = fc(y)
@@ -133,10 +132,9 @@

# Passing the batch down the model
y = model(x_batch)

# Reshaping the outputs
-y = y.reshape(
-    x_batch.size(0), input_fc)
+y = y.reshape(x_batch.size(0), input_fc)

# Calculating the fully-connected outputs
y = fc(y)
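Aside (not part of the commit): model.models[len(model.models) - 1] selects the topmost RBM of the stack. Assuming model.models behaves like a standard PyTorch ModuleList, negative indexing expresses the same lookup more compactly:

# Equivalent, more idiomatic access to the stack's top layer:
top = model.models[-1]
h1, h2 = top.hidden_shape[0], top.hidden_shape[1]
nf = top.n_filters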
@@ -52,10 +52,10 @@
nf = model.n_filters

if model.maxpooling:
-    input_fc = nf * (h1//2 + 1) * (h2//2 + 1)
+    input_fc = nf * (h1 // 2 + 1) * (h2 // 2 + 1)
else:
    input_fc = nf * h1 * h2
-fc = nn.Linear(input_fc , n_classes)
+fc = nn.Linear(input_fc, n_classes)

# Check if model uses GPU
if model.device == "cuda":
@@ -99,8 +99,7 @@
y = model(x_batch)

# Reshaping the outputs
-y = y.reshape(
-    x_batch.size(0), input_fc)
+y = y.reshape(x_batch.size(0), input_fc)

# Calculating the fully-connected outputs
y = fc(y)
@@ -131,8 +130,7 @@
y = model(x_batch)

# Reshaping the outputs
-y = y.reshape(
-    x_batch.size(0), input_fc)
+y = y.reshape(x_batch.size(0), input_fc)

# Calculating the fully-connected outputs
y = fc(y)
2 changes: 1 addition & 1 deletion learnergy/__init__.py
@@ -2,4 +2,4 @@
of several modules and sub-modules.
"""

__version__ = "1.1.3"
__version__ = "1.1.4"
13 changes: 8 additions & 5 deletions learnergy/core/dataset.py
@@ -2,7 +2,6 @@
"""

from typing import Optional, Tuple
-from xmlrpc.client import Boolean

import numpy as np
import torch
@@ -17,15 +16,19 @@ class Dataset(torch.utils.data.Dataset):
"""A custom dataset class, inherited from PyTorch's dataset."""

def __init__(
-    self, data: np.array, targets: np.array,
-    transform: Optional[callable] = None, show_log: Optional[Boolean] = True
+    self,
+    data: np.array,
+    targets: np.array,
+    transform: Optional[callable] = None,
+    show_log: Optional[bool] = True,
) -> None:
"""Initialization method.
Args:
data: An n-dimensional array containing the data.
targets: An 1-dimensional array containing the data's labels.
transform: Optional transform to be applied over a sample.
show_log: Whether to show log information or not.
"""

@@ -83,7 +86,7 @@ def __getitem__(self, idx: int) -> Tuple[torch.Tensor, torch.Tensor]:
idx: The idx of desired sample.
Returns:
-(Tuple[torch.Tensor, torch.Tensor]): Data and label tensors.
+Data and label tensors.
"""

@@ -99,7 +102,7 @@ def __len__(self) -> int:
"""A private method that will be the base for PyTorch's iterator getting dataset's length.
Returns:
-(int): Length of dataset.
+Length of dataset.
"""

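With the stray xmlrpc.client.Boolean import removed, show_log is annotated as a plain bool. A minimal usage sketch of the corrected signature (array shapes and values are illustrative):

import numpy as np
from learnergy.core.dataset import Dataset

# Hypothetical data: 10 flattened 28x28 samples with integer labels.
data = np.random.rand(10, 784).astype(np.float32)
targets = np.random.randint(0, 10, size=(10,))

dataset = Dataset(data, targets, transform=None, show_log=False)
print(len(dataset))  # __len__: length of dataset
x, y = dataset[0]    # __getitem__: data and label tensors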
2 changes: 1 addition & 1 deletion learnergy/math/scale.py
@@ -13,7 +13,7 @@ def unitary_scale(x: np.array) -> np.array:
x: A numpy array to be scaled.
Returns:
-(np.array): Scaled array.
+Scaled array.
"""

25 changes: 14 additions & 11 deletions learnergy/models/bernoulli/conv_rbm.py
@@ -55,7 +55,7 @@ def __init__(
momentum: Momentum parameter.
decay: Weight decay used for penalization.
maxpooling: Whether MaxPooling2D should be used or not.
-pooling_kernel: The kernel size of MaxPooling layer (when maxpooling=True).
+pooling_kernel: The kernel size of MaxPooling2D layer (when maxpooling=True).
use_gpu: Whether GPU should be used or not.
"""
@@ -80,7 +80,9 @@ def __init__(
self.decay = decay

if maxpooling:
-self.maxpol2d = nn.MaxPool2d(kernel_size=pooling_kernel, stride=2, padding=1)
+self.maxpol2d = nn.MaxPool2d(
+    kernel_size=pooling_kernel, stride=2, padding=1
+)
self.maxpooling = True
else:
self.maxpol2d = maxpooling
@@ -115,7 +117,8 @@ def __init__(
self.momentum,
self.decay,
self.maxpooling,
-pooling_kernel, pooling_kernel,
+pooling_kernel,
+pooling_kernel,
)

@property
@@ -233,7 +236,7 @@ def decay(self, decay: float) -> None:

@property
def maxpooling(self) -> bool:
"""Usage of MaxPooling."""
"""Usage of MaxPooling2D."""

return self._maxpooling

@@ -291,7 +294,7 @@ def hidden_sampling(self, v: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
v: A tensor incoming from the visible layer.
Returns:
-(Tuple[torch.Tensor, torch.Tensor]): The probabilities and states of the hidden layer sampling.
+The probabilities and states of the hidden layer sampling.
"""

@@ -308,7 +311,7 @@ def visible_sampling(self, h: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
h: A tensor incoming from the hidden layer.
Returns:
-(Tuple[torch.Tensor, torch.Tensor]): The probabilities and states of the visible layer sampling.
+The probabilities and states of the visible layer sampling.
"""

@@ -327,7 +330,7 @@ def gibbs_sampling(
v: A tensor incoming from the visible layer.
Returns:
-(Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): The probabilities and states of the hidden layer sampling (positive),
+The probabilities and states of the hidden layer sampling (positive),
the probabilities and states of the hidden layer sampling (negative)
and the states of the visible layer sampling (negative).
@@ -362,7 +365,7 @@ def energy(self, samples: torch.Tensor) -> torch.Tensor:
samples: Samples to be energy-freed.
Returns:
-(torch.Tensor): The system's energy based on input samples.
+The system's energy based on input samples.
"""

@@ -392,7 +395,7 @@ def fit(
epochs: Number of training epochs.
Returns:
-(float): MSE (mean squared error) from the training step.
+MSE (mean squared error) from the training step.
"""

@@ -454,7 +457,7 @@ def reconstruct(
dataset: A Dataset object containing the testing data.
Returns:
-(Tuple[float, torch.Tensor]): Reconstruction error and visible probabilities, i.e., P(v|h).
+Reconstruction error and visible probabilities, i.e., P(v|h).
"""

@@ -498,7 +501,7 @@ def forward(self, x: torch.Tensor) -> torch.Tensor:
x: An input tensor for computing the forward pass.
Returns:
-(torch.Tensor): A tensor containing the Convolutional RBM's outputs.
+A tensor containing the Convolutional RBM's outputs.
"""

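Per the reconstruct docstring above, the method returns the reconstruction error together with the visible probabilities P(v|h); a short usage sketch (the model and test variables are assumed to exist, as in the examples earlier in this commit):

# Sketch: reconstructing a test set with a trained ConvRBM.
rec_mse, v = model.reconstruct(test)  # MSE and visible probabilities P(v|h)
print(f"Reconstruction MSE: {rec_mse}")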
14 changes: 7 additions & 7 deletions learnergy/models/bernoulli/discriminative_rbm.py
@@ -132,7 +132,7 @@ def labels_sampling(self, samples: torch.Tensor) -> torch.Tensor:
samples: Samples to be labels-calculated.
Returns:
-(torch.Tensor): Labels' probabilities based on input samples.
+Labels' probabilities based on input samples.
"""

@@ -164,7 +164,7 @@ def fit(
epochs: Number of training epochs.
Returns:
-(Tuple[float, float]): Loss and accuracy from the training step.
+Loss and accuracy from the training step.
"""

@@ -223,7 +223,7 @@ def predict(
dataset: A Dataset object containing the testing data.
Returns:
-(Tuple[float, torch.Tensor, torch.Tensor]): Accuracy, prediction probabilities and labels, i.e., P(y|v).
+Accuracy, prediction probabilities and labels, i.e., P(y|v).
"""

@@ -334,7 +334,7 @@ def hidden_sampling(
scale: A boolean to decide whether temperature should be used or not.
Returns:
-(Tuple[torch.Tensor, torch.Tensor]): The probabilities and states of the hidden layer sampling.
+The probabilities and states of the hidden layer sampling.
"""

@@ -356,7 +356,7 @@ def class_sampling(self, h: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
h: A tensor incoming from the hidden layer.
Returns:
-(Tuple[torch.Tensor, torch.Tensor]): The probabilities and states of the class layer sampling.
+The probabilities and states of the class layer sampling.
"""

@@ -378,7 +378,7 @@ def gibbs_sampling(
y: A tensor incoming from the class layer.
Returns:
-(Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]): The probabilities and states of the hidden layer sampling (positive),
+The probabilities and states of the hidden layer sampling (positive),
the probabilities and states of the hidden layer sampling (negative)
and the states of the visible layer sampling (negative).
@@ -420,7 +420,7 @@ def fit(
epochs: Number of training epochs.
Returns:
-(Tuple[float, float]): Loss and accuracy from the training step.
+Loss and accuracy from the training step.
"""

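Per the predict docstring above, the discriminative RBM reports accuracy alongside prediction probabilities and labels; a short usage sketch (model and test assumed, mirroring the examples above):

# Sketch: evaluating a trained discriminative RBM on a test dataset.
acc, probs, labels = model.predict(test)  # accuracy, P(y|v), predicted labels
print(f"Test accuracy: {acc}")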