Hnn #2

Draft: wants to merge 9 commits into base: main
1 change: 1 addition & 0 deletions .gitignore
@@ -1,3 +1,4 @@
lightning_logs/
examples/lightning_logs/

tags
283 changes: 283 additions & 0 deletions examples/doublependulum_emlp.ipynb

Large diffs are not rendered by default.

79 changes: 79 additions & 0 deletions examples/doublependulum_emlp.py
@@ -0,0 +1,79 @@
import sys

sys.path.append("../")

import torch
import torch.utils as utils

import pytorch_lightning as pl

from torchemlp.utils import DEFAULT_DEVICE, DEFAULT_DEVICE_STR
from torchemlp.reps import Scalar
from torchemlp.nn.utils import AutonomousWrapper
from torchemlp.nn.contdepth import Hamiltonian
from torchemlp.nn.equivariant import EMLP
from torchemlp.nn.runners import DynamicsL2RegressionLightning
from torchemlp.nn.utils import Standardize
from torchemlp.datasets import DoublePendulum

torch.set_default_dtype(torch.float32)

TRAINING_SET_SIZE = 5_000
VALIDATION_SET_SIZE = 1_000
TEST_SET_SIZE = 1_000

DT = 0.1
T = 30.0

BATCH_SIZE = 500

N_EPOCHS = 5 # min(int(900_000 / TRAINING_SET_SIZE), 1000)
# N_EPOCHS = 100 # min(int(900_000 / TRAINING_SET_SIZE), 1000)

# N_CHANNELS = 384
N_CHANNELS = 3
N_LAYERS = 3

DL_WORKERS = 0

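# Build one pool of double-pendulum trajectories (sampled with step DT over
# horizon T); it is split into train/val/test sets below.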
dataset = DoublePendulum(
TRAINING_SET_SIZE + VALIDATION_SET_SIZE + TEST_SET_SIZE,
DT,
T,
)

print("Loaded dataset.")
print(f"Repin: {dataset.repin}")
print(f"Repout: {dataset.repout}")

split_data = utils.data.random_split(
dataset, [TRAINING_SET_SIZE, VALIDATION_SET_SIZE, TEST_SET_SIZE]
)

train_loader = utils.data.DataLoader(
split_data[0], batch_size=BATCH_SIZE, num_workers=DL_WORKERS, shuffle=True
)
val_loader = utils.data.DataLoader(
split_data[1], batch_size=BATCH_SIZE, num_workers=DL_WORKERS
)
test_loader = utils.data.DataLoader(
split_data[2], batch_size=BATCH_SIZE, num_workers=DL_WORKERS
)

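# Hamiltonian dynamics model: a G-equivariant EMLP maps the standardized
# state to a scalar Hamiltonian, and AutonomousWrapper adapts the scalar
# network to the dynamics interface (the dynamics are time-independent).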
model = Hamiltonian(
AutonomousWrapper(
Standardize(
EMLP(dataset.repin, Scalar, dataset.G, N_CHANNELS, N_LAYERS), dataset.stats
)
)
).to(DEFAULT_DEVICE)

plmodel = DynamicsL2RegressionLightning(model)

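# Note: limit_train_batches caps the number of training batches per epoch,
# not the batch size; with 5_000 training samples at batch size 500 there are
# only 10 batches per epoch, so a cap of 500 has no effect here.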
trainer = pl.Trainer(
limit_train_batches=BATCH_SIZE,
max_epochs=N_EPOCHS,
accelerator=DEFAULT_DEVICE_STR,
)
trainer.fit(plmodel, train_loader, val_loader)
trainer.test(plmodel, test_loader)
80 changes: 80 additions & 0 deletions examples/doublependulum_mlp.py
@@ -0,0 +1,80 @@
import sys

sys.path.append("../")

import torch
import torch.utils as utils
from torch.profiler import profile, record_function, ProfilerActivity

import pytorch_lightning as pl

from torchemlp.utils import DEFAULT_DEVICE, DEFAULT_DEVICE_STR
from torchemlp.nn.utils import MLP, AutonomousWrapper
from torchemlp.nn.contdepth import Hamiltonian
from torchemlp.nn.runners import DynamicsL2RegressionLightning
from torchemlp.nn.utils import Standardize
from torchemlp.datasets import DoublePendulum

# torch.set_default_dtype(torch.float32)

TRAINING_SET_SIZE = 5_000
VALIDATION_SET_SIZE = 1_000
TEST_SET_SIZE = 1_000

DT = 0.1
T = 30.0

BATCH_SIZE = 500

N_EPOCHS = 1 # min(int(900_000 / TRAINING_SET_SIZE), 1000)
# N_EPOCHS = 100 # min(int(900_000 / TRAINING_SET_SIZE), 1000)

N_CHANNELS = 384
N_LAYERS = 3

DL_WORKERS = 0

dataset = DoublePendulum(
TRAINING_SET_SIZE + VALIDATION_SET_SIZE + TEST_SET_SIZE,
DT,
T,
)

print("Loaded dataset.")
print(f"Repin: {dataset.repin}")
print(f"Repout: {dataset.repout}")

split_data = utils.data.random_split(
dataset, [TRAINING_SET_SIZE, VALIDATION_SET_SIZE, TEST_SET_SIZE]
)

train_loader = utils.data.DataLoader(
split_data[0], batch_size=BATCH_SIZE, num_workers=DL_WORKERS, shuffle=True
)
val_loader = utils.data.DataLoader(
split_data[1], batch_size=BATCH_SIZE, num_workers=DL_WORKERS
)
test_loader = utils.data.DataLoader(
split_data[2], batch_size=BATCH_SIZE, num_workers=DL_WORKERS
)

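# Non-equivariant baseline: the same Hamiltonian wrapper, but the scalar
# network is a plain MLP on the raw 12-dimensional state instead of an EMLP.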
model = Hamiltonian(
AutonomousWrapper(Standardize(MLP(12, 1, 2, 2), dataset.stats))
).to(DEFAULT_DEVICE)

plmodel = DynamicsL2RegressionLightning(model)

trainer = pl.Trainer(
limit_train_batches=BATCH_SIZE,
max_epochs=N_EPOCHS,
accelerator=DEFAULT_DEVICE_STR,
)

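# Profile one training run on CPU and GPU, recording tensor shapes and memory
# usage. Listing ProfilerActivity.CUDA already enables GPU profiling, so the
# legacy use_cuda flag is not needed.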
with profile(
    activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
    record_shapes=True,
    profile_memory=True,
) as prof:
with record_function("model_train"):
trainer.fit(plmodel, train_loader, val_loader)

print(prof.key_averages().table(sort_by="self_cpu_memory_usage", row_limit=10))
prof.export_chrome_trace("trace.json")

trainer.test(plmodel, test_loader)
14 changes: 14 additions & 0 deletions examples/experiment.py
@@ -0,0 +1,14 @@
import sys

sys.path.append("../")

import torch
import torch.utils as utils

import pytorch_lightning as pl

from torchemlp.groups import SO, O, S, Z
from torchemlp.nn.equivariant import EMLP
from torchemlp.nn.runners import RegressionLightning
from torchemlp.nn.utils import Standardize
from torchemlp.datasets import O5Synthetic