Commit
Completed dataloader implementation. Model architecture in progress.
lisiyi777 committed Oct 14, 2024
1 parent e771321 commit 6276f60
Showing 8 changed files with 211 additions and 63 deletions.
19 changes: 19 additions & 0 deletions configs/flow4d/argo/av2_tiny_debug.py
@@ -0,0 +1,19 @@
_base_ = "./flow4d_config.py"

train_sequence_dir = "/efs/argoverse2_tiny/val/"
test_dataset_root = "/efs/argoverse2_tiny/val/"

save_output_folder = "/efs/argoverse2_tiny/val_fastflow3d_rewrite_api/"

epochs = 1

train_dataset = dict(args=dict(root_dir=train_sequence_dir, use_gt_flow=True))
test_dataset = dict(
    args=dict(
        root_dir=test_dataset_root,
        eval_args=dict(output_path="eval_results/bucketed_epe_tiny/supervised_rewrite_api/"),
    )
)

# Limit batch size to 8 to fit on a 24 GB RTX 3090
test_dataloader = dict(args=dict(batch_size=8, num_workers=0, shuffle=False, pin_memory=True))
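For reference, the `_base_` line at the top of this config pulls in flow4d_config.py and recursively merges these dict overrides on top of it: nested dicts merge key by key, scalars override. A minimal sketch of that merge behavior, assuming an mm-style config loader (merge_configs below is a hypothetical helper, not part of this repository):

# Hypothetical sketch of _base_-style config inheritance (not repository code).
def merge_configs(base: dict, override: dict) -> dict:
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_configs(merged[key], value)  # nested dicts merge
        else:
            merged[key] = value  # scalars and lists override outright
    return merged

base = {"test_dataset": {"args": {"root_dir": "/efs/argoverse2/val/", "with_ground": False}}}
child = {"test_dataset": {"args": {"root_dir": "/efs/argoverse2_tiny/val/"}}}
print(merge_configs(base, child)["test_dataset"]["args"])
# {'root_dir': '/efs/argoverse2_tiny/val/', 'with_ground': False}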
35 changes: 18 additions & 17 deletions configs/flow4d/argo/flow4d_config.py
@@ -1,43 +1,42 @@
_base_ = ["../../pseudoimage.py"]

has_labels = True
is_trainable = True

epochs = 50
-learning_rate = 1e-3
+learning_rate = 2e-6
save_every = 500
validate_every = 500

# Direct mapping from the original config
SEQUENCE_LENGTH = 2

 model = dict(
-    name="Flow4D",
+    name="FastFlow3D",
     args=dict(
-        VOXEL_SIZE=[0.2, 0.2, 0.2],
-        POINT_CLOUD_RANGE=[-51.2, -51.2, -2.2, 51.2, 51.2, 4.2],
-        feature_channels=128,
-        num_frames=5,
-        variation=1,
-    ),
+        VOXEL_SIZE={{_base_.VOXEL_SIZE}},
+        PSEUDO_IMAGE_DIMS={{_base_.PSEUDO_IMAGE_DIMS}},
+        POINT_CLOUD_RANGE={{_base_.POINT_CLOUD_RANGE}},
+        FEATURE_CHANNELS=32,
+        SEQUENCE_LENGTH=SEQUENCE_LENGTH,
+    ),
 )

######## TEST DATASET ########

test_dataset_root = "/efs/argoverse2/val/"
save_output_folder = "/efs/argoverse2/val_deflow_flow/"

 test_dataset = dict(
-    name="BucketedSceneFlowDataset",
+    name="Flow4DSceneFlowDataset",
     args=dict(
         dataset_name="Argoverse2CausalSceneFlow",
         root_dir=test_dataset_root,
         with_ground=False,
         with_rgb=False,
         eval_type="bucketed_epe",
-        eval_args=dict(),
+        expected_camera_shape=(194, 256, 3),
+        # point_cloud_range=None,
+        eval_args=dict(output_path="eval_results/bucketed_epe/nsfp_distillation_1x/"),
     ),
 )

-test_dataloader = dict(args=dict(batch_size=1, num_workers=8, shuffle=False, pin_memory=True))
+test_dataloader = dict(args=dict(batch_size=8, num_workers=8, shuffle=False, pin_memory=True))

######## TRAIN DATASET ########

@@ -49,9 +48,11 @@
         dataset_name="Argoverse2CausalSceneFlow",
         root_dir=train_sequence_dir,
         with_ground=False,
-        use_gt_flow=True,
+        use_gt_flow=False,
         with_rgb=False,
         eval_type="bucketed_epe",
+        expected_camera_shape=(194, 256, 3),
+        # point_cloud_range=None,
         eval_args=dict(),
     ),
 )
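As a sanity check on the templated values above: the pseudo-image grid dimensions follow directly from the point cloud range and voxel size. A quick illustrative computation, assuming ../../pseudoimage.py carries the same range and voxel size that the removed inline Flow4D args spelled out:

# Illustrative arithmetic only; assumes the base config's values match the
# removed inline args: VOXEL_SIZE=[0.2]*3, range [-51.2, -51.2, -2.2, 51.2, 51.2, 4.2].
point_cloud_range = [-51.2, -51.2, -2.2, 51.2, 51.2, 4.2]
voxel_size = [0.2, 0.2, 0.2]
grid_dims = [
    round((point_cloud_range[i + 3] - point_cloud_range[i]) / voxel_size[i])
    for i in range(3)
]
print(grid_dims)  # [512, 512, 32], matching grid_feature_size in models/feed_forward/flow4d.py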
4 changes: 3 additions & 1 deletion dataloaders/__init__.py
@@ -1,7 +1,6 @@
from .abstract_scene_flow_dataset import AbstractSceneFlowDataset, EvalWrapper
from .dataclasses import BucketedSceneFlowInputSequence, BucketedSceneFlowOutputSequence


# Defined before the importable classes to avoid circular imports if they use this function.
def construct_dataset(name: str, args: dict) -> AbstractSceneFlowDataset:
name = name.lower()
@@ -13,10 +12,12 @@ def construct_dataset(name: str, args: dict) -> AbstractSceneFlowDataset:


 from .scene_trajectory_benchmark_scene_flow_dataset import BucketedSceneFlowDataset
+from .flow4ddataclasses import Flow4DSceneFlowDataset


 importable_classes = [
     BucketedSceneFlowDataset,
+    Flow4DSceneFlowDataset
 ]

name_to_class_lookup = {cls.__name__.lower(): cls for cls in importable_classes}
@@ -30,4 +31,5 @@ def construct_dataset(name: str, args: dict) -> AbstractSceneFlowDataset:
"BucketedSceneFlowOutputSequence",
"construct_dataset",
"MiniBatchedSceneFlowInputSequence",
"Flow4DSceneFlowDataset",
]
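For context, construct_dataset resolves names through name_to_class_lookup, which maps each importable class's lowercased __name__ to the class itself, so the new dataset is constructible by name. An illustrative usage sketch (the paths are placeholders):

# Illustrative usage of the dataset name registry; paths are placeholders.
from pathlib import Path

from dataloaders import construct_dataset

dataset = construct_dataset(
    "Flow4DSceneFlowDataset",  # matched case-insensitively against __name__
    dict(dataset_name="Argoverse2CausalSceneFlow", root_dir=Path("/efs/argoverse2_tiny/val/")),
)
print(len(dataset))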
17 changes: 16 additions & 1 deletion dataloaders/dataclasses.py
@@ -128,6 +128,22 @@ def get_full_global_pc(self, idx: int) -> torch.Tensor:
         ego_pc = torch.cat([ego_pc, torch.ones(ego_pc.shape[0], 1, device=ego_pc.device)], dim=1)
         return torch.matmul(sensor_to_global, ego_pc.T).T[:, :3]

+    def get_full_target_pc(self, idx: int) -> torch.Tensor:
+        # Transform the point cloud at the specified index into pc1's (the last frame's) sensor frame.
+        ego_pc = self.get_full_ego_pc(idx)
+
+        sensor_to_ego_pc0, ego_to_global_pc0 = self.get_pc_transform_matrices(idx)
+        sensor_to_ego_pc1, ego_to_global_pc1 = self.get_pc_transform_matrices(-1)
+
+        # Compose sensor -> ego -> global for both the source and the target frame.
+        sensor_to_global_pc0 = torch.matmul(ego_to_global_pc0, sensor_to_ego_pc0)
+        sensor_to_global_pc1 = torch.matmul(ego_to_global_pc1, sensor_to_ego_pc1)
+
+        # Chain global -> pc1-sensor with pc0-sensor -> global to get the direct transform.
+        global_to_pc1 = torch.inverse(sensor_to_global_pc1)
+        sensor_to_pc1 = torch.matmul(global_to_pc1, sensor_to_global_pc0)
+
+        # Homogeneous coordinates for the 4x4 transform.
+        ego_pc = torch.cat([ego_pc, torch.ones(ego_pc.shape[0], 1, device=ego_pc.device)], dim=1)
+        return torch.matmul(sensor_to_pc1, ego_pc.T).T[:, :3]

     def get_full_global_pc_gt_flowed(self, idx: int) -> torch.Tensor:
         ego_pc = self.get_full_ego_pc_gt_flowed(idx)
         sensor_to_ego, ego_to_global = self.get_pc_transform_matrices(idx)
@@ -321,7 +337,6 @@ def _concatenate_transforms(transforms: list[np.ndarray]) -> torch.Tensor:
                 for frame in frame_list
             ]
         )
-
         return BucketedSceneFlowInputSequence(
             dataset_idx=idx,
             sequence_log_id=dataset_log_id,
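The new get_full_target_pc above is plain 4x4 rigid-transform composition. A self-contained sketch of the same math on standalone tensors (not the dataclass API), with a toy pose to check the sign conventions:

# Standalone sketch of the pc0-sensor -> pc1-sensor mapping used above.
import torch

def to_target_frame(pc0, sensor_to_global_pc0, sensor_to_global_pc1):
    # Direct transform: pc0 sensor frame -> global -> pc1 sensor frame.
    sensor_to_pc1 = torch.inverse(sensor_to_global_pc1) @ sensor_to_global_pc0
    homo = torch.cat([pc0, torch.ones(pc0.shape[0], 1)], dim=1)  # Nx4 homogeneous
    return (sensor_to_pc1 @ homo.T).T[:, :3]

# If the ego advanced 1 m along +x between frames, a static point at x=5
# in frame 0's sensor frame should appear at x=4 in frame 1's sensor frame.
T0 = torch.eye(4)
T1 = torch.eye(4)
T1[0, 3] = 1.0
print(to_target_frame(torch.tensor([[5.0, 0.0, 0.0]]), T0, T1))  # tensor([[4., 0., 0.]])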
100 changes: 100 additions & 0 deletions dataloaders/flow4ddataclasses.py
@@ -0,0 +1,100 @@
from abc import ABC, abstractmethod

from .dataclasses import BucketedSceneFlowInputSequence, BucketedSceneFlowOutputSequence
from .abstract_scene_flow_dataset import AbstractSceneFlowDataset, EvalWrapper
from pathlib import Path
from typing import Optional, Union

from bucketed_scene_flow_eval.datasets import construct_dataset
from bucketed_scene_flow_eval.interfaces import LoaderType
from dataclasses import dataclass


@dataclass
class Flow4DdSceneFlowDatasetSplit:
    split_idx: int
    num_splits: int

    def __post_init__(self):
        # Type check the split index and number of splits.
        assert isinstance(self.split_idx, int), f"Invalid split index type {type(self.split_idx)}."
        assert isinstance(
            self.num_splits, int
        ), f"Invalid number of splits type {type(self.num_splits)}."
        assert (
            0 <= self.split_idx < self.num_splits
        ), f"Invalid split index {self.split_idx} for {self.num_splits} splits."

    def current_split_length(self, total_length: int) -> int:
        # Every split gets the same base length; the remainder is distributed
        # one element apiece to the lowest-index splits.
        base_length = total_length // self.num_splits
        distributed_remainder = 1 if total_length % self.num_splits > self.split_idx else 0
        return base_length + distributed_remainder

    def _current_split_global_start(self, total_length: int) -> int:
        base_length = total_length // self.num_splits
        distributed_remainder = min(total_length % self.num_splits, self.split_idx)
        return base_length * self.split_idx + distributed_remainder

    def split_index_to_global_index(self, split_idx: int, global_length: int) -> int:
        assert (
            0 <= split_idx < self.current_split_length(global_length)
        ), f"Invalid split index {split_idx}."
        return self._current_split_global_start(global_length) + split_idx


class Flow4DSceneFlowDataset(AbstractSceneFlowDataset):
    def __init__(
        self,
        dataset_name: str,
        root_dir: Path,
        max_pc_points: int = 120000,
        set_length: Optional[int] = None,
        split: Union[Flow4DdSceneFlowDatasetSplit, dict[str, int]] = Flow4DdSceneFlowDatasetSplit(
            0, 1
        ),
        **kwargs,
    ):
        self.dataset = construct_dataset(
            dataset_name, dict(root_dir=root_dir, subsequence_length=2, **kwargs)
        )
        # TODO: pad the first num_frames-2 entries
        self.max_pc_points = max_pc_points
        self.set_length = set_length
        self.split = (
            split
            if isinstance(split, Flow4DdSceneFlowDatasetSplit)
            else Flow4DdSceneFlowDatasetSplit(**split)
        )

    def _global_len(self):
        global_length = len(self.dataset)
        if self.set_length is not None:
            global_length = min(global_length, self.set_length)
        return global_length

    def __len__(self):
        return self.split.current_split_length(self._global_len())

    def evaluator(self) -> EvalWrapper:
        # TODO: modify the EvalWrapper
        return EvalWrapper(self.dataset)

    def collate_fn(
        self, batch: list[BucketedSceneFlowInputSequence]
    ) -> list[BucketedSceneFlowInputSequence]:
        return batch

    def __getitem__(self, split_idx) -> BucketedSceneFlowInputSequence:
        assert isinstance(split_idx, int), f"Index must be an integer. Got {type(split_idx)}."
        assert 0 <= split_idx < len(self), "Index out of range."
        global_idx = self.split.split_index_to_global_index(split_idx, self._global_len())
        frame_list = self.dataset[global_idx]
        return BucketedSceneFlowInputSequence.from_frame_list(
            global_idx,
            frame_list,
            pc_max_len=self.max_pc_points,
            loader_type=self.dataset.loader_type(),
        )

    def loader_type(self) -> LoaderType:
        return self.dataset.loader_type()
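The split arithmetic above gives every split total_length // num_splits items and hands the remainder out one apiece to the lowest-index splits. A quick worked check of that invariant:

# Worked check: 10 items over 3 splits -> lengths (4, 3, 3), starts (0, 4, 7).
from dataloaders.flow4ddataclasses import Flow4DdSceneFlowDatasetSplit

total_length = 10
for i in range(3):
    split = Flow4DdSceneFlowDatasetSplit(split_idx=i, num_splits=3)
    length = split.current_split_length(total_length)
    start = split.split_index_to_global_index(0, total_length)
    print(f"split {i}: length={length}, global start={start}")
# split 0: length=4, global start=0
# split 1: length=3, global start=4
# split 2: length=3, global start=7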
3 changes: 1 addition & 2 deletions docker/Dockerfile_cuda
@@ -18,7 +18,7 @@
 RUN conda update -y conda

ENV TORCH_CUDA_ARCH_LIST="Ampere;Turing;Pascal"
ENV FORCE_CUDA="1"
-RUN conda install -y numpy python=3.11 pytorch==2.3 torchvision torchaudio pytorch-cuda=11.8 fvcore iopath -c pytorch -c nvidia -c fvcore -c iopath -c conda-forge
+RUN conda install -y numpy=1.23.5 python=3.11 pytorch==2.3 torchvision torchaudio pytorch-cuda=11.8 fvcore iopath -c pytorch -c nvidia -c fvcore -c iopath -c conda-forge

# Install PyTorch3d from source
RUN git clone https://github.com/facebookresearch/pytorch3d.git /pytorch3d
@@ -52,7 +52,6 @@
 RUN pip install h5py
RUN pip install av2==0.2.1
RUN pip install dztimer
RUN pip install spconv-cu117
RUN pip install wandb
RUN pip install lightning==2.0.1

ENV PYTHONPATH=/project:/bucketed_scene_flow_eval:/:${PYTHONPATH}
93 changes: 52 additions & 41 deletions models/feed_forward/flow4d.py
@@ -1,4 +1,10 @@
"""
Copied with modification from: https://github.com/dgist-cvlab/Flow4D
"""


import torch
import dztimer
from models import BaseModel, ForwardMode
from typing import Any, List
from dataloaders import (
@@ -8,11 +14,30 @@
from pytorch_lightning.loggers import Logger

 class Flow4D(BaseModel):
-    def __init__(self, cfg: Any, evaluator: Any = None):
+    def __init__(self, voxel_size=[0.2, 0.2, 0.2],
+                 point_cloud_range=[-51.2, -51.2, -2.2, 51.2, 51.2, 4.2],
+                 grid_feature_size=[512, 512, 32],
+                 num_frames=5):
         super().__init__()
-        self.cfg = cfg
-
-        self.loss_fn = self.initialize_loss_fn(cfg)
+        point_output_ch = 16
+        voxel_output_ch = 16
+
+        self.num_frames = num_frames
+        print('voxel_size = {}, pseudo_dims = {}, input_num_frames = {}'.format(voxel_size, grid_feature_size, self.num_frames))
+
+        # TODO: Port 4D embedder, backbone network, etc.
+        # self.embedder_4D = DynamicEmbedder_4D(voxel_size=voxel_size,
+        #                                       pseudo_image_dims=[grid_feature_size[0], grid_feature_size[1], grid_feature_size[2], num_frames],
+        #                                       point_cloud_range=point_cloud_range,
+        #                                       feat_channels=point_output_ch)
+        # self.network_4D = Network_4D(in_channel=point_output_ch, out_channel=voxel_output_ch)
+        # self.seperate_feat = Seperate_to_3D(num_frames)
+        # self.pointhead_3D = Point_head(voxel_feat_dim=voxel_output_ch, point_feat_dim=point_output_ch)
+
+        self.timer = dztimer.Timing()
+        self.timer.start("Total")
     def forward(
         self,
@@ -27,55 +52,41 @@ def forward(
         Returns:
             A list (len=batch size) of BucketedSceneFlowOutputItem.
         """
-        if forward_mode == ForwardMode.TRAIN:
-            return self.train_forward(batched_sequence, logger)
-        elif forward_mode == ForwardMode.VAL:
-            return self.val_forward(batched_sequence, logger)
-        else:
-            raise ValueError(f"Unsupported forward mode: {forward_mode}")
+        raise NotImplementedError

     def loss_fn(
         self,
         input_batch: List[BucketedSceneFlowInputSequence],
         model_res: List[BucketedSceneFlowOutputSequence],
     ) -> dict[str, torch.Tensor]:

-        return self.loss_fn(input_batch, model_res)
+        raise NotImplementedError

     def _model_forward(
-        self, batch, batch_idx
-    ):
-        res_dict = self.model(batch)
+        self, batched_sequence: List[BucketedSceneFlowInputSequence]
+    ) -> list[BucketedSceneFlowOutputSequence]:
         """
         input: the batch from the dataloader, which is a dict
         Detail: [pc0, pc1, pose0, pose1]
         output: the predicted flow, pose_flow, and the valid point indices of pc0
         """
-        # compute loss
-        total_loss = 0.0
-        total_flow_loss = 0.0
+        batch_sizes = len(batched_sequence)

-        batch_sizes = len(batch["pose0"])
-        gt_flow = batch['flow']  # gt_flow = ego motion + object motion
+        pose_flows = []
+        transform_pc0s = []
+        transform_pc_m_frames = []

-        pose_flows = res_dict['pose_flow']  # pose_flow = flow induced by ego-motion
-        pc0_valid_idx = res_dict['pc0_valid_point_idxes']  # valid indices, since pc0 is padded
-        est_flow = res_dict['flow']  # network output: motion flow
+        # TODO: Transform pcs to the pc[-1] frame; pose_flows = transform_pc0 - origin_pc0
+        transform_pc0s = [(e.get_full_target_pc(-2), e.get_full_pc_mask(-2)) for e in batched_sequence]
+        if self.num_frames > 2:
+            for i in range(self.num_frames - 2):
+                transform_pc_mi = [(e.get_full_target_pc(i), e.get_full_pc_mask(i)) for e in batched_sequence]
+                transform_pc_m_frames.append(transform_pc_mi)
+        pc1s = [e.get_full_ego_pc(-1) for e in batched_sequence]

-        for batch_id in range(batch_sizes):
-            pc0_valid_from_pc2res = pc0_valid_idx[batch_id]
-            pose_flow_ = pose_flows[batch_id][pc0_valid_from_pc2res]
-            est_flow_ = est_flow[batch_id]
-            gt_flow_ = gt_flow[batch_id][pc0_valid_from_pc2res]
-            gt_flow_ = gt_flow_ - pose_flow_
-
-            res_dict = {
-                'est_flow': est_flow_,
-                'gt_flow': gt_flow_,
-                'gt_classes': None if 'flow_category_indices' not in batch else batch['flow_category_indices'][batch_id][pc0_valid_from_pc2res],  # classes 0~30
-            }
-
-            loss = self.loss_fn(res_dict)
-            total_flow_loss += loss.item()
-            total_loss += loss
-
-        self.log("trainer/loss", total_loss / batch_sizes, sync_dist=True, batch_size=self.batch_size)
+        # TODO: embedder_4D, network_4D, Seperate_to_3D, pointhead_3D

-        return total_loss
+        # TODO: the return type should be List[BucketedSceneFlowOutputSequence]
+        raise NotImplementedError
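The "pose_flows = transform_pc0 - origin_pc0" TODO above is standard ego-motion compensation: the flow induced purely by vehicle motion is the displacement each pc0 point picks up when re-expressed in pc1's frame, and subtracting it from the ground-truth flow leaves only object motion. A standalone sketch of that step (independent tensors, not this class's API):

# Standalone sketch of the pose-flow computation the TODO describes.
import torch

def pose_flow(pc0: torch.Tensor, pc0_to_pc1: torch.Tensor) -> torch.Tensor:
    homo = torch.cat([pc0, torch.ones(pc0.shape[0], 1)], dim=1)
    transformed_pc0 = (pc0_to_pc1 @ homo.T).T[:, :3]
    return transformed_pc0 - pc0  # zero when the ego vehicle is stationary

pc0_to_pc1 = torch.eye(4)
pc0_to_pc1[0, 3] = -1.0  # ego advanced 1 m along +x between frames
pc0 = torch.rand(4, 3)
print(pose_flow(pc0, pc0_to_pc1))  # every row is (-1., 0., 0.)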