From f895c015c277e736a759076e0840f07c67c0ed19 Mon Sep 17 00:00:00 2001
From: yan-gao-GY
Date: Thu, 30 Nov 2023 12:38:29 +0000
Subject: [PATCH] Formatting

---
 .../conf/mmcv_conf/finetuning/model_r3d18.py  |  60 ++---
 .../r3d_18_ucf101/finetune_ucf101.py          |  10 +-
 .../finetuning/r3d_18_ucf101/test_ucf101.py   |  10 +-
 .../mmcv_conf/finetuning/runtime_ucf101.py    | 236 +++++++++---------
 4 files changed, 158 insertions(+), 158 deletions(-)

diff --git a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/model_r3d18.py b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/model_r3d18.py
index 5675fd331348..bf85634b2c24 100644
--- a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/model_r3d18.py
+++ b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/model_r3d18.py
@@ -1,32 +1,32 @@
 """Config file used for fine-tuning on UCF-101 dataset."""
 
-model = dict(
-    type="TSN",
-    backbone=dict(
-        type="R3D",
-        depth=18,
-        num_stages=4,
-        stem=dict(
-            temporal_kernel_size=3,
-            temporal_stride=1,
-            in_channels=3,
-            with_pool=False,
-        ),
-        down_sampling=[False, True, True, True],
-        channel_multiplier=1.0,
-        bottleneck_multiplier=1.0,
-        with_bn=True,
-        zero_init_residual=False,
-        pretrained=None,
-    ),
-    st_module=dict(spatial_type="avg", temporal_size=2, spatial_size=7),
-    cls_head=dict(
-        with_avg_pool=False,
-        temporal_feature_size=1,
-        spatial_feature_size=1,
-        dropout_ratio=0.5,
-        in_channels=512,
-        init_std=0.001,
-        num_classes=101,
-    ),
-)
+model = {
+    "type": "TSN",
+    "backbone": {
+        "type": "R3D",
+        "depth": 18,
+        "num_stages": 4,
+        "stem": {
+            "temporal_kernel_size": 3,
+            "temporal_stride": 1,
+            "in_channels": 3,
+            "with_pool": False,
+        },
+        "down_sampling": [False, True, True, True],
+        "channel_multiplier": 1.0,
+        "bottleneck_multiplier": 1.0,
+        "with_bn": True,
+        "zero_init_residual": False,
+        "pretrained": None,
+    },
+    "st_module": {"spatial_type": "avg", "temporal_size": 2, "spatial_size": 7},
+    "cls_head": {
+        "with_avg_pool": False,
+        "temporal_feature_size": 1,
+        "spatial_feature_size": 1,
+        "dropout_ratio": 0.5,
+        "in_channels": 512,
+        "init_std": 0.001,
+        "num_classes": 101,
+    },
+}
diff --git a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/finetune_ucf101.py b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/finetune_ucf101.py
index 59c232d1d2a9..f3e98229e599 100644
--- a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/finetune_ucf101.py
+++ b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/finetune_ucf101.py
@@ -4,8 +4,8 @@
 
 work_dir = "./finetune_results/"
 
-model = dict(
-    backbone=dict(
-        pretrained="./model_pretrained.pth",
-    ),
-)
+model = {
+    "backbone": {
+        "pretrained": "./model_pretrained.pth",
+    },
+}
diff --git a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/test_ucf101.py b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/test_ucf101.py
index d04b703f3b66..2db8ed728a5f 100644
--- a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/test_ucf101.py
+++ b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/r3d_18_ucf101/test_ucf101.py
@@ -4,8 +4,8 @@
 
 work_dir = "./finetune_results/"
 
-model = dict(
-    backbone=dict(
-        pretrained="finetune_results/epoch_150.pth",
-    ),
-)
+model = {
+    "backbone": {
+        "pretrained": "finetune_results/epoch_150.pth",
+    },
+}
diff --git a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/runtime_ucf101.py b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/runtime_ucf101.py
index b028bafdcd44..2f1918cf103d 100644
--- a/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/runtime_ucf101.py
+++ b/baselines/fedvssl/fedvssl/conf/mmcv_conf/finetuning/runtime_ucf101.py
@@ -1,6 +1,6 @@
 """Config file used for fine-tuning on UCF-101 dataset."""
 
-dist_params = dict(backend="nccl")
+dist_params = {"backend": "nccl"}
 log_level = "INFO"
 load_from = None
 resume_from = None
@@ -8,130 +8,130 @@
 
 train_cfg = None
 test_cfg = None
-evaluation = dict(interval=10)
+evaluation = {"interval": 10}
 
-data = dict(
-    videos_per_gpu=4,  # total batch size 8*4 == 32
-    workers_per_gpu=4,
-    train=dict(
-        type="TSNDataset",
-        name="ucf101_train_split1",
-        data_source=dict(
-            type="JsonClsDataSource",
-            ann_file="ucf101/annotations/train_split_1.json",
-        ),
-        backend=dict(
-            type="ZipBackend",
-            zip_fmt="ucf101/zips/{}.zip",
-            frame_fmt="img_{:05d}.jpg",
-        ),
-        frame_sampler=dict(
-            type="RandomFrameSampler",
-            num_clips=1,
-            clip_len=16,
-            strides=2,
-            temporal_jitter=False,
-        ),
-        test_mode=False,
-        transform_cfg=[
-            dict(type="GroupScale", scales=[(149, 112), (171, 128), (192, 144)]),
-            dict(type="GroupFlip", flip_prob=0.35),
-            dict(type="RandomBrightness", prob=0.20, delta=32),
-            dict(type="RandomContrast", prob=0.20, delta=0.20),
-            dict(
-                type="RandomHueSaturation",
-                prob=0.20,
-                hue_delta=12,
-                saturation_delta=0.1,
-            ),
-            dict(type="GroupRandomCrop", out_size=112),
-            dict(
-                type="GroupToTensor",
-                switch_rgb_channels=True,
-                div255=True,
-                mean=(0.485, 0.456, 0.406),
-                std=(0.229, 0.224, 0.225),
-            ),
+data = {
+    "videos_per_gpu": 4,  # total batch size 8*4 == 32
+    "workers_per_gpu": 4,
+    "train": {
+        "type": "TSNDataset",
+        "name": "ucf101_train_split1",
+        "data_source": {
+            "type": "JsonClsDataSource",
+            "ann_file": "ucf101/annotations/train_split_1.json",
+        },
+        "backend": {
+            "type": "ZipBackend",
+            "zip_fmt": "ucf101/zips/{}.zip",
+            "frame_fmt": "img_{:05d}.jpg",
+        },
+        "frame_sampler": {
+            "type": "RandomFrameSampler",
+            "num_clips": 1,
+            "clip_len": 16,
+            "strides": 2,
+            "temporal_jitter": False,
+        },
+        "test_mode": False,
+        "transform_cfg": [
+            {"type": "GroupScale", "scales": [(149, 112), (171, 128), (192, 144)]},
+            {"type": "GroupFlip", "flip_prob": 0.35},
+            {"type": "RandomBrightness", "prob": 0.20, "delta": 32},
+            {"type": "RandomContrast", "prob": 0.20, "delta": 0.20},
+            {
+                "type": "RandomHueSaturation",
+                "prob": 0.20,
+                "hue_delta": 12,
+                "saturation_delta": 0.1,
+            },
+            {"type": "GroupRandomCrop", "out_size": 112},
+            {
+                "type": "GroupToTensor",
+                "switch_rgb_channels": True,
+                "div255": True,
+                "mean": (0.485, 0.456, 0.406),
+                "std": (0.229, 0.224, 0.225),
+            },
         ],
-    ),
-    val=dict(
-        type="TSNDataset",
-        name="ucf101_test_split1",
-        data_source=dict(
-            type="JsonClsDataSource",
-            ann_file="ucf101/annotations/test_split_1.json",
-        ),
-        backend=dict(
-            type="ZipBackend",
-            zip_fmt="ucf101/zips/{}.zip",
-            frame_fmt="img_{:05d}.jpg",
-        ),
-        frame_sampler=dict(
-            type="UniformFrameSampler",
-            num_clips=10,
-            clip_len=16,
-            strides=2,
-            temporal_jitter=False,
-        ),
-        test_mode=True,
-        transform_cfg=[
-            dict(type="GroupScale", scales=[(171, 128)]),
-            dict(type="GroupCenterCrop", out_size=112),
-            dict(
-                type="GroupToTensor",
-                switch_rgb_channels=True,
-                div255=True,
-                mean=(0.485, 0.456, 0.406),
-                std=(0.229, 0.224, 0.225),
-            ),
+    },
+    "val": {
+        "type": "TSNDataset",
+        "name": "ucf101_test_split1",
+        "data_source": {
+            "type": "JsonClsDataSource",
+            "ann_file": "ucf101/annotations/test_split_1.json",
+        },
+        "backend": {
+            "type": "ZipBackend",
+            "zip_fmt": "ucf101/zips/{}.zip",
+            "frame_fmt": "img_{:05d}.jpg",
+        },
+        "frame_sampler": {
+            "type": "UniformFrameSampler",
+            "num_clips": 10,
+            "clip_len": 16,
+            "strides": 2,
+            "temporal_jitter": False,
+        },
+        "test_mode": True,
+        "transform_cfg": [
+            {"type": "GroupScale", "scales": [(171, 128)]},
+            {"type": "GroupCenterCrop", "out_size": 112},
+            {
+                "type": "GroupToTensor",
+                "switch_rgb_channels": True,
+                "div255": True,
+                "mean": (0.485, 0.456, 0.406),
+                "std": (0.229, 0.224, 0.225),
+            },
         ],
-    ),
-    test=dict(
-        type="TSNDataset",
-        name="ucf101_test_split1",
-        data_source=dict(
-            type="JsonClsDataSource",
-            ann_file="ucf101/annotations/test_split_1.json",
-        ),
-        backend=dict(
-            type="ZipBackend",
-            zip_fmt="ucf101/zips/{}.zip",
-            frame_fmt="img_{:05d}.jpg",
-        ),
-        frame_sampler=dict(
-            type="UniformFrameSampler",
-            num_clips=10,
-            clip_len=16,
-            strides=2,
-            temporal_jitter=False,
-        ),
-        test_mode=True,
-        transform_cfg=[
-            dict(type="GroupScale", scales=[(171, 128)]),
-            dict(type="GroupCenterCrop", out_size=112),
-            dict(
-                type="GroupToTensor",
-                switch_rgb_channels=True,
-                div255=True,
-                mean=(0.485, 0.456, 0.406),
-                std=(0.229, 0.224, 0.225),
-            ),
+    },
+    "test": {
+        "type": "TSNDataset",
+        "name": "ucf101_test_split1",
+        "data_source": {
+            "type": "JsonClsDataSource",
+            "ann_file": "ucf101/annotations/test_split_1.json",
+        },
+        "backend": {
+            "type": "ZipBackend",
+            "zip_fmt": "ucf101/zips/{}.zip",
+            "frame_fmt": "img_{:05d}.jpg",
+        },
+        "frame_sampler": {
+            "type": "UniformFrameSampler",
+            "num_clips": 10,
+            "clip_len": 16,
+            "strides": 2,
+            "temporal_jitter": False,
+        },
+        "test_mode": True,
+        "transform_cfg": [
+            {"type": "GroupScale", "scales": [(171, 128)]},
+            {"type": "GroupCenterCrop", "out_size": 112},
+            {
+                "type": "GroupToTensor",
+                "switch_rgb_channels": True,
+                "div255": True,
+                "mean": (0.485, 0.456, 0.406),
+                "std": (0.229, 0.224, 0.225),
+            },
         ],
-    ),
-)
+    },
+}
 
 # optimizer
 total_epochs = 150
-optimizer = dict(type="SGD", lr=0.01, momentum=0.9, weight_decay=5e-4)
-optimizer_config = dict(grad_clip=dict(max_norm=40, norm_type=2))
+optimizer = {"type": "SGD", "lr": 0.01, "momentum": 0.9, "weight_decay": 5e-4}
+optimizer_config = {"grad_clip": {"max_norm": 40, "norm_type": 2}}
 # learning policy
-lr_config = dict(policy="step", step=[60, 120])
-checkpoint_config = dict(interval=1, max_keep_ckpts=1, create_symlink=False)
+lr_config = {"policy": "step", "step": [60, 120]}
+checkpoint_config = {"interval": 1, "max_keep_ckpts": 1, "create_symlink": False}
 workflow = [("train", 50)]
-log_config = dict(
-    interval=10,
-    hooks=[
-        dict(type="TextLoggerHook"),
-        dict(type="TensorboardLoggerHook"),
+log_config = {
+    "interval": 10,
+    "hooks": [
+        {"type": "TextLoggerHook"},
+        {"type": "TensorboardLoggerHook"},
     ],
-)
+}