IndexError: index 44 is out of bounds for axis 1 with size 24 #3119
This is the dataset configuration:
Prerequisite
Environment
Package Version Editable project location
addict 2.4.0
aiofiles 23.2.1
aioice 0.9.0
aiortc 1.9.0
albucore 0.0.13
albumentations 1.4.14
aliyun-python-sdk-core 2.15.1
aliyun-python-sdk-kms 2.16.3
altair 5.3.0
annotated-types 0.7.0
anyio 4.4.0
attrs 23.2.0
av 12.3.0
blinker 1.8.2
cachetools 5.4.0
certifi 2022.12.7
cffi 1.16.0
charset-normalizer 2.1.1
chumpy 0.70
click 8.1.7
colorama 0.4.6
contourpy 1.2.1
coverage 7.5.4
crcmod 1.7
cryptography 42.0.8
cycler 0.12.1
Cython 3.0.10
dnspython 2.6.1
eval_type_backport 0.2.0
exceptiongroup 1.2.1
fastapi 0.112.0
ffmpeg 1.4
ffmpy 0.4.0
filelock 3.14.0
flake8 7.1.0
fonttools 4.53.0
fsspec 2024.6.1
gitdb 4.0.11
GitPython 3.1.43
google-crc32c 1.5.0
gradio 4.40.0
gradio_client 1.2.0
h11 0.14.0
httpcore 1.0.5
httpx 0.27.0
huggingface-hub 0.24.5
idna 3.4
ifaddr 0.2.0
imageio 2.34.2
importlib_metadata 7.2.1
importlib_resources 6.4.0
iniconfig 2.0.0
interrogate 1.7.0
isort 4.3.21
Jinja2 3.1.4
jmespath 0.10.0
joblib 1.4.2
json-tricks 3.17.3
jsonlint 0.1
jsonschema 4.23.0
jsonschema-specifications 2023.12.1
kiwisolver 1.4.5
lazy_loader 0.4
Markdown 3.6
markdown-it-py 3.0.0
MarkupSafe 2.1.5
matplotlib 3.9.0
mccabe 0.7.0
mdurl 0.1.2
mmcv 2.1.0
mmdet 3.2.0
mmengine 0.10.4
mmpose 1.3.2 /home/bhoomi/mmpose
model-index 0.1.11
munkres 1.1.4
networkx 3.3
numpy 1.26.3
opencv-python 4.10.0.84
opencv-python-headless 4.10.0.84
opendatalab 0.0.10
openmim 0.3.9
openxlab 0.1.0
ordered-set 4.1.0
orjson 3.10.6
oss2 2.17.0
packaging 24.1
pandas 2.2.2
parameterized 0.9.0
pillow 10.2.0
pip 22.0.2
platformdirs 4.2.2
pluggy 1.5.0
protobuf 5.27.3
py 1.11.0
pyarrow 17.0.0
pyav 12.1.0
pycocotools 2.0.8
pycodestyle 2.12.0
pycparser 2.22
pycryptodome 3.20.0
pydantic 2.7.4
pydantic_core 2.18.4
pydeck 0.9.1
pydub 0.25.1
pyee 11.1.0
pyflakes 3.2.0
Pygments 2.18.0
pylibsrtp 0.10.0
pyOpenSSL 24.2.1
pyparsing 3.1.2
pytest 8.2.2
pytest-runner 6.0.1
python-dateutil 2.9.0.post0
python-multipart 0.0.9
pytz 2023.4
PyYAML 6.0.1
referencing 0.35.1
requests 2.32.3
rich 13.4.2
rpds-py 0.19.1
ruff 0.5.6
scikit-image 0.24.0
scikit-learn 1.5.0
scipy 1.13.1
semantic-version 2.10.0
setuptools 60.2.0
shapely 2.0.4
shellingham 1.5.4
six 1.16.0
smmap 5.0.1
sniffio 1.3.1
starlette 0.37.2
streamlit 1.37.0
streamlit-webrtc 0.47.7
tabulate 0.9.0
tenacity 8.5.0
termcolor 2.4.0
terminaltables 3.1.10
threadpoolctl 3.5.0
tifffile 2024.6.18
toml 0.10.2
tomli 2.0.1
tomlkit 0.12.0
toolz 0.12.1
torch 1.11.0+cu115
torchaudio 0.11.0+cu115
torchvision 0.12.0+cu115
tornado 6.4.1
tqdm 4.65.2
typer 0.12.3
typing_extensions 4.9.0
tzdata 2024.1
urllib3 2.2.2
uvicorn 0.30.5
watchdog 4.0.1
websockets 12.0
xdoctest 1.1.5
xtcocotools 1.14.3
yapf 0.40.2
zipp 3.19.2
Reproduces the problem - code sample
_base_ = ['../../../_base_/default_runtime.py']
# runtime
max_epochs = 270
stage2_num_epochs = 30
base_lr = 4e-3
train_cfg = dict(max_epochs=max_epochs, val_interval=10)
randomness = dict(seed=21)
# optimizer
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=base_lr, weight_decay=0.05),
paramwise_cfg=dict(
norm_decay_mult=0, bias_decay_mult=0, bypass_duplicate=True))
# learning rate
param_scheduler = [
dict(
type='LinearLR',
start_factor=1.0e-5,
by_epoch=False,
begin=0,
end=1000),
dict(
type='CosineAnnealingLR',
eta_min=base_lr * 0.05,
begin=max_epochs // 2,
end=max_epochs,
T_max=max_epochs // 2,
by_epoch=True,
convert_to_iter_based=True),
]
# automatically scaling LR based on the actual training batch size
auto_scale_lr = dict(base_batch_size=512)
# codec settings
codec = dict(
type='SimCCLabel',
input_size=(192, 256),
sigma=(4.9, 5.66),
simcc_split_ratio=2.0,
normalize=False,
use_dark=False)
# model settings
model = dict(
type='TopdownPoseEstimator',
data_preprocessor=dict(
type='PoseDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True
),
backbone=dict(
_scope_='mmdet',
type='CSPNeXt',
arch='P5',
expand_ratio=0.5,
deepen_factor=0.67,
widen_factor=0.75,
out_indices=(4, ),
channel_attention=True,
norm_cfg=dict(type='SyncBN'),
act_cfg=dict(type='SiLU'),
# Remove init_cfg for training from scratch
init_cfg=None # Ensure no pre-trained weights are used
),
head=dict(
type='RTMCCHead',
in_channels=768,
out_channels=64,
input_size=codec['input_size'],
in_featuremap_size=tuple([s // 32 for s in codec['input_size']]),
simcc_split_ratio=codec['simcc_split_ratio'],
final_layer_kernel_size=7,
gau_cfg=dict(
hidden_dims=256,
s=128,
expansion_factor=2,
dropout_rate=0.,
drop_path=0.,
act_fn='SiLU',
use_rel_bias=False,
pos_enc=False
),
loss=dict(
type='KLDiscretLoss',
use_target_weight=True,
beta=10.,
label_softmax=True
),
decoder=codec
),
test_cfg=dict(
flip_test=True
)
)
# base dataset settings
dataset_type = 'CocoDataset'
data_mode = 'topdown'
data_root = 'data/coco/'
backend_args = dict(backend='local')
# backend_args = dict(
#     backend='petrel',
#     path_mapping=dict({
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/',
#         f'{data_root}': 's3://openmmlab/datasets/detection/coco/'
#     }))
# pipelines
train_pipeline = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='GetBBoxCenterScale'),
dict(type='RandomFlip', direction='horizontal'),
dict(type='RandomHalfBody'),
dict(
type='RandomBBoxTransform', scale_factor=[0.6, 1.4], rotate_factor=80),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='mmdet.YOLOXHSVRandomAug'),
dict(
type='Albumentation',
transforms=[
dict(type='Blur', p=0.1),
dict(type='MedianBlur', p=0.1),
dict(
type='CoarseDropout',
max_holes=1,
max_height=0.4,
max_width=0.4,
min_holes=1,
min_height=0.2,
min_width=0.2,
p=1.0),
]),
dict(type='GenerateTarget', encoder=codec),
dict(type='PackPoseInputs')
]
val_pipeline = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='GetBBoxCenterScale'),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='PackPoseInputs')
]
train_pipeline_stage2 = [
dict(type='LoadImage', backend_args=backend_args),
dict(type='GetBBoxCenterScale'),
dict(type='RandomFlip', direction='horizontal'),
dict(type='RandomHalfBody'),
dict(
type='RandomBBoxTransform',
shift_factor=0.,
scale_factor=[0.75, 1.25],
rotate_factor=60),
dict(type='TopdownAffine', input_size=codec['input_size']),
dict(type='mmdet.YOLOXHSVRandomAug'),
dict(
type='Albumentation',
transforms=[
dict(type='Blur', p=0.1),
dict(type='MedianBlur', p=0.1),
dict(
type='CoarseDropout',
max_holes=1,
max_height=0.4,
max_width=0.4,
min_holes=1,
min_height=0.2,
min_width=0.2,
p=0.5),
]),
dict(type='GenerateTarget', encoder=codec),
dict(type='PackPoseInputs')
]
# data loaders
train_dataloader = dict(
batch_size=64,
num_workers=8,
persistent_workers=True,
sampler=dict(type='DefaultSampler', shuffle=True),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_mode=data_mode,
ann_file='annotations/coco_wholebody_train_v1.0.json',
# remaining fields assumed: train-split counterparts of the val_dataloader below
data_prefix=dict(img='train2017/'),
metainfo=dict(from_file='configs/_base_/datasets/custom.py'),
pipeline=train_pipeline,
))
val_dataloader = dict(
batch_size=32,
num_workers=8,
persistent_workers=True,
drop_last=False,
sampler=dict(type='DefaultSampler', shuffle=False, round_up=False),
dataset=dict(
type=dataset_type,
data_root=data_root,
data_mode=data_mode,
ann_file='annotations/coco_wholebody_val_v1.0.json',
data_prefix=dict(img='val2017/'),
metainfo=dict(from_file='configs/_base_/datasets/custom.py'),
test_mode=True,
bbox_file='data/coco/person_detection_results/'
'COCO_val2017_detections_AP_H_56_person.json',
pipeline=val_pipeline,
))
test_dataloader = val_dataloader
# hooks
default_hooks = dict(
checkpoint=dict(
save_best='coco-wholebody/AP', rule='greater', max_keep_ckpts=1))
custom_hooks = [
dict(
type='EMAHook',
ema_type='ExpMomentumEMA',
momentum=0.0002,
update_buffers=True,
priority=49),
dict(
type='mmdet.PipelineSwitchHook',
switch_epoch=max_epochs - stage2_num_epochs,
switch_pipeline=train_pipeline_stage2)
]
# evaluators
val_evaluator = dict(
type='CocoWholeBodyMetric',
ann_file=data_root + 'annotations/coco_wholebody_val_v1.0.json')
test_evaluator = val_evaluator
Reproduces the problem - command or script
python3 tools/train.py configs/wholebody_2d_keypoint/rtmpose/coco-wholebody/custom.py
Reproduces the problem - error message
loading annotations into memory...
Done (t=0.80s)
creating index...
index created!
09/04 14:30:45 - mmengine - WARNING - "FileClient" will be deprecated in future. Please use io functions in https://mmengine.readthedocs.io/en/latest/api/fileio.html#file-io
09/04 14:30:45 - mmengine - WARNING - "HardDiskBackend" is the alias of "LocalBackend" and the former will be deprecated in future.
09/04 14:30:45 - mmengine - INFO - Checkpoints will be saved to /home/bhoomi/mmpose/work_dirs/custom.
Traceback (most recent call last):
File "/home/bhoomi/mmpose/tools/train.py", line 162, in
main()
File "/home/bhoomi/mmpose/tools/train.py", line 158, in main
runner.train()
File "/home/bhoomi/pose/lib/python3.10/site-packages/mmengine/runner/runner.py", line 1777, in train
model = self.train_loop.run() # type: ignore
File "/home/bhoomi/pose/lib/python3.10/site-packages/mmengine/runner/loops.py", line 96, in run
self.run_epoch()
File "/home/bhoomi/pose/lib/python3.10/site-packages/mmengine/runner/loops.py", line 112, in run_epoch
for idx, data_batch in enumerate(self.dataloader):
File "/home/bhoomi/pose/lib/python3.10/site-packages/torch/utils/data/dataloader.py", line 530, in next
data = self._next_data()
File "/home/bhoomi/pose/lib/python3.10/site-packages/torch/utils/data/dataloader.py", line 1224, in _next_data
return self._process_data(data)
File "/home/bhoomi/pose/lib/python3.10/site-packages/torch/utils/data/dataloader.py", line 1250, in _process_data
data.reraise()
File "/home/bhoomi/pose/lib/python3.10/site-packages/torch/_utils.py", line 457, in reraise
raise exception
IndexError: Caught IndexError in DataLoader worker process 0.
Original Traceback (most recent call last):
File "/home/bhoomi/pose/lib/python3.10/site-packages/torch/utils/data/_utils/worker.py", line 287, in _worker_loop
data = fetcher.fetch(index)
File "/home/bhoomi/pose/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py", line 49, in fetch
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/bhoomi/pose/lib/python3.10/site-packages/torch/utils/data/_utils/fetch.py", line 49, in
data = [self.dataset[idx] for idx in possibly_batched_index]
File "/home/bhoomi/pose/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 410, in getitem
data = self.prepare_data(idx)
File "/home/bhoomi/pose/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 115, in wrapper
return old_func(obj, *args, **kwargs)
File "/home/bhoomi/mmpose/mmpose/datasets/datasets/base/base_coco_style_dataset.py", line 170, in prepare_data
return self.pipeline(data_info)
File "/home/bhoomi/pose/lib/python3.10/site-packages/mmengine/dataset/base_dataset.py", line 60, in call
data = t(data)
File "/home/bhoomi/pose/lib/python3.10/site-packages/mmcv/transforms/base.py", line 12, in call
return self.transform(results)
File "/home/bhoomi/mmpose/mmpose/datasets/transforms/common_transforms.py", line 239, in transform
keypoints, keypoints_visible = flip_keypoints(
File "/home/bhoomi/mmpose/mmpose/structures/keypoint/transforms.py", line 52, in flip_keypoints
keypoints = keypoints.take(flip_indices, axis=ndim - 2)
IndexError: index 44 is out of bounds for axis 1 with size 24
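For context, the failing call is keypoints.take(flip_indices, axis=ndim - 2) in mmpose/structures/keypoint/transforms.py, and the same error can be reproduced with plain numpy whenever flip_indices (built from the dataset metainfo) refers to more keypoints than the loaded sample actually contains. A minimal sketch of that mismatch (the 24-keypoint shape and the index 44 come from the traceback above; the index list itself is only illustrative, not the real flip_indices):

import numpy as np

# One instance with 24 keypoints in (x, y) form, matching the error
# ("axis 1 with size 24"); the values are dummies.
keypoints = np.zeros((1, 24, 2), dtype=np.float32)

# flip_indices produced from a metainfo that describes a larger keypoint set,
# so it contains indices (e.g. 44) that do not exist in the array above.
flip_indices = list(range(45))

# Same call as flip_keypoints makes
flipped = keypoints.take(flip_indices, axis=keypoints.ndim - 2)
# -> IndexError: index 44 is out of bounds for axis 1 with size 24

In other words, the keypoint set described by the metainfo (which determines flip_indices) and the keypoint arrays produced for each sample do not have the same size.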
Additional information
No response