diff --git a/.github/workflows/citest.yaml b/.github/workflows/citest.yaml index 2f5f0d87..9141b154 100644 --- a/.github/workflows/citest.yaml +++ b/.github/workflows/citest.yaml @@ -39,7 +39,7 @@ concurrency: jobs: ut-torch180: # The type of runner that the job will run on - runs-on: [unittest-t4] + runs-on: [self-hosted] steps: - name: Checkout uses: actions/checkout@v2 @@ -64,22 +64,27 @@ jobs: # pip install -r requirements.txt #run test export CUDA_VISIBLE_DEVICES=7 - source ~/workspace/anaconda2/etc/profile.d/conda.sh + source ~/anaconda3/etc/profile.d/conda.sh conda activate easycv_torch1.8.0 # pip install pai-easycv pip uninstall -y pai-easycv + python setup.py install + python setup.py sdist bdist_wheel # move source code, ensure import easycv from site-package mv ./easycv ./easycv_src - PYTHONPATH=. python tests/run.py + PYTHONPATH=. python tests/run.py --skip_dir tests/test_toolkit/modelscope + + conda activate easycv_torch1.8.0_py37 + PYTHONPATH=. python tests/run.py --test_dir tests/test_toolkit/modelscope # blade test env will be updated! we do not support test with trt_efficient_nms ut-torch181-blade: # The type of runner that the job will run on - runs-on: [unittest-t4] + runs-on: [self-hosted] steps: - name: Checkout uses: actions/checkout@v2 @@ -105,7 +110,7 @@ jobs: # pip install -r requirements.txt #run test export CUDA_VISIBLE_DEVICES=6 - source ~/workspace/anaconda2/etc/profile.d/conda.sh + source ~/anaconda3/etc/profile.d/conda.sh conda activate torch1.8.1_blade - PYTHONPATH=. python tests/predictors/test_detector_blade.py - PYTHONPATH=. python tests/apis/test_export_blade.py + PYTHONPATH=. python tests/test_predictors/test_detector_blade.py + PYTHONPATH=. python tests/test_apis/test_export_blade.py diff --git a/README.md b/README.md index 96426319..73df5bc8 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,11 @@ EasyCV is an all-in-one computer vision toolbox based on PyTorch, mainly focuses ## What's New +[🔥 2023.05.09] + +* 09/05/2023 EasyCV v0.11.0 was released. +- Support EasyCV as a plug-in for [modelscope](https://github.com/modelscope/modelscope). + [🔥 2023.03.06] * 06/03/2023 EasyCV v0.10.0 was released. diff --git a/README_zh-CN.md b/README_zh-CN.md index 749174e8..405fd3fd 100644 --- a/README_zh-CN.md +++ b/README_zh-CN.md @@ -45,6 +45,11 @@ EasyCV是一个涵盖多个领域的基于Pytorch的计算机视觉工具箱, ## 最新进展 +[🔥 2023.05.09] + +* 09/05/2023 EasyCV v0.11.0 was released. +- 支持EasyCV作为[modelscope](https://github.com/modelscope/modelscope)插件接入模型. + [🔥 2023.03.06] * 06/03/2023 EasyCV v0.10.0 was released.
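The release notes above announce the headline change of this PR: EasyCV can now be driven as a modelscope plug-in. A minimal usage sketch of that integration is shown below; the model id is a placeholder, not taken from this PR, and the output keys follow the detection pipeline added later in this patch.

```python
# Minimal sketch: run an EasyCV model through the modelscope pipeline API.
# Importing easycv.toolkit.modelscope triggers the register_module decorators
# added in this PR, so EasyCV models, pipelines, and datasets become visible
# to modelscope's registries. '<your-easycv-model-id>' is a placeholder.
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks

import easycv.toolkit.modelscope  # noqa: F401 (imported for registration side effects)

detector = pipeline(Tasks.image_object_detection, model='<your-easycv-model-id>')
result = detector('path/to/image.jpg')
print(result['scores'], result['labels'], result['boxes'])
```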
diff --git a/configs/classification/imagenet/common/dataset/imagenet_classification.py b/configs/classification/imagenet/common/dataset/imagenet_classification.py index 55ef4398..c6d631bc 100644 --- a/configs/classification/imagenet/common/dataset/imagenet_classification.py +++ b/configs/classification/imagenet/common/dataset/imagenet_classification.py @@ -153,7 +153,7 @@ data=data['val'], dist_eval=True, evaluators=[ - dict(type='ClsEvaluator', topk=(1, 5), class_list=class_list) + dict(type='ClsEvaluator', topk=(1, ), class_list=class_list) ], ) ] diff --git a/configs/detection/fcos/coco_detection.py b/configs/detection/fcos/coco_detection.py index 229711f7..3de6d4c2 100644 --- a/configs/detection/fcos/coco_detection.py +++ b/configs/detection/fcos/coco_detection.py @@ -102,6 +102,7 @@ eval_pipelines = [ dict( mode='test', + data=data['val'], evaluators=[ dict(type='CocoDetectionEvaluator', classes=CLASSES), ], diff --git a/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12.py b/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12.py index 15d7f225..748acbe2 100644 --- a/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12.py +++ b/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12.py @@ -1,4 +1,55 @@ -_base_ = ['./fcn_r50-d8_512x512_8xb4_60e_voc12aug.py'] +_base_ = ['configs/base.py'] + +# model settings +num_classes = 21 + +# norm_cfg = dict(type='SyncBN', requires_grad=True) # multi gpus +norm_cfg = dict(type='BN', requires_grad=True) + +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(1, 2, 3, 4), + dilations=(1, 1, 2, 4), + strides=(1, 2, 1, 1), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True, + ), + decode_head=dict( + type='FCNHead', + in_channels=2048, + in_index=3, + channels=512, + num_convs=2, + concat_input=True, + dropout_ratio=0.1, + num_classes=num_classes, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=num_classes, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) CLASSES = [ 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', @@ -8,21 +59,102 @@ # dataset settings dataset_type = 'SegDataset' -data_root = 'data/VOCdevkit/VOC2012/' +data_type = 'SegSourceRaw' +data_root = 'data/VOCdevkit/VOC2012/' train_img_root = data_root + 'JPEGImages' train_label_root = data_root + 'SegmentationClass' train_list_file = data_root + 'ImageSets/Segmentation/train.txt' +val_img_root = data_root + 'JPEGImages' +val_label_root = data_root + 'SegmentationClass' +val_list_file = data_root + 'ImageSets/Segmentation/val.txt' + +test_batch_size = 2 + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (512, 512) +train_pipeline = [ + dict(type='MMResize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='SegRandomCrop', crop_size=(512, 512), cat_max_ratio=0.75), + dict(type='MMRandomFlip', flip_ratio=0.5), + dict(type='MMPhotoMetricDistortion'), + dict(type='MMNormalize', **img_norm_cfg), + dict(type='MMPad', size=(512, 512)), +
dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg'], + meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', + 'pad_shape', 'scale_factor', 'flip', 'flip_direction', + 'img_norm_cfg')), +] +test_pipeline = [ + dict( + type='MMMultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='MMResize', keep_ratio=True), + dict(type='MMRandomFlip'), + dict(type='MMNormalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict( + type='Collect', + keys=['img'], + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction', 'img_norm_cfg')), + ]) +] data = dict( + imgs_per_gpu=4, + workers_per_gpu=4, train=dict( type=dataset_type, ignore_index=255, data_source=dict( - _delete_=True, - type='SegSourceRaw', + type=data_type, img_root=train_img_root, label_root=train_label_root, split=train_list_file, classes=CLASSES), - )) + pipeline=train_pipeline), + val=dict( + imgs_per_gpu=test_batch_size, + ignore_index=255, + type=dataset_type, + data_source=dict( + type=data_type, + img_root=val_img_root, + label_root=val_label_root, + split=val_list_file, + classes=CLASSES, + ), + pipeline=test_pipeline)) + +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005) +optimizer_config = dict() + +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=True) + +# runtime settings +total_epochs = 60 +checkpoint_config = dict(interval=1) +eval_config = dict(interval=1, gpu_collect=False) +eval_pipelines = [ + dict( + mode='test', + evaluators=[ + dict( + type='SegmentationEvaluator', + classes=CLASSES, + metric_names=['mIoU']) + ], + ) +] diff --git a/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12aug.py b/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12aug.py index 351ebce2..80580aaa 100644 --- a/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12aug.py +++ b/configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12aug.py @@ -1,4 +1,4 @@ -_base_ = ['configs/base.py'] +_base_ = 'configs/base.py' CLASSES = [ 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', diff --git a/configs/segmentation/segformer/segformer_b5_coco.py b/configs/segmentation/segformer/segformer_b5_coco.py index 83ccd9a3..5f96c9f8 100644 --- a/configs/segmentation/segformer/segformer_b5_coco.py +++ b/configs/segmentation/segformer/segformer_b5_coco.py @@ -1,5 +1,36 @@ _base_ = './segformer_b0_coco.py' +CLASSES = [ + 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', + 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', + 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', + 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', + 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', + 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', + 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', + 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', + 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', + 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', + 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', + 'hair drier', 'toothbrush', 'banner', 'blanket', 'branch', 'bridge', + 
'building-other', 'bush', 'cabinet', 'cage', 'cardboard', 'carpet', + 'ceiling-other', 'ceiling-tile', 'cloth', 'clothes', 'clouds', 'counter', + 'cupboard', 'curtain', 'desk-stuff', 'dirt', 'door-stuff', 'fence', + 'floor-marble', 'floor-other', 'floor-stone', 'floor-tile', 'floor-wood', + 'flower', 'fog', 'food-other', 'fruit', 'furniture-other', 'grass', + 'gravel', 'ground-other', 'hill', 'house', 'leaves', 'light', 'mat', + 'metal', 'mirror-stuff', 'moss', 'mountain', 'mud', 'napkin', 'net', + 'paper', 'pavement', 'pillow', 'plant-other', 'plastic', 'platform', + 'playingfield', 'railing', 'railroad', 'river', 'road', 'rock', 'roof', + 'rug', 'salad', 'sand', 'sea', 'shelf', 'sky-other', 'skyscraper', 'snow', + 'solid-other', 'stairs', 'stone', 'straw', 'structural-other', 'table', + 'tent', 'textile-other', 'towel', 'tree', 'vegetable', 'wall-brick', + 'wall-concrete', 'wall-other', 'wall-panel', 'wall-stone', 'wall-tile', + 'wall-wood', 'water-other', 'waterdrops', 'window-blind', 'window-other', + 'wood' +] + model = dict( pretrained= 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b5_20220624-658746d9.pth', @@ -15,9 +46,11 @@ img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (2048, 640) crop_size = (640, 640) train_pipeline = [ - dict(type='MMResize', img_scale=(2048, 640), ratio_range=(0.5, 2.0)), + dict(type='MMResize', img_scale=img_scale, ratio_range=(0.5, 2.0)), dict(type='SegRandomCrop', crop_size=crop_size, cat_max_ratio=0.75), dict(type='MMRandomFlip', flip_ratio=0.5), dict(type='MMPhotoMetricDistortion'), @@ -34,7 +67,7 @@ test_pipeline = [ dict( type='MMMultiScaleFlipAug', - img_scale=(2048, 640), + img_scale=img_scale, # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], flip=False, transforms=[ @@ -50,3 +83,50 @@ 'flip_direction', 'img_norm_cfg')), ]) ] + +# dataset settings +data_type = 'SegSourceRaw' +data_root = 'data/VOCdevkit/VOC2012/' + +train_img_root = data_root + 'JPEGImages' +train_label_root = data_root + 'SegmentationClass' +train_list_file = data_root + 'ImageSets/Segmentation/train.txt' + +val_img_root = data_root + 'JPEGImages' +val_label_root = data_root + 'SegmentationClass' +val_list_file = data_root + 'ImageSets/Segmentation/val.txt' + +test_batch_size = 2 + +data = dict( + imgs_per_gpu=2, + workers_per_gpu=2, + train=dict( + type='SegDataset', + ignore_index=255, + data_source=dict( + type=data_type, + img_suffix='.jpg', + label_suffix='.png', + img_root=train_img_root, + label_root=train_label_root, + split=train_list_file, + classes=CLASSES, + ), + pipeline=train_pipeline), + val=dict( + imgs_per_gpu=test_batch_size, + ignore_index=255, + type='SegDataset', + data_source=dict( + type=data_type, + img_suffix='.jpg', + label_suffix='.png', + img_root=val_img_root, + label_root=val_label_root, + split=val_list_file, + classes=CLASSES, + ), + pipeline=test_pipeline), +) diff --git a/configs/segmentation/upernet/upernet_r50_512x512_8xb4_60e_voc12.py b/configs/segmentation/upernet/upernet_r50_512x512_8xb4_60e_voc12.py index 96621d88..de26afe2 100644 --- a/configs/segmentation/upernet/upernet_r50_512x512_8xb4_60e_voc12.py +++ b/configs/segmentation/upernet/upernet_r50_512x512_8xb4_60e_voc12.py @@ -1,4 +1,4 @@ -_base_ = ['./upernet_r50_512x512_8xb4_60e_voc12aug.py'] +_base_ = ['configs/base.py'] CLASSES = [ 'background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', @@ -6,23 +6,153 @@ 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor' ] +# model settings +num_classes = 21 +# norm_cfg = dict(type='SyncBN', requires_grad=True) # multi gpus +norm_cfg = dict(type='BN', requires_grad=True) + +model = dict( + type='EncoderDecoder', + pretrained='open-mmlab://resnet50_v1c', + backbone=dict( + type='ResNetV1c', + depth=50, + num_stages=4, + out_indices=(1, 2, 3, 4), + dilations=(1, 1, 1, 1), + strides=(1, 2, 2, 2), + norm_cfg=norm_cfg, + norm_eval=False, + style='pytorch', + contract_dilation=True, + ), + decode_head=dict( + type='UPerHead', + in_channels=[256, 512, 1024, 2048], + in_index=[0, 1, 2, 3], + pool_scales=(1, 2, 3, 6), + channels=512, + dropout_ratio=0.1, + num_classes=num_classes, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)), + auxiliary_head=dict( + type='FCNHead', + in_channels=1024, + in_index=2, + channels=256, + num_convs=1, + concat_input=False, + dropout_ratio=0.1, + num_classes=num_classes, + norm_cfg=norm_cfg, + align_corners=False, + loss_decode=dict( + type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)), + # model training and testing settings + train_cfg=dict(), + test_cfg=dict(mode='whole')) + # dataset settings dataset_type = 'SegDataset' -data_root = 'data/VOCdevkit/VOC2012/' +data_type = 'SegSourceRaw' +data_root = 'data/VOCdevkit/VOC2012/' train_img_root = data_root + 'JPEGImages' train_label_root = data_root + 'SegmentationClass' train_list_file = data_root + 'ImageSets/Segmentation/train.txt' +val_img_root = data_root + 'JPEGImages' +val_label_root = data_root + 'SegmentationClass' +val_list_file = data_root + 'ImageSets/Segmentation/val.txt' + +test_batch_size = 2 + +img_norm_cfg = dict( + mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) + +img_scale = (512, 512) +train_pipeline = [ + dict(type='MMResize', img_scale=img_scale, ratio_range=(0.5, 2.0)), + dict(type='SegRandomCrop', crop_size=(512, 512), cat_max_ratio=0.75), + dict(type='MMRandomFlip', flip_ratio=0.5), + dict(type='MMPhotoMetricDistortion'), + dict(type='MMNormalize', **img_norm_cfg), + dict(type='MMPad', size=(512, 512)), + dict(type='DefaultFormatBundle'), + dict( + type='Collect', + keys=['img', 'gt_semantic_seg'], + meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', + 'pad_shape', 'scale_factor', 'flip', 'flip_direction', + 'img_norm_cfg')), +] +test_pipeline = [ + dict( + type='MMMultiScaleFlipAug', + img_scale=img_scale, + # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], + flip=False, + transforms=[ + dict(type='MMResize', keep_ratio=True), + dict(type='MMRandomFlip'), + dict(type='MMNormalize', **img_norm_cfg), + dict(type='ImageToTensor', keys=['img']), + dict( + type='Collect', + keys=['img'], + meta_keys=('filename', 'ori_filename', 'ori_shape', + 'img_shape', 'pad_shape', 'scale_factor', 'flip', + 'flip_direction', 'img_norm_cfg')), + ]) +] data = dict( + imgs_per_gpu=4, + workers_per_gpu=4, train=dict( type=dataset_type, ignore_index=255, data_source=dict( - _delete_=True, - type='SegSourceRaw', + type=data_type, img_root=train_img_root, label_root=train_label_root, split=train_list_file, classes=CLASSES), - )) + pipeline=train_pipeline), + val=dict( + imgs_per_gpu=test_batch_size, + ignore_index=255, + type=dataset_type, + data_source=dict( + type=data_type, + img_root=val_img_root, + label_root=val_label_root, + split=val_list_file, + classes=CLASSES, + ), + pipeline=test_pipeline)) + +# optimizer +optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
+optimizer_config = dict() + +# learning policy +lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=True) + +# runtime settings +total_epochs = 60 +checkpoint_config = dict(interval=1) +eval_config = dict(interval=1, gpu_collect=False) +eval_pipelines = [ + dict( + mode='test', + evaluators=[ + dict( + type='SegmentationEvaluator', + classes=CLASSES, + metric_names=['mIoU']) + ], + ) +] diff --git a/docs/source/change_log.md b/docs/source/change_log.md index e372afac..57604cfc 100644 --- a/docs/source/change_log.md +++ b/docs/source/change_log.md @@ -1,3 +1,8 @@ +# v 0.11.0 (09/05/2023) + +## Highlights +- Support EasyCV as a plug-in for [modelscope](https://github.com/modelscope/modelscope). + # v 0.10.0 (06/03/2023) ## Highlights diff --git a/easycv/__init__.py b/easycv/__init__.py index 228f3a51..d2a58bd3 100644 --- a/easycv/__init__.py +++ b/easycv/__init__.py @@ -1,4 +1,9 @@ # Copyright (c) Alibaba, Inc. and its affiliates. +# flake8: noqa +# isort:skip_file +import os +os.environ['SETUPTOOLS_USE_DISTUTILS'] = 'stdlib' + from .version import __version__, short_version __all__ = ['__version__', 'short_version'] diff --git a/easycv/core/evaluation/classification_eval.py b/easycv/core/evaluation/classification_eval.py index 4b8f6e33..804b0116 100644 --- a/easycv/core/evaluation/classification_eval.py +++ b/easycv/core/evaluation/classification_eval.py @@ -146,10 +146,14 @@ def _evaluate_impl(self, predictions, gt_labels): matrix = confusion_matrix( valid_true, valid_pred, labels=self.class_list) - print_log( - 'recall:{}\nprecision:{}\nattend:{}\nTP:{}\nFN:{}\nFP:{}\nTN:{}\nrecall/mean:{}\nprecision/mean:{}\nF1/mean:{}\nconfusion_matrix:{}\n' - .format(recall, precision, attend, tp, fn, fp, tn, - recall_mean, precision_mean, f1_mean, matrix)) + # print_log( + # 'recall:{}\nprecision:{}\nattend:{}\nTP:{}\nFN:{}\nFP:{}\nTN:{}\nrecall/mean:{}\nprecision/mean:{}\nF1/mean:{}\nconfusion_matrix:{}\n' + # .format(recall, precision, attend, tp, fn, fp, tn, + # recall_mean, precision_mean, f1_mean, matrix)) + + eval_res[key] = \ 'recall:{}\nprecision:{}\nattend:{}\nTP:{}\nFN:{}\nFP:{}\nTN:{}\nrecall/mean:{}\nprecision/mean:{}\nF1/mean:{}\nconfusion_matrix:{}\n'\ .format(recall, precision, attend, tp, fn, fp, tn, recall_mean, precision_mean, f1_mean, matrix.tolist()) return eval_res diff --git a/easycv/datasets/loader/sampler.py b/easycv/datasets/loader/sampler.py index 61a88eff..38ba42d2 100644 --- a/easycv/datasets/loader/sampler.py +++ b/easycv/datasets/loader/sampler.py @@ -246,7 +246,7 @@ def set_uniform_indices(self, labels, num_classes): l, size_per_label, replace=(len(l) <= size_per_label))) indices = np.array(indices) np.random.shuffle(indices) - indices = indices[:N].astype(np.int).tolist() + indices = indices[:N].astype(np.int64).tolist() # add extra samples to make it evenly divisible assert len(indices) <= self.total_size, \ @@ -438,7 +438,7 @@ def set_uniform_indices(self, labels, num_classes): l, size_per_label, replace=(len(l) <= size_per_label))) indices = np.array(indices) np.random.shuffle(indices) - indices = indices[:N].astype(np.int) + indices = indices[:N].astype(np.int64) # repeat all_size = self.total_size * self.world_size indices = indices[:all_size] diff --git a/easycv/datasets/video_recognition/pipelines/loading.py b/easycv/datasets/video_recognition/pipelines/loading.py index 30465b86..4f9aa42b 100644 --- a/easycv/datasets/video_recognition/pipelines/loading.py +++ b/easycv/datasets/video_recognition/pipelines/loading.py @@ -161,7 +161,7 @@ def
_get_train_clips(self, num_frames): ratio = (num_frames - ori_clip_len + 1.0) / self.num_clips clip_offsets = np.around(np.arange(self.num_clips) * ratio) else: - clip_offsets = np.zeros((self.num_clips, ), dtype=np.int) + clip_offsets = np.zeros((self.num_clips, ), dtype=np.int64) return clip_offsets @@ -180,11 +180,11 @@ def _get_test_clips(self, num_frames): avg_interval = (num_frames - ori_clip_len + 1) / float(self.num_clips) if num_frames > ori_clip_len - 1: base_offsets = np.arange(self.num_clips) * avg_interval - clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int) + clip_offsets = (base_offsets + avg_interval / 2.0).astype(np.int64) if self.twice_sample: clip_offsets = np.concatenate([clip_offsets, base_offsets]) else: - clip_offsets = np.zeros((self.num_clips, ), dtype=np.int) + clip_offsets = np.zeros((self.num_clips, ), dtype=np.int64) return clip_offsets def _sample_clips(self, num_frames): @@ -259,7 +259,7 @@ def __call__(self, results): start_index = results['start_index'] frame_inds = np.concatenate(frame_inds) + start_index - results['frame_inds'] = frame_inds.astype(np.int) + results['frame_inds'] = frame_inds.astype(np.int64) results['clip_len'] = self.clip_len results['frame_interval'] = self.frame_interval results['num_clips'] = self.num_clips diff --git a/easycv/datasets/video_recognition/pipelines/pose_transform.py b/easycv/datasets/video_recognition/pipelines/pose_transform.py index 8a1521b8..4ce81616 100644 --- a/easycv/datasets/video_recognition/pipelines/pose_transform.py +++ b/easycv/datasets/video_recognition/pipelines/pose_transform.py @@ -34,7 +34,7 @@ def __call__(self, results): inds = np.arange(start, start + self.clip_len) inds = np.mod(inds, num_frames) - results['frame_inds'] = inds.astype(np.int) + results['frame_inds'] = inds.astype(np.int64) results['clip_len'] = self.clip_len results['frame_interval'] = None results['num_clips'] = self.num_clips diff --git a/easycv/file/__init__.py b/easycv/file/__init__.py index 185eb34d..4634b13c 100644 --- a/easycv/file/__init__.py +++ b/easycv/file/__init__.py @@ -1,4 +1,4 @@ from .file_io import IO -from .utils import is_oss_path +from .utils import get_oss_config, is_oss_path io = IO() diff --git a/easycv/models/ocr/postprocess/db_postprocess.py b/easycv/models/ocr/postprocess/db_postprocess.py index 281c9cda..cc923588 100644 --- a/easycv/models/ocr/postprocess/db_postprocess.py +++ b/easycv/models/ocr/postprocess/db_postprocess.py @@ -135,10 +135,10 @@ def box_score_fast(self, bitmap, _box): ''' h, w = bitmap.shape[:2] box = _box.copy() - xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int), 0, w - 1) - xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int), 0, w - 1) - ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int), 0, h - 1) - ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int), 0, h - 1) + xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1) + xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1) + ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1) + ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1) mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8) box[:, 0] = box[:, 0] - xmin diff --git a/easycv/predictors/segmentation.py b/easycv/predictors/segmentation.py index 14ad6c61..b0100aa0 100644 --- a/easycv/predictors/segmentation.py +++ b/easycv/predictors/segmentation.py @@ -156,7 +156,7 @@ def process_single(self, inputs): ids = ids[legal_indices] labels = np.array([id % 1000 for id in ids], 
dtype=np.int64) segms = (pan_results[None] == ids[:, None, None]) - masks = [it.astype(np.int) for it in segms] + masks = [it.astype(np.int32) for it in segms] labels_txt = np.array(self.classes)[labels].tolist() output['masks'] = masks diff --git a/easycv/thirdparty/mot/bytetrack/matching.py b/easycv/thirdparty/mot/bytetrack/matching.py index 6b33cbd6..301740ac 100644 --- a/easycv/thirdparty/mot/bytetrack/matching.py +++ b/easycv/thirdparty/mot/bytetrack/matching.py @@ -25,7 +25,6 @@ import cv2 import numpy as np import scipy -from cython_bbox import bbox_overlaps as bbox_ious from scipy.spatial.distance import cdist from easycv.thirdparty.mot.bytetrack import kalman_filter @@ -91,6 +90,8 @@ def ious(atlbrs, btlbrs): if ious.size == 0: return ious + from cython_bbox import bbox_overlaps as bbox_ious + ious = bbox_ious( np.ascontiguousarray(atlbrs, dtype=np.float), np.ascontiguousarray(btlbrs, dtype=np.float)) diff --git a/easycv/toolkit/modelscope/__init__.py b/easycv/toolkit/modelscope/__init__.py new file mode 100644 index 00000000..345d722b --- /dev/null +++ b/easycv/toolkit/modelscope/__init__.py @@ -0,0 +1 @@ +from . import models, msdatasets, pipelines, trainers diff --git a/easycv/toolkit/modelscope/metainfo.py b/easycv/toolkit/modelscope/metainfo.py new file mode 100644 index 00000000..535dc97e --- /dev/null +++ b/easycv/toolkit/modelscope/metainfo.py @@ -0,0 +1,28 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.metainfo import CustomDatasets, Models, Pipelines + + +class EasyCVModels(Models): + yolox = 'YOLOX' + segformer = 'Segformer' + hand_2d_keypoints = 'HRNet-Hand2D-Keypoints' + image_object_detection_auto = 'image-object-detection-auto' + dino = 'DINO' + + +class EasyCVPipelines(Pipelines): + easycv_detection = 'easycv-detection' + easycv_segmentation = 'easycv-segmentation' + image_panoptic_segmentation_easycv = 'image-panoptic-segmentation-easycv' + + +class EasyCVCustomDatasets(CustomDatasets): + """ Names for different datasets. + """ + ClsDataset = 'ClsDataset' + Face2dKeypointsDataset = 'FaceKeypointDataset' + HandCocoWholeBodyDataset = 'HandCocoWholeBodyDataset' + HumanWholeBodyKeypointDataset = 'WholeBodyCocoTopDownDataset' + SegDataset = 'SegDataset' + DetDataset = 'DetDataset' + DetImagesMixDataset = 'DetImagesMixDataset' diff --git a/easycv/toolkit/modelscope/models/__init__.py b/easycv/toolkit/modelscope/models/__init__.py new file mode 100644 index 00000000..43ba0658 --- /dev/null +++ b/easycv/toolkit/modelscope/models/__init__.py @@ -0,0 +1,3 @@ +from . import (face_2d_keypoints, hand_2d_keypoints, human_wholebody_keypoints, + image_panoptic_segmentation, image_semantic_segmentation, + object_detection) diff --git a/easycv/toolkit/modelscope/models/base.py b/easycv/toolkit/modelscope/models/base.py new file mode 100644 index 00000000..854ab715 --- /dev/null +++ b/easycv/toolkit/modelscope/models/base.py @@ -0,0 +1,25 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+from modelscope.models.base import TorchModel + +from easycv.models.base import BaseModel +from easycv.utils.ms_utils import EasyCVMeta + + +class EasyCVBaseModel(BaseModel, TorchModel): + """Base model for EasyCV.""" + + def __init__(self, model_dir=None, args=(), kwargs={}): + kwargs.pop(EasyCVMeta.ARCH, None) # pop useless keys + BaseModel.__init__(self) + TorchModel.__init__(self, model_dir=model_dir) + + def forward(self, img, mode='train', **kwargs): + if self.training: + losses = self.forward_train(img, **kwargs) + loss, log_vars = self._parse_losses(losses) + return dict(loss=loss, log_vars=log_vars) + else: + return self.forward_test(img, **kwargs) + + def __call__(self, *args, **kwargs): + return self.forward(*args, **kwargs) diff --git a/easycv/toolkit/modelscope/models/face_2d_keypoints/__init__.py b/easycv/toolkit/modelscope/models/face_2d_keypoints/__init__.py new file mode 100644 index 00000000..c60f2c4b --- /dev/null +++ b/easycv/toolkit/modelscope/models/face_2d_keypoints/__init__.py @@ -0,0 +1,3 @@ +from .face_2d_keypoints_align import Face2DKeypoints + +__all__ = ['Face2DKeypoints'] diff --git a/easycv/toolkit/modelscope/models/face_2d_keypoints/face_2d_keypoints_align.py b/easycv/toolkit/modelscope/models/face_2d_keypoints/face_2d_keypoints_align.py new file mode 100644 index 00000000..344faf98 --- /dev/null +++ b/easycv/toolkit/modelscope/models/face_2d_keypoints/face_2d_keypoints_align.py @@ -0,0 +1,16 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.models.builder import MODELS +from modelscope.utils.constant import Tasks + +from easycv.models.face.face_keypoint import FaceKeypoint +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.models.base import EasyCVBaseModel + + +@MODELS.register_module( + group_key=Tasks.face_2d_keypoints, module_name=Models.face_2d_keypoints) +class Face2DKeypoints(EasyCVBaseModel, FaceKeypoint): + + def __init__(self, model_dir=None, *args, **kwargs): + EasyCVBaseModel.__init__(self, model_dir, args, kwargs) + FaceKeypoint.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/models/hand_2d_keypoints/__init__.py b/easycv/toolkit/modelscope/models/hand_2d_keypoints/__init__.py new file mode 100644 index 00000000..aaa7d99d --- /dev/null +++ b/easycv/toolkit/modelscope/models/hand_2d_keypoints/__init__.py @@ -0,0 +1,3 @@ +from .hand_2d_keypoints import Hand2dKeyPoints + +__all__ = ['Hand2dKeyPoints'] diff --git a/easycv/toolkit/modelscope/models/hand_2d_keypoints/hand_2d_keypoints.py b/easycv/toolkit/modelscope/models/hand_2d_keypoints/hand_2d_keypoints.py new file mode 100644 index 00000000..253b441e --- /dev/null +++ b/easycv/toolkit/modelscope/models/hand_2d_keypoints/hand_2d_keypoints.py @@ -0,0 +1,16 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+from modelscope.models.builder import MODELS +from modelscope.utils.constant import Tasks + +from easycv.models.pose import TopDown +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.models.base import EasyCVBaseModel + + +@MODELS.register_module( + group_key=Tasks.hand_2d_keypoints, module_name=Models.hand_2d_keypoints) +class Hand2dKeyPoints(EasyCVBaseModel, TopDown): + + def __init__(self, model_dir=None, *args, **kwargs): + EasyCVBaseModel.__init__(self, model_dir, args, kwargs) + TopDown.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/models/human_wholebody_keypoints/__init__.py b/easycv/toolkit/modelscope/models/human_wholebody_keypoints/__init__.py new file mode 100644 index 00000000..40bb44a6 --- /dev/null +++ b/easycv/toolkit/modelscope/models/human_wholebody_keypoints/__init__.py @@ -0,0 +1,3 @@ +from .human_wholebody_keypoint import HumanWholeBodyKeypoint + +__all__ = ['HumanWholeBodyKeypoint'] diff --git a/easycv/toolkit/modelscope/models/human_wholebody_keypoints/human_wholebody_keypoint.py b/easycv/toolkit/modelscope/models/human_wholebody_keypoints/human_wholebody_keypoint.py new file mode 100644 index 00000000..ebf72eb4 --- /dev/null +++ b/easycv/toolkit/modelscope/models/human_wholebody_keypoints/human_wholebody_keypoint.py @@ -0,0 +1,17 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.models.builder import MODELS +from modelscope.utils.constant import Tasks + +from easycv.models.pose.top_down import TopDown +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.models.base import EasyCVBaseModel + + +@MODELS.register_module( + group_key=Tasks.human_wholebody_keypoint, + module_name=Models.human_wholebody_keypoint) +class HumanWholeBodyKeypoint(EasyCVBaseModel, TopDown): + + def __init__(self, model_dir=None, *args, **kwargs): + EasyCVBaseModel.__init__(self, model_dir, args, kwargs) + TopDown.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/models/image_panoptic_segmentation/__init__.py b/easycv/toolkit/modelscope/models/image_panoptic_segmentation/__init__.py new file mode 100644 index 00000000..e0587aec --- /dev/null +++ b/easycv/toolkit/modelscope/models/image_panoptic_segmentation/__init__.py @@ -0,0 +1,3 @@ +from .r50_panseg_model import R50PanopticSegmentation + +__all__ = ['R50PanopticSegmentation'] diff --git a/easycv/toolkit/modelscope/models/image_panoptic_segmentation/r50_panseg_model.py b/easycv/toolkit/modelscope/models/image_panoptic_segmentation/r50_panseg_model.py new file mode 100644 index 00000000..ed336d52 --- /dev/null +++ b/easycv/toolkit/modelscope/models/image_panoptic_segmentation/r50_panseg_model.py @@ -0,0 +1,18 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+ +from modelscope.models.builder import MODELS +from modelscope.utils.constant import Tasks + +from easycv.models.segmentation import Mask2Former +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.models.base import EasyCVBaseModel + + +@MODELS.register_module( + group_key=Tasks.image_segmentation, + module_name=Models.r50_panoptic_segmentation) +class R50PanopticSegmentation(EasyCVBaseModel, Mask2Former): + + def __init__(self, model_dir=None, *args, **kwargs): + EasyCVBaseModel.__init__(self, model_dir, args, kwargs) + Mask2Former.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/models/image_semantic_segmentation/__init__.py b/easycv/toolkit/modelscope/models/image_semantic_segmentation/__init__.py new file mode 100644 index 00000000..425420bb --- /dev/null +++ b/easycv/toolkit/modelscope/models/image_semantic_segmentation/__init__.py @@ -0,0 +1,3 @@ +from .segformer import Segformer + +__all__ = ['Segformer'] diff --git a/easycv/toolkit/modelscope/models/image_semantic_segmentation/segformer.py b/easycv/toolkit/modelscope/models/image_semantic_segmentation/segformer.py new file mode 100644 index 00000000..427d5ea5 --- /dev/null +++ b/easycv/toolkit/modelscope/models/image_semantic_segmentation/segformer.py @@ -0,0 +1,16 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.models.builder import MODELS +from modelscope.utils.constant import Tasks + +from easycv.models.segmentation import EncoderDecoder +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.models.base import EasyCVBaseModel + + +@MODELS.register_module( + group_key=Tasks.image_segmentation, module_name=Models.segformer) +class Segformer(EasyCVBaseModel, EncoderDecoder): + + def __init__(self, model_dir=None, *args, **kwargs): + EasyCVBaseModel.__init__(self, model_dir, args, kwargs) + EncoderDecoder.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/models/object_detection/__init__.py b/easycv/toolkit/modelscope/models/object_detection/__init__.py new file mode 100644 index 00000000..27b1acd2 --- /dev/null +++ b/easycv/toolkit/modelscope/models/object_detection/__init__.py @@ -0,0 +1,4 @@ +from .dino import DINO +from .yolox_pai import YOLOX + +__all__ = ['YOLOX', 'DINO'] diff --git a/easycv/toolkit/modelscope/models/object_detection/dino.py b/easycv/toolkit/modelscope/models/object_detection/dino.py new file mode 100644 index 00000000..a870899f --- /dev/null +++ b/easycv/toolkit/modelscope/models/object_detection/dino.py @@ -0,0 +1,16 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+from modelscope.models.builder import MODELS +from modelscope.utils.constant import Tasks + +from easycv.models.detection.detectors import Detection as _Detection +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.models.base import EasyCVBaseModel + + +@MODELS.register_module( + group_key=Tasks.image_object_detection, module_name=Models.dino) +class DINO(EasyCVBaseModel, _Detection): + + def __init__(self, model_dir=None, *args, **kwargs): + EasyCVBaseModel.__init__(self, model_dir, args, kwargs) + _Detection.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/models/object_detection/yolox_pai.py b/easycv/toolkit/modelscope/models/object_detection/yolox_pai.py new file mode 100644 index 00000000..7ba2dc94 --- /dev/null +++ b/easycv/toolkit/modelscope/models/object_detection/yolox_pai.py @@ -0,0 +1,21 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.models.builder import MODELS +from modelscope.utils.constant import Tasks + +from easycv.models.detection.detectors import YOLOX as _YOLOX +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.models.base import EasyCVBaseModel + + +@MODELS.register_module( + group_key=Tasks.image_object_detection, module_name=Models.yolox) +@MODELS.register_module( + group_key=Tasks.image_object_detection, + module_name=Models.image_object_detection_auto) +@MODELS.register_module( + group_key=Tasks.domain_specific_object_detection, module_name=Models.yolox) +class YOLOX(EasyCVBaseModel, _YOLOX): + + def __init__(self, model_dir=None, *args, **kwargs): + EasyCVBaseModel.__init__(self, model_dir, args, kwargs) + _YOLOX.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/msdatasets/__init__.py b/easycv/toolkit/modelscope/msdatasets/__init__.py new file mode 100644 index 00000000..858e229d --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/__init__.py @@ -0,0 +1,3 @@ +from . import (face_2d_keypoints, hand_2d_keypoints, human_wholebody_keypoints, + image_classification, image_semantic_segmentation, + object_detection) diff --git a/easycv/toolkit/modelscope/msdatasets/face_2d_keypoints/__init__.py b/easycv/toolkit/modelscope/msdatasets/face_2d_keypoints/__init__.py new file mode 100644 index 00000000..49771efb --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/face_2d_keypoints/__init__.py @@ -0,0 +1,3 @@ +from .face_2d_keypoints_dataset import FaceKeypointDataset + +__all__ = ['FaceKeypointDataset'] diff --git a/easycv/toolkit/modelscope/msdatasets/face_2d_keypoints/face_2d_keypoints_dataset.py b/easycv/toolkit/modelscope/msdatasets/face_2d_keypoints/face_2d_keypoints_dataset.py new file mode 100644 index 00000000..17320e71 --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/face_2d_keypoints/face_2d_keypoints_dataset.py @@ -0,0 +1,39 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+from modelscope.msdatasets.dataset_cls.custom_datasets import CUSTOM_DATASETS +from modelscope.msdatasets.dataset_cls.custom_datasets.easycv_base import \ + EasyCVBaseDataset +from modelscope.utils.constant import Tasks + +from easycv.datasets.face import FaceKeypointDataset as _FaceKeypointDataset +from easycv.toolkit.modelscope.metainfo import \ + EasyCVCustomDatasets as CustomDatasets + + +@CUSTOM_DATASETS.register_module( + group_key=Tasks.face_2d_keypoints, + module_name=CustomDatasets.Face2dKeypointsDataset) +class FaceKeypointDataset(EasyCVBaseDataset, _FaceKeypointDataset): + """EasyCV dataset for face 2d keypoints. + + Args: + split_config (dict): Dataset root path from MSDataset, e.g. + {"train":"local cache path"} or {"evaluation":"local cache path"}. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. Not supported yet. + mode: Training or Evaluation. + """ + + def __init__(self, + split_config=None, + preprocessor=None, + mode=None, + *args, + **kwargs) -> None: + EasyCVBaseDataset.__init__( + self, + split_config=split_config, + preprocessor=preprocessor, + mode=mode, + args=args, + kwargs=kwargs) + _FaceKeypointDataset.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/msdatasets/hand_2d_keypoints/__init__.py b/easycv/toolkit/modelscope/msdatasets/hand_2d_keypoints/__init__.py new file mode 100644 index 00000000..a7bbc766 --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/hand_2d_keypoints/__init__.py @@ -0,0 +1,3 @@ +from .hand_2d_keypoints_dataset import HandCocoWholeBodyDataset + +__all__ = ['HandCocoWholeBodyDataset'] diff --git a/easycv/toolkit/modelscope/msdatasets/hand_2d_keypoints/hand_2d_keypoints_dataset.py b/easycv/toolkit/modelscope/msdatasets/hand_2d_keypoints/hand_2d_keypoints_dataset.py new file mode 100644 index 00000000..131305f5 --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/hand_2d_keypoints/hand_2d_keypoints_dataset.py @@ -0,0 +1,40 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.msdatasets.dataset_cls.custom_datasets import CUSTOM_DATASETS +from modelscope.msdatasets.dataset_cls.custom_datasets.easycv_base import \ + EasyCVBaseDataset +from modelscope.utils.constant import Tasks + +from easycv.datasets.pose import \ + HandCocoWholeBodyDataset as _HandCocoWholeBodyDataset +from easycv.toolkit.modelscope.metainfo import \ + EasyCVCustomDatasets as CustomDatasets + + +@CUSTOM_DATASETS.register_module( + group_key=Tasks.hand_2d_keypoints, + module_name=CustomDatasets.HandCocoWholeBodyDataset) +class HandCocoWholeBodyDataset(EasyCVBaseDataset, _HandCocoWholeBodyDataset): + """EasyCV dataset for human hand 2d keypoints. + + Args: + split_config (dict): Dataset root path from MSDataset, e.g. + {"train":"local cache path"} or {"evaluation":"local cache path"}. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. Not supported yet. + mode: Training or Evaluation.
+ """ + + def __init__(self, + split_config=None, + preprocessor=None, + mode=None, + *args, + **kwargs) -> None: + EasyCVBaseDataset.__init__( + self, + split_config=split_config, + preprocessor=preprocessor, + mode=mode, + args=args, + kwargs=kwargs) + _HandCocoWholeBodyDataset.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/msdatasets/human_wholebody_keypoints/__init__.py b/easycv/toolkit/modelscope/msdatasets/human_wholebody_keypoints/__init__.py new file mode 100644 index 00000000..3b9a34db --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/human_wholebody_keypoints/__init__.py @@ -0,0 +1,3 @@ +from .human_wholebody_keypoint_dataset import WholeBodyCocoTopDownDataset + +__all__ = ['WholeBodyCocoTopDownDataset'] diff --git a/easycv/toolkit/modelscope/msdatasets/human_wholebody_keypoints/human_wholebody_keypoint_dataset.py b/easycv/toolkit/modelscope/msdatasets/human_wholebody_keypoints/human_wholebody_keypoint_dataset.py new file mode 100644 index 00000000..010f7638 --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/human_wholebody_keypoints/human_wholebody_keypoint_dataset.py @@ -0,0 +1,41 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.msdatasets.dataset_cls.custom_datasets import CUSTOM_DATASETS +from modelscope.msdatasets.dataset_cls.custom_datasets.easycv_base import \ + EasyCVBaseDataset +from modelscope.utils.constant import Tasks + +from easycv.datasets.pose import \ + WholeBodyCocoTopDownDataset as _WholeBodyCocoTopDownDataset +from easycv.toolkit.modelscope.metainfo import \ + EasyCVCustomDatasets as CustomDatasets + + +@CUSTOM_DATASETS.register_module( + group_key=Tasks.human_wholebody_keypoint, + module_name=CustomDatasets.HumanWholeBodyKeypointDataset) +class WholeBodyCocoTopDownDataset(EasyCVBaseDataset, + _WholeBodyCocoTopDownDataset): + """EasyCV dataset for human whole body 2d keypoints. + + Args: + split_config (dict): Dataset root path from MSDataset, e.g. + {"train":"local cache path"} or {"evaluation":"local cache path"}. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. Not support yet. + mode: Training or Evaluation. + """ + + def __init__(self, + split_config=None, + preprocessor=None, + mode=None, + *args, + **kwargs) -> None: + EasyCVBaseDataset.__init__( + self, + split_config=split_config, + preprocessor=preprocessor, + mode=mode, + args=args, + kwargs=kwargs) + _WholeBodyCocoTopDownDataset.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/msdatasets/image_classification/__init__.py b/easycv/toolkit/modelscope/msdatasets/image_classification/__init__.py new file mode 100644 index 00000000..24349e28 --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/image_classification/__init__.py @@ -0,0 +1,3 @@ +from .classification_dataset import ClsDataset + +__all__ = ['ClsDataset'] diff --git a/easycv/toolkit/modelscope/msdatasets/image_classification/classification_dataset.py b/easycv/toolkit/modelscope/msdatasets/image_classification/classification_dataset.py new file mode 100644 index 00000000..f5515031 --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/image_classification/classification_dataset.py @@ -0,0 +1,39 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+from modelscope.msdatasets.dataset_cls.custom_datasets import CUSTOM_DATASETS +from modelscope.msdatasets.dataset_cls.custom_datasets.easycv_base import \ + EasyCVBaseDataset +from modelscope.utils.constant import Tasks + +from easycv.datasets.classification import ClsDataset as _ClsDataset +from easycv.toolkit.modelscope.metainfo import \ + EasyCVCustomDatasets as CustomDatasets + + +@CUSTOM_DATASETS.register_module( + group_key=Tasks.image_classification, + module_name=CustomDatasets.ClsDataset) +class ClsDataset(EasyCVBaseDataset, _ClsDataset): + """EasyCV dataset for classification. + + Args: + split_config (dict): Dataset root path from MSDataset, e.g. + {"train":"local cache path"} or {"evaluation":"local cache path"}. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. Not supported yet. + mode: Training or Evaluation. + """ + + def __init__(self, + split_config=None, + preprocessor=None, + mode=None, + *args, + **kwargs) -> None: + EasyCVBaseDataset.__init__( + self, + split_config=split_config, + preprocessor=preprocessor, + mode=mode, + args=args, + kwargs=kwargs) + _ClsDataset.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/msdatasets/image_semantic_segmentation/__init__.py b/easycv/toolkit/modelscope/msdatasets/image_semantic_segmentation/__init__.py new file mode 100644 index 00000000..c73cffbb --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/image_semantic_segmentation/__init__.py @@ -0,0 +1,3 @@ +from .segmentation_dataset import SegDataset + +__all__ = ['SegDataset'] diff --git a/easycv/toolkit/modelscope/msdatasets/image_semantic_segmentation/segmentation_dataset.py b/easycv/toolkit/modelscope/msdatasets/image_semantic_segmentation/segmentation_dataset.py new file mode 100644 index 00000000..920510ed --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/image_semantic_segmentation/segmentation_dataset.py @@ -0,0 +1,44 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from modelscope.msdatasets.dataset_cls.custom_datasets import CUSTOM_DATASETS +from modelscope.msdatasets.dataset_cls.custom_datasets.easycv_base import \ + EasyCVBaseDataset +from modelscope.utils.constant import Tasks + +from easycv.datasets.segmentation import SegDataset as _SegDataset +from easycv.toolkit.modelscope.metainfo import \ + EasyCVCustomDatasets as CustomDatasets + + +@CUSTOM_DATASETS.register_module( + group_key=Tasks.image_segmentation, module_name=CustomDatasets.SegDataset) +class SegDataset(EasyCVBaseDataset, _SegDataset): + """EasyCV dataset for semantic segmentation. + For more details, please refer to: + https://github.com/alibaba/EasyCV/blob/master/easycv/datasets/segmentation/raw.py . + + Args: + split_config (dict): Dataset root path from MSDataset, e.g. + {"train":"local cache path"} or {"evaluation":"local cache path"}. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. Not supported yet. + mode: Training or Evaluation. + data_source: Data source config to parse input data. + pipeline: Sequence of transform object or config dict to be composed. + ignore_index (int): Label index to be ignored. + profiling: If set True, will print transform time.
+ """ + + def __init__(self, + split_config=None, + preprocessor=None, + mode=None, + *args, + **kwargs) -> None: + EasyCVBaseDataset.__init__( + self, + split_config=split_config, + preprocessor=preprocessor, + mode=mode, + args=args, + kwargs=kwargs) + _SegDataset.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/msdatasets/object_detection/__init__.py b/easycv/toolkit/modelscope/msdatasets/object_detection/__init__.py new file mode 100644 index 00000000..474db84d --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/object_detection/__init__.py @@ -0,0 +1,3 @@ +from .detection_dataset import DetDataset, DetImagesMixDataset + +__all__ = ['DetDataset', 'DetImagesMixDataset'] diff --git a/easycv/toolkit/modelscope/msdatasets/object_detection/detection_dataset.py b/easycv/toolkit/modelscope/msdatasets/object_detection/detection_dataset.py new file mode 100644 index 00000000..a7a19b64 --- /dev/null +++ b/easycv/toolkit/modelscope/msdatasets/object_detection/detection_dataset.py @@ -0,0 +1,99 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + +from modelscope.msdatasets.dataset_cls.custom_datasets import CUSTOM_DATASETS +from modelscope.msdatasets.dataset_cls.custom_datasets.easycv_base import \ + EasyCVBaseDataset +from modelscope.utils.constant import Tasks + +from easycv.datasets.detection import DetDataset as _DetDataset +from easycv.datasets.detection import \ + DetImagesMixDataset as _DetImagesMixDataset +from easycv.toolkit.modelscope.metainfo import \ + EasyCVCustomDatasets as CustomDatasets + + +@CUSTOM_DATASETS.register_module( + group_key=Tasks.image_object_detection, + module_name=CustomDatasets.DetDataset) +@CUSTOM_DATASETS.register_module( + group_key=Tasks.image_segmentation, module_name=CustomDatasets.DetDataset) +class DetDataset(EasyCVBaseDataset, _DetDataset): + """EasyCV dataset for object detection. + For more details, please refer to https://github.com/alibaba/EasyCV/blob/master/easycv/datasets/detection/raw.py . + + Args: + split_config (dict): Dataset root path from MSDataset, e.g. + {"train":"local cache path"} or {"evaluation":"local cache path"}. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. Not support yet. + mode: Training or Evaluation. + data_source: Data source config to parse input data. + pipeline: Transform config list + profiling: If set True, will print pipeline time + classes: A list of class names, used in evaluation for result and groundtruth visualization + """ + + def __init__(self, + split_config=None, + preprocessor=None, + mode=None, + *args, + **kwargs) -> None: + EasyCVBaseDataset.__init__( + self, + split_config=split_config, + preprocessor=preprocessor, + mode=mode, + args=args, + kwargs=kwargs) + _DetDataset.__init__(self, *args, **kwargs) + + +@CUSTOM_DATASETS.register_module( + group_key=Tasks.image_object_detection, + module_name=CustomDatasets.DetImagesMixDataset) +@CUSTOM_DATASETS.register_module( + group_key=Tasks.domain_specific_object_detection, + module_name=CustomDatasets.DetImagesMixDataset) +class DetImagesMixDataset(EasyCVBaseDataset, _DetImagesMixDataset): + """EasyCV dataset for object detection, a wrapper of multiple images mixed dataset. + Suitable for training on multiple images mixed data augmentation like + mosaic and mixup. 
For the augmentation pipeline of mixed image data, + the `get_indexes` method needs to be provided to obtain the image + indexes, and you can set `skip_type_keys` to change the pipeline running + process. At the same time, we provide the `dynamic_scale` parameter + to dynamically change the output image size. + output boxes format: cx, cy, w, h + + For more details, please refer to https://github.com/alibaba/EasyCV/blob/master/easycv/datasets/detection/mix.py . + + Args: + split_config (dict): Dataset root path from MSDataset, e.g. + {"train":"local cache path"} or {"evaluation":"local cache path"}. + preprocessor (Preprocessor): An optional preprocessor instance, please make sure the preprocessor fits for + the model if supplied. Not supported yet. + mode: Training or Evaluation. + data_source (:obj:`DetSourceCoco`): Data source config to parse input data. + pipeline (Sequence[dict]): Sequence of transform object or + config dict to be composed. + dynamic_scale (tuple[int], optional): The image scale can be changed + dynamically. Default to None. + skip_type_keys (list[str], optional): Sequence of type strings to + be skipped in the pipeline. Default to None. + label_padding: pad output labels to shape [N, 120, 5] + """ + + def __init__(self, + split_config=None, + preprocessor=None, + mode=None, + *args, + **kwargs) -> None: + EasyCVBaseDataset.__init__( + self, + split_config=split_config, + preprocessor=preprocessor, + mode=mode, + args=args, + kwargs=kwargs) + _DetImagesMixDataset.__init__(self, *args, **kwargs) diff --git a/easycv/toolkit/modelscope/pipelines/__init__.py b/easycv/toolkit/modelscope/pipelines/__init__.py new file mode 100644 index 00000000..3e796b20 --- /dev/null +++ b/easycv/toolkit/modelscope/pipelines/__init__.py @@ -0,0 +1,13 @@ +from .detection_pipeline import EasyCVDetectionPipeline +from .face_2d_keypoints_pipeline import Face2DKeypointsPipeline +from .hand_2d_keypoints_pipeline import Hand2DKeypointsPipeline +from .human_wholebody_keypoint_pipeline import HumanWholebodyKeypointsPipeline +from .image_panoptic_segmentation_pipeline import \ + ImagePanopticSegmentationEasyCVPipeline +from .segmentation_pipeline import EasyCVSegmentationPipeline + +__all__ = [ + 'EasyCVDetectionPipeline', 'EasyCVSegmentationPipeline', + 'Face2DKeypointsPipeline', 'HumanWholebodyKeypointsPipeline', + 'Hand2DKeypointsPipeline', 'ImagePanopticSegmentationEasyCVPipeline' +] diff --git a/easycv/toolkit/modelscope/pipelines/base.py b/easycv/toolkit/modelscope/pipelines/base.py new file mode 100644 index 00000000..ad0f375d --- /dev/null +++ b/easycv/toolkit/modelscope/pipelines/base.py @@ -0,0 +1,123 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import glob +import os +import os.path as osp +from typing import Any + +import numpy as np +from modelscope.hub.snapshot_download import snapshot_download +from modelscope.pipelines.util import is_official_hub_path +from modelscope.utils.config import Config +from modelscope.utils.constant import (DEFAULT_MODEL_REVISION, Invoke, + ModelFile, ThirdParty) +from modelscope.utils.device import create_device +from PIL import ImageFile + +from easycv.utils.ms_utils import EasyCVMeta + + +class EasyCVPipeline(object): + """Base pipeline for EasyCV. + Loads a configuration file in modelscope style by default, + but actually uses the EasyCV predictor API for prediction. + So some adaptation work is done here between the configuration and the predict API.
+ """ + + def __init__(self, model: str, model_file_pattern='*.pt', *args, **kwargs): + """ + model (str): model id on modelscope hub or local model path. + model_file_pattern (str): model file pattern. + + """ + self.model_file_pattern = model_file_pattern + + assert isinstance(model, str) + if osp.exists(model): + model_dir = model + else: + assert is_official_hub_path( + model), 'Only support local model path and official hub path!' + model_dir = snapshot_download( + model_id=model, + revision=DEFAULT_MODEL_REVISION, + user_agent={ + Invoke.KEY: Invoke.PIPELINE, + ThirdParty.KEY: ThirdParty.EASYCV + }) + + assert osp.isdir(model_dir) + model_files = glob.glob( + os.path.join(model_dir, self.model_file_pattern)) + assert len( + model_files + ) == 1, f'Need one model file, but find {len(model_files)}: {model_files}' + + model_path = model_files[0] + self.model_path = model_path + self.model_dir = model_dir + + # get configuration file from source model dir + self.config_file = os.path.join(model_dir, ModelFile.CONFIGURATION) + assert os.path.exists( + self.config_file + ), f'Not find "{ModelFile.CONFIGURATION}" in model directory!' + + self.cfg = Config.from_file(self.config_file) + if 'device' in kwargs: + kwargs['device'] = create_device(kwargs['device']) + if 'predictor_config' in kwargs: + kwargs.pop('predictor_config') + self.predict_op = self._build_predict_op(**kwargs) + + def _build_predict_op(self, **kwargs): + """Build EasyCV predictor.""" + from easycv.predictors.builder import build_predictor + + easycv_config = self._to_easycv_config() + pipeline_op = build_predictor(self.cfg.pipeline.predictor_config, { + 'model_path': self.model_path, + 'config_file': easycv_config, + **kwargs + }) + return pipeline_op + + def _to_easycv_config(self): + """Adapt to EasyCV predictor.""" + # TODO: refine config compatibility problems + + easycv_arch = self.cfg.model.pop(EasyCVMeta.ARCH, None) + model_cfg = self.cfg.model + # Revert to the configuration of easycv + if easycv_arch is not None: + model_cfg.update(easycv_arch) + + easycv_config = Config(dict(model=model_cfg)) + + reserved_keys = [] + if hasattr(self.cfg, EasyCVMeta.META): + easycv_meta_cfg = getattr(self.cfg, EasyCVMeta.META) + reserved_keys = easycv_meta_cfg.get(EasyCVMeta.RESERVED_KEYS, []) + for key in reserved_keys: + easycv_config.merge_from_dict({key: getattr(self.cfg, key)}) + if 'test_pipeline' not in reserved_keys: + easycv_config.merge_from_dict( + {'test_pipeline': self.cfg.dataset.val.get('pipeline', [])}) + + return easycv_config + + def _is_single_inputs(self, inputs): + if isinstance(inputs, str) or (isinstance(inputs, list) + and len(inputs) == 1) or isinstance( + inputs, np.ndarray) or isinstance( + inputs, ImageFile.ImageFile): + return True + + return False + + def __call__(self, inputs) -> Any: + outputs = self.predict_op(inputs) + + if self._is_single_inputs(inputs): + outputs = outputs[0] + + return outputs diff --git a/easycv/toolkit/modelscope/pipelines/detection_pipeline.py b/easycv/toolkit/modelscope/pipelines/detection_pipeline.py new file mode 100644 index 00000000..77fd1c06 --- /dev/null +++ b/easycv/toolkit/modelscope/pipelines/detection_pipeline.py @@ -0,0 +1,67 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
+from typing import Any
+
+from modelscope.outputs import OutputKeys
+from modelscope.pipelines.builder import PIPELINES
+from modelscope.utils.constant import ModelFile, Tasks
+from modelscope.utils.cv.image_utils import \
+    show_image_object_detection_auto_result
+
+from easycv.toolkit.modelscope.metainfo import EasyCVPipelines as Pipelines
+from .base import EasyCVPipeline
+
+
+@PIPELINES.register_module(
+    Tasks.image_object_detection, module_name=Pipelines.easycv_detection)
+@PIPELINES.register_module(
+    Tasks.image_object_detection,
+    module_name=Pipelines.image_object_detection_auto)
+@PIPELINES.register_module(
+    Tasks.domain_specific_object_detection,
+    module_name=Pipelines.hand_detection)
+class EasyCVDetectionPipeline(EasyCVPipeline):
+    """Pipeline for easycv detection task."""
+
+    def __init__(self,
+                 model: str,
+                 model_file_pattern=ModelFile.TORCH_MODEL_FILE,
+                 *args,
+                 **kwargs):
+        """
+        model (str): model id on modelscope hub or local model path.
+        model_file_pattern (str): model file pattern.
+        """
+
+        super(EasyCVDetectionPipeline, self).__init__(
+            model=model,
+            model_file_pattern=model_file_pattern,
+            *args,
+            **kwargs)
+
+    def show_result(self, img_path, result, save_path=None):
+        show_image_object_detection_auto_result(img_path, result, save_path)
+
+    def __call__(self, inputs) -> Any:
+        outputs = self.predict_op(inputs)
+
+        # Collect scores/labels/boxes per output, so that each input image
+        # gets its own result dict instead of sharing one aggregated list.
+        results = []
+        for output in outputs:
+            scores = []
+            labels = []
+            boxes = []
+            for score, label, box in zip(output['detection_scores'],
+                                         output['detection_classes'],
+                                         output['detection_boxes']):
+                scores.append(score)
+                labels.append(self.cfg.CLASSES[label])
+                boxes.append(list(box))
+
+            results.append({
+                OutputKeys.SCORES: scores,
+                OutputKeys.LABELS: labels,
+                OutputKeys.BOXES: boxes
+            })
+
+        if self._is_single_inputs(inputs):
+            results = results[0]
+
+        return results
diff --git a/easycv/toolkit/modelscope/pipelines/face_2d_keypoints_pipeline.py b/easycv/toolkit/modelscope/pipelines/face_2d_keypoints_pipeline.py
new file mode 100644
index 00000000..9e11efa8
--- /dev/null
+++ b/easycv/toolkit/modelscope/pipelines/face_2d_keypoints_pipeline.py
@@ -0,0 +1,244 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import copy
+import math
+from typing import Any
+
+import cv2
+import numpy as np
+from modelscope.outputs import OutputKeys
+from modelscope.pipelines import pipeline
+from modelscope.pipelines.builder import PIPELINES
+from modelscope.preprocessors import LoadImage
+from modelscope.utils.constant import ModelFile, Tasks
+from modelscope.utils.logger import get_logger
+
+from easycv.toolkit.modelscope.metainfo import EasyCVPipelines as Pipelines
+from .base import EasyCVPipeline
+
+logger = get_logger()
+
+
+@PIPELINES.register_module(
+    Tasks.face_2d_keypoints, module_name=Pipelines.face_2d_keypoints)
+class Face2DKeypointsPipeline(EasyCVPipeline):
+    """Pipeline for face 2d keypoints detection."""
+
+    def __init__(self,
+                 model: str,
+                 model_file_pattern=ModelFile.TORCH_MODEL_FILE,
+                 *args,
+                 **kwargs):
+        """
+        model (str): model id on modelscope hub or local model path.
+        model_file_pattern (str): model file pattern.
+        """
+
+        super(Face2DKeypointsPipeline, self).__init__(
+            model=model,
+            model_file_pattern=model_file_pattern,
+            *args,
+            **kwargs)
+
+        # face detect pipeline
+        det_model_id = 'damo/cv_resnet_facedetection_scrfd10gkps'
+        self.face_detection = pipeline(
+            Tasks.face_detection, model=det_model_id)
+
+    def show_result(self, img, points, scale=2, save_path=None):
+        return self.predict_op.show_result(img, points, scale, save_path)
+
+    def _choose_face(self, det_result, min_face=10):
+        """
+        Filter detected faces, keeping only those with enough size.
+        Args:
+            det_result: output of face detection pipeline
+            min_face: minimum valid face width/height in pixels
+        """
+        bboxes = np.array(det_result[OutputKeys.BOXES])
+        landmarks = np.array(det_result[OutputKeys.KEYPOINTS])
+        if bboxes.shape[0] == 0:
+            logger.warning('No face detected!')
+            return None
+        # indexes of faces with enough size
+        face_idx = []
+        for i in range(bboxes.shape[0]):
+            box = bboxes[i]
+            if (box[2] - box[0]) >= min_face and (box[3] - box[1]) >= min_face:
+                face_idx += [i]
+        if len(face_idx) == 0:
+            logger.warning(
+                f'Face size not enough, less than {min_face}x{min_face}!')
+            return None
+        bboxes = bboxes[face_idx]
+        landmarks = landmarks[face_idx]
+
+        return bboxes, landmarks
+
+    def expand_box(self, box, w, h, scalex=0.3, scaley=0.5):
+        x1 = box[0]
+        y1 = box[1]
+        wb = box[2] - x1
+        hb = box[3] - y1
+        deltax = int(wb * scalex)
+        deltay1 = int(hb * scaley)
+        deltay2 = int(hb * scalex)
+        x1 = x1 - deltax
+        y1 = y1 - deltay1
+        if x1 < 0:
+            deltax = deltax + x1
+            x1 = 0
+        if y1 < 0:
+            deltay1 = deltay1 + y1
+            y1 = 0
+        x2 = x1 + wb + 2 * deltax
+        y2 = y1 + hb + deltay1 + deltay2
+        x2 = np.clip(x2, 0, w - 1)
+        y2 = np.clip(y2, 0, h - 1)
+        return [x1, y1, x2, y2]
+
+    def rotate_point(self, angle, center, landmark):
+        # build the 2x3 affine rotation matrix about `center` and apply it
+        # to every landmark point
+        rad = angle * np.pi / 180.0
+        alpha = np.cos(rad)
+        beta = np.sin(rad)
+        M = np.zeros((2, 3), dtype=np.float32)
+        M[0, 0] = alpha
+        M[0, 1] = beta
+        M[0, 2] = (1 - alpha) * center[0] - beta * center[1]
+        M[1, 0] = -beta
+        M[1, 1] = alpha
+        M[1, 2] = beta * center[0] + (1 - alpha) * center[1]
+
+        landmark_ = np.asarray([(M[0, 0] * x + M[0, 1] * y + M[0, 2],
+                                 M[1, 0] * x + M[1, 1] * y + M[1, 2])
+                                for (x, y) in landmark])
+        return M, landmark_
+
+    def rotate_crop_img(self, img, pts, M):
+        # rotate the whole image, then crop the axis-aligned box that bounds
+        # the rotated points
+        imgT = cv2.warpAffine(img, M, (int(img.shape[1]), int(img.shape[0])))
+
+        x1 = pts[5][0]
+        x2 = pts[5][0]
+        y1 = pts[5][1]
+        y2 = pts[5][1]
+        for i in range(0, 9):
+            x1 = min(x1, pts[i][0])
+            x2 = max(x2, pts[i][0])
+            y1 = min(y1, pts[i][1])
+            y2 = max(y2, pts[i][1])
+
+        height, width, _ = imgT.shape
+        x1 = min(max(0, int(x1)), width)
+        y1 = min(max(0, int(y1)), height)
+        x2 = min(max(0, int(x2)), width)
+        y2 = min(max(0, int(y2)), height)
+        sub_imgT = imgT[y1:y2, x1:x2]
+
+        return sub_imgT, imgT, [x1, y1, x2, y2]
+
+    def crop_img(self, imgT, pts):
+        enlarge_ratio = 1.1
+
+        x1 = np.min(pts[:, 0])
+        x2 = np.max(pts[:, 0])
+        y1 = np.min(pts[:, 1])
+        y2 = np.max(pts[:, 1])
+        w = x2 - x1 + 1
+        h = y2 - y1 + 1
+        x1 = int(x1 - (enlarge_ratio - 1.0) / 2.0 * w)
+        y1 = int(y1 - (enlarge_ratio - 1.0) / 2.0 * h)
+        x1 = max(0, x1)
+        y1 = max(0, y1)
+
+        new_w = int(enlarge_ratio * w)
+        new_h = int(enlarge_ratio * h)
+        new_x1 = x1
+        new_y1 = y1
+        new_x2 = new_x1 + new_w
+        new_y2 = new_y1 + new_h
+
+        height, width, _ = imgT.shape
+
+        new_x1 = min(max(0, new_x1), width)
+        new_y1 = min(max(0, new_y1), height)
+        new_x2 = max(min(width, new_x2), 0)
+        new_y2 = max(min(height, new_y2), 0)
+
+        sub_imgT = imgT[new_y1:new_y2, new_x1:new_x2]
+
+        return sub_imgT, [new_x1, new_y1, new_x2, new_y2]
+
+    def __call__(self, inputs) -> Any:
+        img = LoadImage.convert_to_ndarray(inputs)
+        h, w, c = img.shape
+        img_rgb = copy.deepcopy(img)
+        img_rgb = img_rgb[:, :, ::-1]
+        det_result = self.face_detection(img_rgb)
+
+        bboxes = np.array(det_result[OutputKeys.BOXES])
+        if bboxes.shape[0] == 0:
+            logger.warning('No face detected!')
+            results = {
+                OutputKeys.KEYPOINTS: [],
+                OutputKeys.POSES: [],
+                OutputKeys.BOXES: []
+            }
+            return results
+
+        # _choose_face returns None when no face is large enough; guard
+        # against unpacking None.
+        choose_result = self._choose_face(det_result)
+        if choose_result is None:
+            return {
+                OutputKeys.KEYPOINTS: [],
+                OutputKeys.POSES: [],
+                OutputKeys.BOXES: []
+            }
+        boxes, keypoints = choose_result
+
+        output_boxes = []
+        output_keypoints = []
+        output_poses = []
+        for index, box_ori in enumerate(boxes):
+            box = self.expand_box(box_ori, w, h, scalex=0.1, scaley=0.1)
+            y0 = int(box[1])
+            y1 = int(box[3])
+            x0 = int(box[0])
+            x1 = int(box[2])
+            sub_img = img[y0:y1, x0:x1]
+
+            keypoint = keypoints[index]
+            pts = [[keypoint[0], keypoint[1]], [keypoint[2], keypoint[3]],
+                   [keypoint[4], keypoint[5]], [keypoint[6], keypoint[7]],
+                   [keypoint[8], keypoint[9]], [box[0], box[1]],
+                   [box[2], box[1]], [box[0], box[3]], [box[2], box[3]]]
+            # rotation angle in radians
+            angle = math.atan2((pts[1][1] - pts[0][1]),
+                               (pts[1][0] - pts[0][0]))
+            # convert to degrees
+            theta = angle * (180 / np.pi)
+
+            center = [w // 2, h // 2]
+            cx, cy = center
+            M, landmark_ = self.rotate_point(theta, (cx, cy), pts)
+            sub_imgT, imgT, bbox = self.rotate_crop_img(img, landmark_, M)
+
+            outputs = self.predict_op([sub_imgT])[0]
+            tmp_keypoints = outputs['point']
+
+            for idx in range(0, len(tmp_keypoints)):
+                tmp_keypoints[idx][0] += bbox[0]
+                tmp_keypoints[idx][1] += bbox[1]
+
+            # iterative refinement: re-crop around the current keypoints and
+            # re-predict
+            for idx in range(0, 6):
+                sub_img, bbox = self.crop_img(imgT, tmp_keypoints)
+                outputs = self.predict_op([sub_img])[0]
+                tmp_keypoints = outputs['point']
+                for idx in range(0, len(tmp_keypoints)):
+                    tmp_keypoints[idx][0] += bbox[0]
+                    tmp_keypoints[idx][1] += bbox[1]
+
+            # rotate the keypoints back into the original image coordinates
+            M2, tmp_keypoints = self.rotate_point(-theta, (cx, cy),
+                                                  tmp_keypoints)
+
+            output_keypoints.append(np.array(tmp_keypoints))
+            output_poses.append(np.array(outputs['pose']))
+            output_boxes.append(np.array(box_ori))
+
+        results = {
+            OutputKeys.KEYPOINTS: output_keypoints,
+            OutputKeys.POSES: output_poses,
+            OutputKeys.BOXES: output_boxes
+        }
+
+        return results
diff --git a/easycv/toolkit/modelscope/pipelines/hand_2d_keypoints_pipeline.py b/easycv/toolkit/modelscope/pipelines/hand_2d_keypoints_pipeline.py
new file mode 100644
index 00000000..c4ce93ea
--- /dev/null
+++ b/easycv/toolkit/modelscope/pipelines/hand_2d_keypoints_pipeline.py
@@ -0,0 +1,52 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import os.path
+
+from modelscope.pipelines.builder import PIPELINES
+from modelscope.utils.constant import ModelFile, Tasks
+
+from easycv.toolkit.modelscope.metainfo import EasyCVPipelines as Pipelines
+from .base import EasyCVPipeline
+
+
+@PIPELINES.register_module(
+    Tasks.hand_2d_keypoints, module_name=Pipelines.hand_2d_keypoints)
+class Hand2DKeypointsPipeline(EasyCVPipeline):
+    """Pipeline for hand pose keypoint task."""
+
+    def __init__(self,
+                 model: str,
+                 model_file_pattern=ModelFile.TORCH_MODEL_FILE,
+                 *args,
+                 **kwargs):
+        """
+        model (str): model id on modelscope hub or local model path.
+        model_file_pattern (str): model file pattern.
+ """ + super(Hand2DKeypointsPipeline, self).__init__( + model=model, + model_file_pattern=model_file_pattern, + *args, + **kwargs) + + def _build_predict_op(self, **kwargs): + """Build EasyCV predictor.""" + from easycv.predictors.builder import build_predictor + detection_predictor_type = self.cfg['DETECTION']['type'] + detection_model_path = os.path.join( + self.model_dir, self.cfg['DETECTION']['model_path']) + detection_cfg_file = os.path.join(self.model_dir, + self.cfg['DETECTION']['config_file']) + detection_score_threshold = self.cfg['DETECTION']['score_threshold'] + self.cfg.pipeline.predictor_config[ + 'detection_predictor_config'] = dict( + type=detection_predictor_type, + model_path=detection_model_path, + config_file=detection_cfg_file, + score_threshold=detection_score_threshold) + easycv_config = self._to_easycv_config() + pipeline_op = build_predictor(self.cfg.pipeline.predictor_config, { + 'model_path': self.model_path, + 'config_file': easycv_config, + **kwargs + }) + return pipeline_op diff --git a/easycv/toolkit/modelscope/pipelines/human_wholebody_keypoint_pipeline.py b/easycv/toolkit/modelscope/pipelines/human_wholebody_keypoint_pipeline.py new file mode 100644 index 00000000..d724c3b9 --- /dev/null +++ b/easycv/toolkit/modelscope/pipelines/human_wholebody_keypoint_pipeline.py @@ -0,0 +1,68 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import os.path +from typing import Any + +from modelscope.outputs import OutputKeys +from modelscope.pipelines.builder import PIPELINES +from modelscope.utils.constant import ModelFile, Tasks + +from easycv.toolkit.modelscope.metainfo import EasyCVPipelines as Pipelines +from .base import EasyCVPipeline + + +@PIPELINES.register_module( + Tasks.human_wholebody_keypoint, + module_name=Pipelines.human_wholebody_keypoint) +class HumanWholebodyKeypointsPipeline(EasyCVPipeline): + """Pipeline for human wholebody 2d keypoints detection.""" + + def __init__(self, + model: str, + model_file_pattern=ModelFile.TORCH_MODEL_FILE, + *args, + **kwargs): + """ + model (str): model id on modelscope hub or local model path. + model_file_pattern (str): model file pattern. 
+ """ + super(HumanWholebodyKeypointsPipeline, self).__init__( + model=model, + model_file_pattern=model_file_pattern, + *args, + **kwargs) + + def _build_predict_op(self, **kwargs): + """Build EasyCV predictor.""" + from easycv.predictors.builder import build_predictor + detection_predictor_type = self.cfg['DETECTION']['type'] + detection_model_path = os.path.join( + self.model_dir, self.cfg['DETECTION']['model_path']) + detection_cfg_file = os.path.join(self.model_dir, + self.cfg['DETECTION']['config_file']) + detection_score_threshold = self.cfg['DETECTION']['score_threshold'] + self.cfg.pipeline.predictor_config[ + 'detection_predictor_config'] = dict( + type=detection_predictor_type, + model_path=detection_model_path, + config_file=detection_cfg_file, + score_threshold=detection_score_threshold) + easycv_config = self._to_easycv_config() + pipeline_op = build_predictor(self.cfg.pipeline.predictor_config, { + 'model_path': self.model_path, + 'config_file': easycv_config, + **kwargs + }) + return pipeline_op + + def __call__(self, inputs) -> Any: + outputs = self.predict_op(inputs) + + results = [{ + OutputKeys.KEYPOINTS: output['keypoints'], + OutputKeys.BOXES: output['boxes'] + } for output in outputs] + + if self._is_single_inputs(inputs): + results = results[0] + + return results diff --git a/easycv/toolkit/modelscope/pipelines/image_panoptic_segmentation_pipeline.py b/easycv/toolkit/modelscope/pipelines/image_panoptic_segmentation_pipeline.py new file mode 100644 index 00000000..ecebbd37 --- /dev/null +++ b/easycv/toolkit/modelscope/pipelines/image_panoptic_segmentation_pipeline.py @@ -0,0 +1,45 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from typing import Any + +from modelscope.outputs import OutputKeys +from modelscope.pipelines.builder import PIPELINES +from modelscope.utils.constant import Tasks +from modelscope.utils.logger import get_logger + +from easycv.toolkit.modelscope.metainfo import EasyCVPipelines as Pipelines +from .base import EasyCVPipeline + +logger = get_logger() + + +@PIPELINES.register_module( + Tasks.image_segmentation, + module_name=Pipelines.image_panoptic_segmentation_easycv) +class ImagePanopticSegmentationEasyCVPipeline(EasyCVPipeline): + """Pipeline built upon easycv for image segmentation.""" + + def __init__(self, model: str, model_file_pattern='*.pt', *args, **kwargs): + """ + model (str): model id on modelscope hub or local model path. + model_file_pattern (str): model file pattern. + """ + super(ImagePanopticSegmentationEasyCVPipeline, self).__init__( + model=model, + model_file_pattern=model_file_pattern, + *args, + **kwargs) + + def __call__(self, inputs) -> Any: + outputs = self.predict_op(inputs) + easycv_results = outputs[0] + + results = { + OutputKeys.MASKS: + easycv_results[OutputKeys.MASKS], + OutputKeys.LABELS: + easycv_results[OutputKeys.LABELS], + OutputKeys.SCORES: + [0.999 for _ in range(len(easycv_results[OutputKeys.LABELS]))] + } + + return results diff --git a/easycv/toolkit/modelscope/pipelines/segmentation_pipeline.py b/easycv/toolkit/modelscope/pipelines/segmentation_pipeline.py new file mode 100644 index 00000000..b10bf945 --- /dev/null +++ b/easycv/toolkit/modelscope/pipelines/segmentation_pipeline.py @@ -0,0 +1,47 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
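Both segmentation pipelines report a constant placeholder score of 0.999 per mask, since the underlying EasyCV predictors do not emit per-mask confidences. The per-class mask unpacking performed by `EasyCVSegmentationPipeline.__call__` below reduces to the following self-contained numpy sketch (the class list and semantic map are toy values):

import numpy as np

CLASSES = ['background', 'cat', 'dog']  # toy class list
seg_pred = np.array([[0, 1], [1, 2]])   # toy semantic map of class ids

ids = np.unique(seg_pred)[::-1]         # unique class ids, high to low
ids = ids[ids != len(CLASSES)]          # drop the VOID id if present
masks = [(seg_pred == i).astype(np.int32) for i in ids]  # one binary mask per id
labels = [CLASSES[i] for i in ids]
print(labels)  # ['dog', 'cat', 'background']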
+from typing import Any + +import numpy as np +from modelscope.outputs import OutputKeys +from modelscope.pipelines.builder import PIPELINES +from modelscope.utils.constant import Tasks + +from easycv.toolkit.modelscope.metainfo import EasyCVPipelines as Pipelines +from .base import EasyCVPipeline + + +@PIPELINES.register_module( + Tasks.image_segmentation, module_name=Pipelines.easycv_segmentation) +class EasyCVSegmentationPipeline(EasyCVPipeline): + """Pipeline for easycv segmentation task.""" + + def __init__(self, model: str, model_file_pattern='*.pt', *args, **kwargs): + """ + model (str): model id on modelscope hub or local model path. + model_file_pattern (str): model file pattern. + """ + + super(EasyCVSegmentationPipeline, self).__init__( + model=model, + model_file_pattern=model_file_pattern, + *args, + **kwargs) + + def __call__(self, inputs) -> Any: + outputs = self.predict_op(inputs) + + semantic_result = outputs[0]['seg_pred'] + + ids = np.unique(semantic_result)[::-1] + legal_indices = ids != len(self.predict_op.CLASSES) # for VOID label + ids = ids[legal_indices] + segms = (semantic_result[None] == ids[:, None, None]) + masks = [it.astype(np.int32) for it in segms] + labels_txt = np.array(self.predict_op.CLASSES)[ids].tolist() + + results = { + OutputKeys.MASKS: masks, + OutputKeys.LABELS: labels_txt, + OutputKeys.SCORES: [0.999 for _ in range(len(labels_txt))] + } + return results diff --git a/easycv/toolkit/modelscope/trainers/__init__.py b/easycv/toolkit/modelscope/trainers/__init__.py new file mode 100644 index 00000000..db616054 --- /dev/null +++ b/easycv/toolkit/modelscope/trainers/__init__.py @@ -0,0 +1,4 @@ +from .trainer import EasyCVEpochBasedTrainer +from .utils import AddLrLogHook, EasyCVMetric + +__all__ = ['AddLrLogHook', 'EasyCVMetric', 'EasyCVEpochBasedTrainer'] diff --git a/easycv/toolkit/modelscope/trainers/trainer.py b/easycv/toolkit/modelscope/trainers/trainer.py new file mode 100644 index 00000000..adb669b6 --- /dev/null +++ b/easycv/toolkit/modelscope/trainers/trainer.py @@ -0,0 +1,182 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +from copy import deepcopy +from functools import partial +from typing import Callable, Optional, Tuple, Union + +import torch +from modelscope.models.base import TorchModel +from modelscope.msdatasets import MsDataset +from modelscope.preprocessors import Preprocessor +from modelscope.trainers import EpochBasedTrainer +from modelscope.trainers.base import TRAINERS +from modelscope.trainers.hooks import HOOKS +from modelscope.trainers.parallel.builder import build_parallel +from modelscope.trainers.parallel.utils import is_parallel +from modelscope.utils.config import Config +from modelscope.utils.constant import DEFAULT_MODEL_REVISION +from modelscope.utils.import_utils import LazyImportModule +from modelscope.utils.registry import default_group +from torch import nn +from torch.utils.data import Dataset + +from easycv.utils.checkpoint import load_checkpoint as ev_load_checkpoint +from .utils import register_util + + +@TRAINERS.register_module(module_name='easycv') +class EasyCVEpochBasedTrainer(EpochBasedTrainer): + """Epoch based Trainer for EasyCV. + + Args: + cfg_file(str): The config file of EasyCV. + model (:obj:`torch.nn.Module` or :obj:`TorchModel` or `str`): The model to be run, or a valid model dir + or a model id. If model is None, build_model method will be called. + train_dataset (`MsDataset` or `torch.utils.data.Dataset`, *optional*): + The dataset to use for training. 
Note that if it's a `torch.utils.data.IterableDataset` with some randomization and you are training in a
+            distributed fashion, your iterable dataset should either use an internal attribute `generator` that is a
+            `torch.Generator` for the randomization that must be identical on all processes (and the Trainer will
+            manually set the seed of this `generator` at each epoch) or have a `set_epoch()` method that internally
+            sets the seed of the RNGs used.
+        eval_dataset (`MsDataset` or `torch.utils.data.Dataset`, *optional*): The dataset to use for evaluation.
+        preprocessor (:obj:`Preprocessor`, *optional*): The optional preprocessor.
+            NOTE: If the preprocessor has already been applied to the dataset by the user's custom code before it
+            is fed into this trainer, this parameter should be None, and the 'preprocessor' key should also be
+            removed from the cfg_file. Otherwise the preprocessor will be instantiated from the cfg_file or
+            assigned from this parameter, and this preprocessing action will be executed every time the dataset's
+            __getitem__ is called.
+        optimizers (`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler._LRScheduler]`, *optional*): A tuple
+            containing the optimizer and the scheduler to use.
+        max_epochs (int, optional): Total training epochs.
+    """
+
+    def __init__(
+            self,
+            cfg_file: Optional[str] = None,
+            model: Optional[Union[TorchModel, nn.Module, str]] = None,
+            arg_parse_fn: Optional[Callable] = None,
+            train_dataset: Optional[Union[MsDataset, Dataset]] = None,
+            eval_dataset: Optional[Union[MsDataset, Dataset]] = None,
+            preprocessor: Optional[Preprocessor] = None,
+            optimizers: Tuple[torch.optim.Optimizer,
+                              torch.optim.lr_scheduler._LRScheduler] = (None,
+                                                                        None),
+            model_revision: Optional[str] = DEFAULT_MODEL_REVISION,
+            **kwargs):
+
+        register_util.register_parallel()
+        register_util.register_part_mmcv_hooks_to_ms()
+
+        super(EasyCVEpochBasedTrainer, self).__init__(
+            model=model,
+            cfg_file=cfg_file,
+            arg_parse_fn=arg_parse_fn,
+            preprocessor=preprocessor,
+            optimizers=optimizers,
+            model_revision=model_revision,
+            train_dataset=train_dataset,
+            eval_dataset=eval_dataset,
+            **kwargs)
+
+        # reset data_collator
+        from mmcv.parallel import collate
+
+        self.train_data_collator = partial(
+            collate,
+            samples_per_gpu=self.cfg.train.dataloader.batch_size_per_gpu)
+        self.eval_data_collator = partial(
+            collate,
+            samples_per_gpu=self.cfg.evaluation.dataloader.batch_size_per_gpu)
+
+        # load pretrained model
+        load_from = self.cfg.get('load_from', None)
+        if load_from is not None:
+            ev_load_checkpoint(
+                self.model,
+                filename=load_from,
+                map_location=self.device,
+                strict=False,
+            )
+
+        # reset parallel
+        if not self._dist:
+            assert not is_parallel(
+                self.model
+            ), 'A model wrapped by custom parallel is not supported in non-distributed mode!'
+            dp_cfg = dict(
+                type='MMDataParallel',
+                module=self.model,
+                device_ids=[torch.cuda.current_device()])
+            self.model = build_parallel(dp_cfg)
+
+    def rebuild_config(self, cfg: Config):
+        cfg = super().rebuild_config(cfg)
+        # Register easycv hooks dynamically. If the hook already exists in modelscope,
+        # the hook in modelscope will be used; otherwise the easycv hook is registered into ms.
+        # We must manually trigger lazy import to detect whether the hook is in modelscope.
+        # TODO: use ast index to detect whether the hook is in modelscope
+        for h_i in cfg.train.get('hooks', []):
+            sig = ('HOOKS', default_group, h_i['type'])
+            LazyImportModule.import_module(sig)
+            if h_i['type'] not in HOOKS._modules[default_group]:
+                if h_i['type'] in [
+                        'TensorboardLoggerHookV2', 'WandbLoggerHookV2'
+                ]:
+                    raise ValueError(
+                        'Hook %s is not supported yet, we will support it in the future!'
+                        % h_i['type'])
+                register_util.register_hook_to_ms(h_i['type'])
+        return cfg
+
+    def create_optimizer_and_scheduler(self):
+        """ Create optimizer and lr scheduler
+        """
+        optimizer, lr_scheduler = self.optimizers
+        if optimizer is None:
+            optimizer_cfg = self.cfg.train.get('optimizer', None)
+        else:
+            optimizer_cfg = None
+
+        optim_options = {}
+        if optimizer_cfg is not None:
+            optim_options = optimizer_cfg.pop('options', {})
+            from easycv.apis.train import build_optimizer
+            optimizer = build_optimizer(self.model, optimizer_cfg)
+
+        if lr_scheduler is None:
+            lr_scheduler_cfg = self.cfg.train.get('lr_scheduler', None)
+        else:
+            lr_scheduler_cfg = None
+
+        lr_options = {}
+        # Adapt to the mmcv lr scheduler hook.
+        # Please refer to: https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py
+        if lr_scheduler_cfg is not None:
+            assert optimizer is not None
+            lr_options = lr_scheduler_cfg.pop('options', {})
+            assert 'policy' in lr_scheduler_cfg
+            policy_type = lr_scheduler_cfg.pop('policy')
+            # mmcv lr updater hooks are named like 'StepLrUpdaterHook';
+            # title-case an all-lowercase policy name (e.g. 'step' -> 'Step').
+            if policy_type == policy_type.lower():
+                policy_type = policy_type.title()
+            hook_type = policy_type + 'LrUpdaterHook'
+            lr_scheduler_cfg['type'] = hook_type
+
+        self.cfg.train.lr_scheduler_hook = lr_scheduler_cfg
+
+        self.optimizer = optimizer
+        self.lr_scheduler = lr_scheduler
+
+        return self.optimizer, self.lr_scheduler, optim_options, lr_options
+
+    def to_parallel(self, model) -> Union[nn.Module, TorchModel]:
+        if self.cfg.get('parallel', None) is not None:
+            dp_cfg = deepcopy(self.cfg['parallel'])
+            dp_cfg.update(
+                dict(module=model, device_ids=[torch.cuda.current_device()]))
+            return build_parallel(dp_cfg)
+
+        dp_cfg = dict(
+            type='MMDistributedDataParallel',
+            module=model,
+            device_ids=[torch.cuda.current_device()])
+
+        return build_parallel(dp_cfg)
diff --git a/easycv/toolkit/modelscope/trainers/utils/__init__.py b/easycv/toolkit/modelscope/trainers/utils/__init__.py
new file mode 100644
index 00000000..23cfa36a
--- /dev/null
+++ b/easycv/toolkit/modelscope/trainers/utils/__init__.py
@@ -0,0 +1,21 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+from typing import TYPE_CHECKING
+
+from modelscope.utils.import_utils import LazyImportModule
+
+if TYPE_CHECKING:
+    from .hooks import AddLrLogHook
+    from .metric import EasyCVMetric
+
+else:
+    _import_structure = {'hooks': ['AddLrLogHook'], 'metric': ['EasyCVMetric']}
+
+    import sys
+
+    sys.modules[__name__] = LazyImportModule(
+        __name__,
+        globals()['__file__'],
+        _import_structure,
+        module_spec=__spec__,
+        extra_objects={},
+    )
diff --git a/easycv/toolkit/modelscope/trainers/utils/hooks.py b/easycv/toolkit/modelscope/trainers/utils/hooks.py
new file mode 100644
index 00000000..1f1a5c95
--- /dev/null
+++ b/easycv/toolkit/modelscope/trainers/utils/hooks.py
@@ -0,0 +1,29 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
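Since `EasyCVEpochBasedTrainer` is registered under the module name 'easycv', it is expected to be created through modelscope's trainer factory. A minimal sketch under that assumption; both paths are placeholders, and the exact `default_args` accepted may vary with the modelscope version:

from modelscope.trainers import build_trainer

trainer = build_trainer(
    name='easycv',  # matches the module_name registered on EasyCVEpochBasedTrainer
    default_args=dict(
        cfg_file='path/to/configuration.json',  # placeholder ModelScope-style config
        work_dir='./work_dir'))                 # placeholder output directory
trainer.train()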
+from modelscope.trainers.hooks import HOOKS, Priority
+from modelscope.trainers.hooks.lr_scheduler_hook import LrSchedulerHook
+from modelscope.utils.constant import LogKeys
+
+
+@HOOKS.register_module(module_name='AddLrLogHook')
+class AddLrLogHook(LrSchedulerHook):
+    """For EasyCV to adapt to ModelScope: EasyCV adds the lr log in the trainer,
+    while the ModelScope trainer does not, and instead adds it in the lr scheduler hook.
+    But the lr scheduler hook used by EasyCV is the mmcv hook, which has no lr log,
+    so this hook supplements it. It will be removed in the future.
+    """
+    PRIORITY = Priority.NORMAL
+
+    def __init__(self):
+        pass
+
+    def before_run(self, trainer):
+        pass
+
+    def after_train_iter(self, trainer):
+        trainer.log_buffer.output[LogKeys.LR] = self._get_log_lr(trainer)
+
+    def before_train_epoch(self, trainer):
+        trainer.log_buffer.output[LogKeys.LR] = self._get_log_lr(trainer)
+
+    def after_train_epoch(self, trainer):
+        pass
diff --git a/easycv/toolkit/modelscope/trainers/utils/metric.py b/easycv/toolkit/modelscope/trainers/utils/metric.py
new file mode 100644
index 00000000..1b37f476
--- /dev/null
+++ b/easycv/toolkit/modelscope/trainers/utils/metric.py
@@ -0,0 +1,61 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
+import itertools
+from typing import Dict
+
+import numpy as np
+import torch
+from modelscope.metrics.base import Metric
+from modelscope.metrics.builder import METRICS
+
+
+@METRICS.register_module(module_name='EasyCVMetric')
+class EasyCVMetric(Metric):
+    """Adapt to the ModelScope Metric API for EasyCV evaluators.
+    """
+
+    def __init__(self, trainer=None, evaluators=None, *args, **kwargs):
+        from easycv.core.evaluation.builder import build_evaluator
+
+        self.trainer = trainer
+        self.evaluators = build_evaluator(evaluators)
+        self.preds = []
+        self.groundtruths = []
+
+    def add(self, outputs: Dict, inputs: Dict):
+        # labels are obtained from the eval dataset in evaluate(),
+        # so the inputs are not stored
+        self.preds.append(outputs)
+        del inputs
+
+    def evaluate(self):
+        results = {}
+        for _, batch in enumerate(self.preds):
+            for k, v in batch.items():
+                if k not in results:
+                    results[k] = []
+                results[k].append(v)
+
+        for k, v in results.items():
+            if len(v) == 0:
+                raise ValueError(f'empty result for {k}')
+
+            if isinstance(v[0], torch.Tensor):
+                results[k] = torch.cat(v, 0)
+            elif isinstance(v[0], (list, np.ndarray)):
+                results[k] = list(itertools.chain.from_iterable(v))
+            else:
+                raise ValueError(
+                    f'values of the batch prediction dict should be tensors or lists, but {k} has type {type(v[0])}'
+                )
+
+        metric_values = self.trainer.eval_dataset.evaluate(
+            results, self.evaluators)
+        return metric_values
+
+    def merge(self, other: 'EasyCVMetric'):
+        self.preds.extend(other.preds)
+
+    def __getstate__(self):
+        return self.preds
+
+    def __setstate__(self, state):
+        self.__init__()
+        self.preds = state
diff --git a/easycv/toolkit/modelscope/trainers/utils/register_util.py b/easycv/toolkit/modelscope/trainers/utils/register_util.py
new file mode 100644
index 00000000..04bf719b
--- /dev/null
+++ b/easycv/toolkit/modelscope/trainers/utils/register_util.py
@@ -0,0 +1,97 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
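`EasyCVMetric` above only accumulates raw batch predictions; the actual computation is delegated to the EasyCV evaluators built from its `evaluators` argument and applied through the eval dataset. A sketch of the evaluation config fragment that would route through it; the evaluator entry is an illustrative assumption, not taken from this patch:

# Illustrative config fragment (Python dict style used by EasyCV configs).
evaluation = dict(
    dataloader=dict(batch_size_per_gpu=2, workers_per_gpu=1),
    metrics=[
        dict(
            type='EasyCVMetric',
            evaluators=[dict(type='ClsEvaluator', topk=(1, ))])
    ])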
+import inspect
+import logging
+
+from modelscope.trainers.hooks import HOOKS
+from modelscope.trainers.parallel.builder import PARALLEL
+from modelscope.utils.registry import default_group
+
+
+class _RegisterManager:
+
+    def __init__(self):
+        self.registries = {}
+
+    def add(self, module, name, group_key=default_group):
+        if module.name not in self.registries:
+            self.registries[module.name] = {}
+        if group_key not in self.registries[module.name]:
+            self.registries[module.name][group_key] = []
+
+        self.registries[module.name][group_key].append(name)
+
+    def exists(self, module, name, group_key=default_group):
+        if self.registries.get(module.name, None) is None:
+            return False
+        if self.registries[module.name].get(group_key, None) is None:
+            return False
+        if name in self.registries[module.name][group_key]:
+            return True
+
+        return False
+
+
+# Tracks what has been registered dynamically, to avoid duplicate registration.
+_dynamic_register = _RegisterManager()
+
+
+def register_parallel():
+    from mmcv.parallel import MMDistributedDataParallel, MMDataParallel
+
+    mmddp = 'MMDistributedDataParallel'
+    mmdp = 'MMDataParallel'
+
+    if not _dynamic_register.exists(PARALLEL, mmddp):
+        _dynamic_register.add(PARALLEL, mmddp)
+        PARALLEL.register_module(
+            module_name=mmddp, module_cls=MMDistributedDataParallel)
+    if not _dynamic_register.exists(PARALLEL, mmdp):
+        _dynamic_register.add(PARALLEL, mmdp)
+        PARALLEL.register_module(module_name=mmdp, module_cls=MMDataParallel)
+
+
+def register_hook_to_ms(hook_name, logger=None):
+    """Register an EasyCV hook to ModelScope."""
+    from easycv.hooks import HOOKS as _EV_HOOKS
+
+    if hook_name not in _EV_HOOKS._module_dict:
+        raise ValueError(
+            f'Hook "{hook_name}" not found in the EasyCV hook registries!')
+
+    if _dynamic_register.exists(HOOKS, hook_name):
+        return
+    _dynamic_register.add(HOOKS, hook_name)
+
+    obj = _EV_HOOKS._module_dict[hook_name]
+    HOOKS.register_module(module_name=hook_name, module_cls=obj)
+
+    log_str = f'Register hook "{hook_name}" to modelscope hooks.'
+    logger.info(log_str) if logger is not None else logging.info(log_str)
+
+
+def register_part_mmcv_hooks_to_ms():
+    """Register the required mmcv hooks to ModelScope.
+    Currently we only register the lr scheduler hooks from EasyCV and mmcv.
+ Please refer to: + EasyCV: https://github.com/alibaba/EasyCV/blob/master/easycv/hooks/lr_update_hook.py + mmcv: https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py + """ + from mmcv.runner.hooks import lr_updater + from mmcv.runner.hooks import HOOKS as _MMCV_HOOKS + from easycv.hooks import StepFixCosineAnnealingLrUpdaterHook, YOLOXLrUpdaterHook + + mmcv_hooks_in_easycv = [('StepFixCosineAnnealingLrUpdaterHook', + StepFixCosineAnnealingLrUpdaterHook), + ('YOLOXLrUpdaterHook', YOLOXLrUpdaterHook)] + + members = inspect.getmembers(lr_updater) + members.extend(mmcv_hooks_in_easycv) + + for name, obj in members: + if name in _MMCV_HOOKS._module_dict: + if _dynamic_register.exists(HOOKS, name): + continue + _dynamic_register.add(HOOKS, name) + HOOKS.register_module( + module_name=name, + module_cls=obj, + ) diff --git a/easycv/utils/config_tools.py b/easycv/utils/config_tools.py index b4a1bca2..e673f49a 100644 --- a/easycv/utils/config_tools.py +++ b/easycv/utils/config_tools.py @@ -531,6 +531,12 @@ def validate_export_config(cfg): 'FCOS_ITAG_EASY': 'configs/detection/fcos/fcos_r50_torch_1x_pai.py', 'FCOS_COCO_EASY': 'configs/detection/fcos/fcos_r50_torch_1x_coco.py', + # segmentation + 'FCN_SEG': 'configs/segmentation/fcn/fcn_r50-d8_512x512_8xb4_60e_voc12.py', + 'UPERNET_SEG': + 'configs/segmentation/upernet/upernet_r50_512x512_8xb4_60e_voc12.py', + 'SEGFORMER_SEG': 'configs/segmentation/segformer/segformer_b5_coco.py', + # ssl 'MOCO_R50_TFRECORD': 'configs/config_templates/moco_r50_tfrecord.py', 'MOCO_R50_TFRECORD_OSS': diff --git a/easycv/utils/ms_utils.py b/easycv/utils/ms_utils.py index eea8448c..55539ca0 100644 --- a/easycv/utils/ms_utils.py +++ b/easycv/utils/ms_utils.py @@ -100,6 +100,7 @@ def to_ms_config(cfg, dict( task=task, framework='pytorch', + plugins=['pai-easycv'], preprocessor={}, # adapt to modelscope, do nothing model={ 'type': ms_model_name, diff --git a/easycv/version.py b/easycv/version.py index ac62e2ba..b9f3853d 100644 --- a/easycv/version.py +++ b/easycv/version.py @@ -2,5 +2,5 @@ # GENERATED VERSION FILE # TIME: Thu Nov 5 14:17:50 2020 -__version__ = '0.10.0' -short_version = '0.10.0' +__version__ = '0.11.3' +short_version = '0.11.3' diff --git a/requirements/optional.txt b/requirements/optional.txt index cf923ee6..e9a9661a 100644 --- a/requirements/optional.txt +++ b/requirements/optional.txt @@ -1,9 +1,11 @@ +cython_bbox # for mot http://pai-nni.oss-cn-zhangjiakou.aliyuncs.com/release/2.6.1/pai_nni-2.6.1-py3-none-manylinux1_x86_64.whl http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/EasyCV/pkgs/whl/panopticapi/panopticapi-0.1-py3-none-any.whl http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/third_party/blade_compression-0.0.2-py3-none-any.whl https://developer.download.nvidia.com/compute/redist/nvidia-dali-cuda100/nvidia_dali_cuda100-0.25.0-1535750-py3-none-manylinux2014_x86_64.whl lap +modelscope nuscenes-devkit open3d pyquaternion diff --git a/requirements/runtime.txt b/requirements/runtime.txt index 766004da..edff212e 100644 --- a/requirements/runtime.txt +++ b/requirements/runtime.txt @@ -1,6 +1,5 @@ albumentations cityscapesscripts -cython_bbox dataclasses decord einops @@ -9,6 +8,7 @@ fvcore h5py imgaug json_tricks +jsonplus lmdb numba numpy @@ -23,8 +23,9 @@ pycocotools pytorch_metric_learning>=0.9.89 rapidfuzz scikit-image +scikit-learn seaborn -sklearn +shapely==1.8.4 tensorboard thop timm==0.5.4 diff --git a/tests/run.py b/tests/run.py index 22eb50bc..bcc0a721 100644 --- a/tests/run.py +++ b/tests/run.py @@ 
-8,15 +8,26 @@ from fnmatch import fnmatch
 
 
-def gather_test_cases(test_dir, pattern, list_tests):
+def get_skip_file(skip_dir, pattern=None):
     case_list = []
+    if skip_dir:
+        for path in skip_dir:
+            for dirpath, dirnames, filenames in os.walk(path):
+                for file in filenames:
+                    if fnmatch(file, pattern):
+                        case_list.append(file)
+    return case_list
+
+
+def gather_test_cases(test_dir, pattern, list_tests, skip_dir):
+    case_list = []
+    skip_list = get_skip_file(skip_dir, pattern)
     for dirpath, dirnames, filenames in os.walk(test_dir):
         for file in filenames:
-            if fnmatch(file, pattern):
+            if fnmatch(file, pattern) and file not in skip_list:
                 case_list.append(file)
     test_suite = unittest.TestSuite()
-
     for case in case_list:
         test_case = unittest.defaultTestLoader.discover(
             start_dir=test_dir, pattern=case)
@@ -34,7 +45,8 @@ def gather_test_cases(test_dir, pattern, list_tests):
 def main(args):
     runner = unittest.TextTestRunner()
     test_suite = gather_test_cases(
-        os.path.abspath(args.test_dir), args.pattern, args.list_tests)
+        os.path.abspath(args.test_dir), args.pattern, args.list_tests,
+        args.skip_dir)
     if not args.list_tests:
         result = runner.run(test_suite)
         if len(result.failures) > 0 or len(result.errors) > 0:
@@ -49,5 +61,7 @@ def main(args):
         '--pattern', default='test_*.py', help='test file pattern')
     parser.add_argument(
         '--test_dir', default='tests', help='directory to be tested')
+    parser.add_argument(
+        '--skip_dir', nargs='+', required=False, help='skip test cases under these directories')
     args = parser.parse_args()
     main(args)
diff --git a/tests/apis/__init__.py b/tests/test_apis/__init__.py
similarity index 100%
rename from tests/apis/__init__.py
rename to tests/test_apis/__init__.py
diff --git a/tests/apis/test_export.py b/tests/test_apis/test_export.py
similarity index 100%
rename from tests/apis/test_export.py
rename to tests/test_apis/test_export.py
diff --git a/tests/apis/test_export_blade.py b/tests/test_apis/test_export_blade.py
similarity index 100%
rename from tests/apis/test_export_blade.py
rename to tests/test_apis/test_export_blade.py
diff --git a/tests/configs/__init__.py b/tests/test_configs/__init__.py
similarity index 100%
rename from tests/configs/__init__.py
rename to tests/test_configs/__init__.py
diff --git a/tests/configs/test_check_base_cfg_path.py b/tests/test_configs/test_check_base_cfg_path.py
similarity index 100%
rename from tests/configs/test_check_base_cfg_path.py
rename to tests/test_configs/test_check_base_cfg_path.py
diff --git a/tests/core/__init__.py b/tests/test_core/__init__.py
similarity index 100%
rename from tests/core/__init__.py
rename to tests/test_core/__init__.py
diff --git a/tests/core/evaluation/__init__.py b/tests/test_core/evaluation/__init__.py
similarity index 100%
rename from tests/core/evaluation/__init__.py
rename to tests/test_core/evaluation/__init__.py
diff --git a/tests/core/evaluation/test_auc_eval.py b/tests/test_core/evaluation/test_auc_eval.py
similarity index 100%
rename from tests/core/evaluation/test_auc_eval.py
rename to tests/test_core/evaluation/test_auc_eval.py
diff --git a/tests/core/evaluation/test_classification_eval.py b/tests/test_core/evaluation/test_classification_eval.py
similarity index 100%
rename from tests/core/evaluation/test_classification_eval.py
rename to tests/test_core/evaluation/test_classification_eval.py
diff --git a/tests/core/evaluation/test_coco_evaluation.py b/tests/test_core/evaluation/test_coco_evaluation.py
similarity index 100%
rename from tests/core/evaluation/test_coco_evaluation.py
rename to
tests/test_core/evaluation/test_coco_evaluation.py diff --git a/tests/core/evaluation/test_coco_tools.py b/tests/test_core/evaluation/test_coco_tools.py similarity index 100% rename from tests/core/evaluation/test_coco_tools.py rename to tests/test_core/evaluation/test_coco_tools.py diff --git a/tests/core/evaluation/test_keypoint_eval.py b/tests/test_core/evaluation/test_keypoint_eval.py similarity index 100% rename from tests/core/evaluation/test_keypoint_eval.py rename to tests/test_core/evaluation/test_keypoint_eval.py diff --git a/tests/core/evaluation/test_metrics.py b/tests/test_core/evaluation/test_metrics.py similarity index 100% rename from tests/core/evaluation/test_metrics.py rename to tests/test_core/evaluation/test_metrics.py diff --git a/tests/core/evaluation/test_mse_eval.py b/tests/test_core/evaluation/test_mse_eval.py similarity index 100% rename from tests/core/evaluation/test_mse_eval.py rename to tests/test_core/evaluation/test_mse_eval.py diff --git a/tests/core/evaluation/test_nuscenes_eval.py b/tests/test_core/evaluation/test_nuscenes_eval.py similarity index 100% rename from tests/core/evaluation/test_nuscenes_eval.py rename to tests/test_core/evaluation/test_nuscenes_eval.py diff --git a/tests/core/evaluation/test_retrival_topk_eval.py b/tests/test_core/evaluation/test_retrival_topk_eval.py similarity index 100% rename from tests/core/evaluation/test_retrival_topk_eval.py rename to tests/test_core/evaluation/test_retrival_topk_eval.py diff --git a/tests/core/evaluation/test_top_down_eval.py b/tests/test_core/evaluation/test_top_down_eval.py similarity index 100% rename from tests/core/evaluation/test_top_down_eval.py rename to tests/test_core/evaluation/test_top_down_eval.py diff --git a/tests/core/optimizer/__init__.py b/tests/test_core/optimizer/__init__.py similarity index 100% rename from tests/core/optimizer/__init__.py rename to tests/test_core/optimizer/__init__.py diff --git a/tests/core/optimizer/test_optimizers.py b/tests/test_core/optimizer/test_optimizers.py similarity index 100% rename from tests/core/optimizer/test_optimizers.py rename to tests/test_core/optimizer/test_optimizers.py diff --git a/tests/core/post_processing/__init__.py b/tests/test_core/post_processing/__init__.py similarity index 100% rename from tests/core/post_processing/__init__.py rename to tests/test_core/post_processing/__init__.py diff --git a/tests/core/post_processing/test_nms.py b/tests/test_core/post_processing/test_nms.py similarity index 100% rename from tests/core/post_processing/test_nms.py rename to tests/test_core/post_processing/test_nms.py diff --git a/tests/core/sailfish/__init__.py b/tests/test_core/sailfish/__init__.py similarity index 100% rename from tests/core/sailfish/__init__.py rename to tests/test_core/sailfish/__init__.py diff --git a/tests/core/sailfish/test_arcface.py b/tests/test_core/sailfish/test_arcface.py similarity index 100% rename from tests/core/sailfish/test_arcface.py rename to tests/test_core/sailfish/test_arcface.py diff --git a/tests/core/sailfish/test_linear.py b/tests/test_core/sailfish/test_linear.py similarity index 100% rename from tests/core/sailfish/test_linear.py rename to tests/test_core/sailfish/test_linear.py diff --git a/tests/core/visualization/__init__.py b/tests/test_core/visualization/__init__.py similarity index 100% rename from tests/core/visualization/__init__.py rename to tests/test_core/visualization/__init__.py diff --git a/tests/core/visualization/test_image.py b/tests/test_core/visualization/test_image.py similarity 
index 100% rename from tests/core/visualization/test_image.py rename to tests/test_core/visualization/test_image.py diff --git a/tests/datasets/__init__.py b/tests/test_datasets/__init__.py similarity index 100% rename from tests/datasets/__init__.py rename to tests/test_datasets/__init__.py diff --git a/tests/datasets/classification/__init__.py b/tests/test_datasets/classification/__init__.py similarity index 100% rename from tests/datasets/classification/__init__.py rename to tests/test_datasets/classification/__init__.py diff --git a/tests/datasets/classification/data_sources/__init__.py b/tests/test_datasets/classification/data_sources/__init__.py similarity index 100% rename from tests/datasets/classification/data_sources/__init__.py rename to tests/test_datasets/classification/data_sources/__init__.py diff --git a/tests/datasets/classification/data_sources/test_cls_caltech_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_caltech_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_caltech_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_caltech_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_cifar_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_cifar_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_cifar_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_cifar_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_class_list_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_class_list_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_class_list_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_class_list_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_flower_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_flower_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_flower_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_flower_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_image_list_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_image_list_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_image_list_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_image_list_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_image_npy_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_image_npy_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_image_npy_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_image_npy_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_imagenet_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_imagenet_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_imagenet_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_imagenet_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_itag_datasource.py 
b/tests/test_datasets/classification/data_sources/test_cls_itag_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_itag_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_itag_datasource.py diff --git a/tests/datasets/classification/data_sources/test_cls_mnist_datasource.py b/tests/test_datasets/classification/data_sources/test_cls_mnist_datasource.py similarity index 100% rename from tests/datasets/classification/data_sources/test_cls_mnist_datasource.py rename to tests/test_datasets/classification/data_sources/test_cls_mnist_datasource.py diff --git a/tests/datasets/classification/test_cls_raw_dataset.py b/tests/test_datasets/classification/test_cls_raw_dataset.py similarity index 100% rename from tests/datasets/classification/test_cls_raw_dataset.py rename to tests/test_datasets/classification/test_cls_raw_dataset.py diff --git a/tests/datasets/detection/__init__.py b/tests/test_datasets/detection/__init__.py similarity index 100% rename from tests/datasets/detection/__init__.py rename to tests/test_datasets/detection/__init__.py diff --git a/tests/datasets/detection/data_sources/__init__.py b/tests/test_datasets/detection/data_sources/__init__.py similarity index 100% rename from tests/datasets/detection/data_sources/__init__.py rename to tests/test_datasets/detection/data_sources/__init__.py diff --git a/tests/datasets/detection/data_sources/test_det_african_wildlife_datasource.py b/tests/test_datasets/detection/data_sources/test_det_african_wildlife_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_african_wildlife_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_african_wildlife_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_artaxor_datasource.py b/tests/test_datasets/detection/data_sources/test_det_artaxor_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_artaxor_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_artaxor_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_coco_datasource.py b/tests/test_datasets/detection/data_sources/test_det_coco_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_coco_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_coco_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_coco_lvis_datasource.py b/tests/test_datasets/detection/data_sources/test_det_coco_lvis_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_coco_lvis_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_coco_lvis_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_concat_datasource.py b/tests/test_datasets/detection/data_sources/test_det_concat_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_concat_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_concat_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_crowd_human_datasource.py b/tests/test_datasets/detection/data_sources/test_det_crowd_human_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_crowd_human_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_crowd_human_datasource.py diff --git 
a/tests/datasets/detection/data_sources/test_det_fruit_datasource.py b/tests/test_datasets/detection/data_sources/test_det_fruit_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_fruit_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_fruit_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_object365_datasource.py b/tests/test_datasets/detection/data_sources/test_det_object365_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_object365_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_object365_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_pai_format_datasource.py b/tests/test_datasets/detection/data_sources/test_det_pai_format_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_pai_format_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_pai_format_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_pet_datasource.py b/tests/test_datasets/detection/data_sources/test_det_pet_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_pet_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_pet_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_raw_datasource.py b/tests/test_datasets/detection/data_sources/test_det_raw_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_raw_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_raw_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_ting_person_datasource.py b/tests/test_datasets/detection/data_sources/test_det_ting_person_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_ting_person_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_ting_person_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_voc_datasource.py b/tests/test_datasets/detection/data_sources/test_det_voc_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_voc_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_voc_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_wider_face_datasource.py b/tests/test_datasets/detection/data_sources/test_det_wider_face_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_wider_face_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_wider_face_datasource.py diff --git a/tests/datasets/detection/data_sources/test_det_wider_person_datasource.py b/tests/test_datasets/detection/data_sources/test_det_wider_person_datasource.py similarity index 100% rename from tests/datasets/detection/data_sources/test_det_wider_person_datasource.py rename to tests/test_datasets/detection/data_sources/test_det_wider_person_datasource.py diff --git a/tests/datasets/detection/test_det_mix_dataset.py b/tests/test_datasets/detection/test_det_mix_dataset.py similarity index 100% rename from tests/datasets/detection/test_det_mix_dataset.py rename to tests/test_datasets/detection/test_det_mix_dataset.py diff --git a/tests/datasets/detection/test_det_raw_dataset.py b/tests/test_datasets/detection/test_det_raw_dataset.py similarity index 100% rename from 
tests/datasets/detection/test_det_raw_dataset.py rename to tests/test_datasets/detection/test_det_raw_dataset.py diff --git a/tests/datasets/detection3d/__init__.py b/tests/test_datasets/detection3d/__init__.py similarity index 100% rename from tests/datasets/detection3d/__init__.py rename to tests/test_datasets/detection3d/__init__.py diff --git a/tests/datasets/detection3d/test_nuscenes_dataset.py b/tests/test_datasets/detection3d/test_nuscenes_dataset.py similarity index 100% rename from tests/datasets/detection3d/test_nuscenes_dataset.py rename to tests/test_datasets/detection3d/test_nuscenes_dataset.py diff --git a/tests/datasets/ocr/__init__.py b/tests/test_datasets/ocr/__init__.py similarity index 100% rename from tests/datasets/ocr/__init__.py rename to tests/test_datasets/ocr/__init__.py diff --git a/tests/datasets/ocr/test_ocr_cls_dataset.py b/tests/test_datasets/ocr/test_ocr_cls_dataset.py similarity index 100% rename from tests/datasets/ocr/test_ocr_cls_dataset.py rename to tests/test_datasets/ocr/test_ocr_cls_dataset.py diff --git a/tests/datasets/ocr/test_ocr_det_dataset.py b/tests/test_datasets/ocr/test_ocr_det_dataset.py similarity index 100% rename from tests/datasets/ocr/test_ocr_det_dataset.py rename to tests/test_datasets/ocr/test_ocr_det_dataset.py diff --git a/tests/datasets/ocr/test_ocr_rec_dataset.py b/tests/test_datasets/ocr/test_ocr_rec_dataset.py similarity index 100% rename from tests/datasets/ocr/test_ocr_rec_dataset.py rename to tests/test_datasets/ocr/test_ocr_rec_dataset.py diff --git a/tests/datasets/pose/__init__.py b/tests/test_datasets/pose/__init__.py similarity index 100% rename from tests/datasets/pose/__init__.py rename to tests/test_datasets/pose/__init__.py diff --git a/tests/datasets/pose/data_sources/__init__.py b/tests/test_datasets/pose/data_sources/__init__.py similarity index 100% rename from tests/datasets/pose/data_sources/__init__.py rename to tests/test_datasets/pose/data_sources/__init__.py diff --git a/tests/datasets/pose/data_sources/test_coco_hand.py b/tests/test_datasets/pose/data_sources/test_coco_hand.py similarity index 100% rename from tests/datasets/pose/data_sources/test_coco_hand.py rename to tests/test_datasets/pose/data_sources/test_coco_hand.py diff --git a/tests/datasets/pose/data_sources/test_pose_coco_datasource.py b/tests/test_datasets/pose/data_sources/test_pose_coco_datasource.py similarity index 100% rename from tests/datasets/pose/data_sources/test_pose_coco_datasource.py rename to tests/test_datasets/pose/data_sources/test_pose_coco_datasource.py diff --git a/tests/datasets/pose/data_sources/test_pose_crowd_pose_datasource.py b/tests/test_datasets/pose/data_sources/test_pose_crowd_pose_datasource.py similarity index 100% rename from tests/datasets/pose/data_sources/test_pose_crowd_pose_datasource.py rename to tests/test_datasets/pose/data_sources/test_pose_crowd_pose_datasource.py diff --git a/tests/datasets/pose/data_sources/test_pose_mpii_datasource.py b/tests/test_datasets/pose/data_sources/test_pose_mpii_datasource.py similarity index 100% rename from tests/datasets/pose/data_sources/test_pose_mpii_datasource.py rename to tests/test_datasets/pose/data_sources/test_pose_mpii_datasource.py diff --git a/tests/datasets/pose/data_sources/test_pose_oc_human_datasource.py b/tests/test_datasets/pose/data_sources/test_pose_oc_human_datasource.py similarity index 100% rename from tests/datasets/pose/data_sources/test_pose_oc_human_datasource.py rename to 
tests/test_datasets/pose/data_sources/test_pose_oc_human_datasource.py diff --git a/tests/datasets/pose/data_sources/test_pose_top_down_datasource.py b/tests/test_datasets/pose/data_sources/test_pose_top_down_datasource.py similarity index 100% rename from tests/datasets/pose/data_sources/test_pose_top_down_datasource.py rename to tests/test_datasets/pose/data_sources/test_pose_top_down_datasource.py diff --git a/tests/datasets/pose/pipelines/test_transforms.py b/tests/test_datasets/pose/pipelines/test_transforms.py similarity index 100% rename from tests/datasets/pose/pipelines/test_transforms.py rename to tests/test_datasets/pose/pipelines/test_transforms.py diff --git a/tests/datasets/pose/test_coco_whole_body_hand_dataset.py b/tests/test_datasets/pose/test_coco_whole_body_hand_dataset.py similarity index 100% rename from tests/datasets/pose/test_coco_whole_body_hand_dataset.py rename to tests/test_datasets/pose/test_coco_whole_body_hand_dataset.py diff --git a/tests/datasets/pose/test_pose_top_down_dataset.py b/tests/test_datasets/pose/test_pose_top_down_dataset.py similarity index 100% rename from tests/datasets/pose/test_pose_top_down_dataset.py rename to tests/test_datasets/pose/test_pose_top_down_dataset.py diff --git a/tests/datasets/pose/test_wholebody_topdown_coco_dataset.py b/tests/test_datasets/pose/test_wholebody_topdown_coco_dataset.py similarity index 100% rename from tests/datasets/pose/test_wholebody_topdown_coco_dataset.py rename to tests/test_datasets/pose/test_wholebody_topdown_coco_dataset.py diff --git a/tests/datasets/segmentation/__init__.py b/tests/test_datasets/segmentation/__init__.py similarity index 100% rename from tests/datasets/segmentation/__init__.py rename to tests/test_datasets/segmentation/__init__.py diff --git a/tests/datasets/segmentation/data_sources/__init__.py b/tests/test_datasets/segmentation/data_sources/__init__.py similarity index 100% rename from tests/datasets/segmentation/data_sources/__init__.py rename to tests/test_datasets/segmentation/data_sources/__init__.py diff --git a/tests/datasets/segmentation/data_sources/test_seg_cityscapes_datasource.py b/tests/test_datasets/segmentation/data_sources/test_seg_cityscapes_datasource.py similarity index 100% rename from tests/datasets/segmentation/data_sources/test_seg_cityscapes_datasource.py rename to tests/test_datasets/segmentation/data_sources/test_seg_cityscapes_datasource.py diff --git a/tests/datasets/segmentation/data_sources/test_seg_coco_datasource.py b/tests/test_datasets/segmentation/data_sources/test_seg_coco_datasource.py similarity index 100% rename from tests/datasets/segmentation/data_sources/test_seg_coco_datasource.py rename to tests/test_datasets/segmentation/data_sources/test_seg_coco_datasource.py diff --git a/tests/datasets/segmentation/data_sources/test_seg_coco_stuff_datasource.py b/tests/test_datasets/segmentation/data_sources/test_seg_coco_stuff_datasource.py similarity index 100% rename from tests/datasets/segmentation/data_sources/test_seg_coco_stuff_datasource.py rename to tests/test_datasets/segmentation/data_sources/test_seg_coco_stuff_datasource.py diff --git a/tests/datasets/segmentation/data_sources/test_seg_raw_datasource.py b/tests/test_datasets/segmentation/data_sources/test_seg_raw_datasource.py similarity index 100% rename from tests/datasets/segmentation/data_sources/test_seg_raw_datasource.py rename to tests/test_datasets/segmentation/data_sources/test_seg_raw_datasource.py diff --git a/tests/datasets/segmentation/data_sources/test_seg_voc_datasource.py 
b/tests/test_datasets/segmentation/data_sources/test_seg_voc_datasource.py similarity index 100% rename from tests/datasets/segmentation/data_sources/test_seg_voc_datasource.py rename to tests/test_datasets/segmentation/data_sources/test_seg_voc_datasource.py diff --git a/tests/datasets/segmentation/test_seg_raw_dataset.py b/tests/test_datasets/segmentation/test_seg_raw_dataset.py similarity index 100% rename from tests/datasets/segmentation/test_seg_raw_dataset.py rename to tests/test_datasets/segmentation/test_seg_raw_dataset.py diff --git a/tests/datasets/selfsup/__init__.py b/tests/test_datasets/selfsup/__init__.py similarity index 100% rename from tests/datasets/selfsup/__init__.py rename to tests/test_datasets/selfsup/__init__.py diff --git a/tests/datasets/selfsup/data_sources/__init__.py b/tests/test_datasets/selfsup/data_sources/__init__.py similarity index 100% rename from tests/datasets/selfsup/data_sources/__init__.py rename to tests/test_datasets/selfsup/data_sources/__init__.py diff --git a/tests/datasets/selfsup/data_sources/test_ssl_image_list_datasource.py b/tests/test_datasets/selfsup/data_sources/test_ssl_image_list_datasource.py similarity index 100% rename from tests/datasets/selfsup/data_sources/test_ssl_image_list_datasource.py rename to tests/test_datasets/selfsup/data_sources/test_ssl_image_list_datasource.py diff --git a/tests/datasets/selfsup/data_sources/test_ssl_imagenet_feature_datasource.py b/tests/test_datasets/selfsup/data_sources/test_ssl_imagenet_feature_datasource.py similarity index 100% rename from tests/datasets/selfsup/data_sources/test_ssl_imagenet_feature_datasource.py rename to tests/test_datasets/selfsup/data_sources/test_ssl_imagenet_feature_datasource.py diff --git a/tests/datasets/shared/__init__.py b/tests/test_datasets/shared/__init__.py similarity index 100% rename from tests/datasets/shared/__init__.py rename to tests/test_datasets/shared/__init__.py diff --git a/tests/datasets/shared/pipelines/__init__.py b/tests/test_datasets/shared/pipelines/__init__.py similarity index 100% rename from tests/datasets/shared/pipelines/__init__.py rename to tests/test_datasets/shared/pipelines/__init__.py diff --git a/tests/datasets/shared/pipelines/test_transforms.py b/tests/test_datasets/shared/pipelines/test_transforms.py similarity index 100% rename from tests/datasets/shared/pipelines/test_transforms.py rename to tests/test_datasets/shared/pipelines/test_transforms.py diff --git a/tests/datasets/shared/test_dali_tfrecord_imagenet.py b/tests/test_datasets/shared/test_dali_tfrecord_imagenet.py similarity index 100% rename from tests/datasets/shared/test_dali_tfrecord_imagenet.py rename to tests/test_datasets/shared/test_dali_tfrecord_imagenet.py diff --git a/tests/datasets/shared/test_dali_tfrecord_multi_view.py b/tests/test_datasets/shared/test_dali_tfrecord_multi_view.py similarity index 100% rename from tests/datasets/shared/test_dali_tfrecord_multi_view.py rename to tests/test_datasets/shared/test_dali_tfrecord_multi_view.py diff --git a/tests/datasets/shared/test_multi_view.py b/tests/test_datasets/shared/test_multi_view.py similarity index 100% rename from tests/datasets/shared/test_multi_view.py rename to tests/test_datasets/shared/test_multi_view.py diff --git a/tests/datasets/shared/test_odps_dataset.py b/tests/test_datasets/shared/test_odps_dataset.py similarity index 100% rename from tests/datasets/shared/test_odps_dataset.py rename to tests/test_datasets/shared/test_odps_dataset.py diff --git a/tests/datasets/shared/test_raw.py 
b/tests/test_datasets/shared/test_raw.py similarity index 100% rename from tests/datasets/shared/test_raw.py rename to tests/test_datasets/shared/test_raw.py diff --git a/tests/datasets/shared/test_tfrecord_util.py b/tests/test_datasets/shared/test_tfrecord_util.py similarity index 100% rename from tests/datasets/shared/test_tfrecord_util.py rename to tests/test_datasets/shared/test_tfrecord_util.py diff --git a/tests/datasets/video_recognition/__init__.py b/tests/test_datasets/video_recognition/__init__.py similarity index 100% rename from tests/datasets/video_recognition/__init__.py rename to tests/test_datasets/video_recognition/__init__.py diff --git a/tests/datasets/video_recognition/test_videodataset.py b/tests/test_datasets/video_recognition/test_videodataset.py similarity index 100% rename from tests/datasets/video_recognition/test_videodataset.py rename to tests/test_datasets/video_recognition/test_videodataset.py diff --git a/tests/file/__init__.py b/tests/test_file/__init__.py similarity index 100% rename from tests/file/__init__.py rename to tests/test_file/__init__.py diff --git a/tests/file/test_file_io.py b/tests/test_file/test_file_io.py similarity index 100% rename from tests/file/test_file_io.py rename to tests/test_file/test_file_io.py diff --git a/tests/file/test_image.py b/tests/test_file/test_image.py similarity index 100% rename from tests/file/test_image.py rename to tests/test_file/test_image.py diff --git a/tests/framework/__init__.py b/tests/test_framework/__init__.py similarity index 100% rename from tests/framework/__init__.py rename to tests/test_framework/__init__.py diff --git a/tests/framework/test_errors.py b/tests/test_framework/test_errors.py similarity index 100% rename from tests/framework/test_errors.py rename to tests/test_framework/test_errors.py diff --git a/tests/hooks/__init__.py b/tests/test_hooks/__init__.py similarity index 100% rename from tests/hooks/__init__.py rename to tests/test_hooks/__init__.py diff --git a/tests/hooks/test_best_ckpt_saver_hook.py b/tests/test_hooks/test_best_ckpt_saver_hook.py similarity index 100% rename from tests/hooks/test_best_ckpt_saver_hook.py rename to tests/test_hooks/test_best_ckpt_saver_hook.py diff --git a/tests/hooks/test_byol_hook.py b/tests/test_hooks/test_byol_hook.py similarity index 100% rename from tests/hooks/test_byol_hook.py rename to tests/test_hooks/test_byol_hook.py diff --git a/tests/hooks/test_dino_hook.py b/tests/test_hooks/test_dino_hook.py similarity index 100% rename from tests/hooks/test_dino_hook.py rename to tests/test_hooks/test_dino_hook.py diff --git a/tests/hooks/test_ema_hook.py b/tests/test_hooks/test_ema_hook.py similarity index 100% rename from tests/hooks/test_ema_hook.py rename to tests/test_hooks/test_ema_hook.py diff --git a/tests/hooks/test_export_hook.py b/tests/test_hooks/test_export_hook.py similarity index 100% rename from tests/hooks/test_export_hook.py rename to tests/test_hooks/test_export_hook.py diff --git a/tests/hooks/test_oss_sync_hook.py b/tests/test_hooks/test_oss_sync_hook.py similarity index 100% rename from tests/hooks/test_oss_sync_hook.py rename to tests/test_hooks/test_oss_sync_hook.py diff --git a/tests/hooks/test_swav_hook.py b/tests/test_hooks/test_swav_hook.py similarity index 100% rename from tests/hooks/test_swav_hook.py rename to tests/test_hooks/test_swav_hook.py diff --git a/tests/hooks/test_sync_norm_hook.py b/tests/test_hooks/test_sync_norm_hook.py similarity index 100% rename from tests/hooks/test_sync_norm_hook.py rename to 
tests/test_hooks/test_sync_norm_hook.py diff --git a/tests/hooks/test_sync_random_size_hook.py b/tests/test_hooks/test_sync_random_size_hook.py similarity index 100% rename from tests/hooks/test_sync_random_size_hook.py rename to tests/test_hooks/test_sync_random_size_hook.py diff --git a/tests/models/__init__.py b/tests/test_models/__init__.py similarity index 100% rename from tests/models/__init__.py rename to tests/test_models/__init__.py diff --git a/tests/models/backbones/__init__.py b/tests/test_models/backbones/__init__.py similarity index 100% rename from tests/models/backbones/__init__.py rename to tests/test_models/backbones/__init__.py diff --git a/tests/models/backbones/test_benchmark_mlp.py b/tests/test_models/backbones/test_benchmark_mlp.py similarity index 100% rename from tests/models/backbones/test_benchmark_mlp.py rename to tests/test_models/backbones/test_benchmark_mlp.py diff --git a/tests/models/backbones/test_bninception.py b/tests/test_models/backbones/test_bninception.py similarity index 100% rename from tests/models/backbones/test_bninception.py rename to tests/test_models/backbones/test_bninception.py diff --git a/tests/models/backbones/test_deitiii.py b/tests/test_models/backbones/test_deitiii.py similarity index 100% rename from tests/models/backbones/test_deitiii.py rename to tests/test_models/backbones/test_deitiii.py diff --git a/tests/models/backbones/test_edgevit.py b/tests/test_models/backbones/test_edgevit.py similarity index 100% rename from tests/models/backbones/test_edgevit.py rename to tests/test_models/backbones/test_edgevit.py diff --git a/tests/models/backbones/test_efficientformer.py b/tests/test_models/backbones/test_efficientformer.py similarity index 100% rename from tests/models/backbones/test_efficientformer.py rename to tests/test_models/backbones/test_efficientformer.py diff --git a/tests/models/backbones/test_genet.py b/tests/test_models/backbones/test_genet.py similarity index 100% rename from tests/models/backbones/test_genet.py rename to tests/test_models/backbones/test_genet.py diff --git a/tests/models/backbones/test_hrnet.py b/tests/test_models/backbones/test_hrnet.py similarity index 100% rename from tests/models/backbones/test_hrnet.py rename to tests/test_models/backbones/test_hrnet.py diff --git a/tests/models/backbones/test_hydraAttention.py b/tests/test_models/backbones/test_hydraAttention.py similarity index 100% rename from tests/models/backbones/test_hydraAttention.py rename to tests/test_models/backbones/test_hydraAttention.py diff --git a/tests/models/backbones/test_inceptionv3.py b/tests/test_models/backbones/test_inceptionv3.py similarity index 100% rename from tests/models/backbones/test_inceptionv3.py rename to tests/test_models/backbones/test_inceptionv3.py diff --git a/tests/models/backbones/test_lighthrnet.py b/tests/test_models/backbones/test_lighthrnet.py similarity index 100% rename from tests/models/backbones/test_lighthrnet.py rename to tests/test_models/backbones/test_lighthrnet.py diff --git a/tests/models/backbones/test_mae_vit_transformer.py b/tests/test_models/backbones/test_mae_vit_transformer.py similarity index 100% rename from tests/models/backbones/test_mae_vit_transformer.py rename to tests/test_models/backbones/test_mae_vit_transformer.py diff --git a/tests/models/backbones/test_mnasnet.py b/tests/test_models/backbones/test_mnasnet.py similarity index 100% rename from tests/models/backbones/test_mnasnet.py rename to tests/test_models/backbones/test_mnasnet.py diff --git 
a/tests/models/backbones/test_mobilenetv2.py b/tests/test_models/backbones/test_mobilenetv2.py similarity index 100% rename from tests/models/backbones/test_mobilenetv2.py rename to tests/test_models/backbones/test_mobilenetv2.py diff --git a/tests/models/backbones/test_pytorch_image_models_wrapper.py b/tests/test_models/backbones/test_pytorch_image_models_wrapper.py similarity index 100% rename from tests/models/backbones/test_pytorch_image_models_wrapper.py rename to tests/test_models/backbones/test_pytorch_image_models_wrapper.py diff --git a/tests/models/backbones/test_resnest.py b/tests/test_models/backbones/test_resnest.py similarity index 100% rename from tests/models/backbones/test_resnest.py rename to tests/test_models/backbones/test_resnest.py diff --git a/tests/models/backbones/test_resnet.py b/tests/test_models/backbones/test_resnet.py similarity index 100% rename from tests/models/backbones/test_resnet.py rename to tests/test_models/backbones/test_resnet.py diff --git a/tests/models/backbones/test_resnext.py b/tests/test_models/backbones/test_resnext.py similarity index 100% rename from tests/models/backbones/test_resnext.py rename to tests/test_models/backbones/test_resnext.py diff --git a/tests/models/backbones/test_swintransformer3d.py b/tests/test_models/backbones/test_swintransformer3d.py similarity index 100% rename from tests/models/backbones/test_swintransformer3d.py rename to tests/test_models/backbones/test_swintransformer3d.py diff --git a/tests/models/backbones/test_vitdet.py b/tests/test_models/backbones/test_vitdet.py similarity index 100% rename from tests/models/backbones/test_vitdet.py rename to tests/test_models/backbones/test_vitdet.py diff --git a/tests/models/backbones/test_x3d.py b/tests/test_models/backbones/test_x3d.py similarity index 100% rename from tests/models/backbones/test_x3d.py rename to tests/test_models/backbones/test_x3d.py diff --git a/tests/models/classification/__init__.py b/tests/test_models/classification/__init__.py similarity index 100% rename from tests/models/classification/__init__.py rename to tests/test_models/classification/__init__.py diff --git a/tests/models/classification/test_classification.py b/tests/test_models/classification/test_classification.py similarity index 100% rename from tests/models/classification/test_classification.py rename to tests/test_models/classification/test_classification.py diff --git a/tests/models/detection/__init__.py b/tests/test_models/detection/__init__.py similarity index 100% rename from tests/models/detection/__init__.py rename to tests/test_models/detection/__init__.py diff --git a/tests/models/detection/detr/__init__.py b/tests/test_models/detection/detr/__init__.py similarity index 100% rename from tests/models/detection/detr/__init__.py rename to tests/test_models/detection/detr/__init__.py diff --git a/tests/models/detection/detr/test_detr.py b/tests/test_models/detection/detr/test_detr.py similarity index 100% rename from tests/models/detection/detr/test_detr.py rename to tests/test_models/detection/detr/test_detr.py diff --git a/tests/models/detection/fcos/__init__.py b/tests/test_models/detection/fcos/__init__.py similarity index 100% rename from tests/models/detection/fcos/__init__.py rename to tests/test_models/detection/fcos/__init__.py diff --git a/tests/models/detection/fcos/test_fcos.py b/tests/test_models/detection/fcos/test_fcos.py similarity index 100% rename from tests/models/detection/fcos/test_fcos.py rename to tests/test_models/detection/fcos/test_fcos.py diff --git 
a/tests/models/detection/yolox/__init__.py b/tests/test_models/detection/yolox/__init__.py similarity index 100% rename from tests/models/detection/yolox/__init__.py rename to tests/test_models/detection/yolox/__init__.py diff --git a/tests/models/detection/yolox/test_yolox.py b/tests/test_models/detection/yolox/test_yolox.py similarity index 100% rename from tests/models/detection/yolox/test_yolox.py rename to tests/test_models/detection/yolox/test_yolox.py diff --git a/tests/models/detection/yolox_edge/__init__.py b/tests/test_models/detection/yolox_edge/__init__.py similarity index 100% rename from tests/models/detection/yolox_edge/__init__.py rename to tests/test_models/detection/yolox_edge/__init__.py diff --git a/tests/models/detection/yolox_edge/test_yolox_edge.py b/tests/test_models/detection/yolox_edge/test_yolox_edge.py similarity index 100% rename from tests/models/detection/yolox_edge/test_yolox_edge.py rename to tests/test_models/detection/yolox_edge/test_yolox_edge.py diff --git a/tests/models/detection3d/__init__.py b/tests/test_models/detection3d/__init__.py similarity index 100% rename from tests/models/detection3d/__init__.py rename to tests/test_models/detection3d/__init__.py diff --git a/tests/models/detection3d/test_bevformer.py b/tests/test_models/detection3d/test_bevformer.py similarity index 100% rename from tests/models/detection3d/test_bevformer.py rename to tests/test_models/detection3d/test_bevformer.py diff --git a/tests/models/heads/__init__.py b/tests/test_models/heads/__init__.py similarity index 100% rename from tests/models/heads/__init__.py rename to tests/test_models/heads/__init__.py diff --git a/tests/models/heads/test_cls_head.py b/tests/test_models/heads/test_cls_head.py similarity index 100% rename from tests/models/heads/test_cls_head.py rename to tests/test_models/heads/test_cls_head.py diff --git a/tests/models/pose/__init__.py b/tests/test_models/pose/__init__.py similarity index 100% rename from tests/models/pose/__init__.py rename to tests/test_models/pose/__init__.py diff --git a/tests/models/pose/test_top_down.py b/tests/test_models/pose/test_top_down.py similarity index 100% rename from tests/models/pose/test_top_down.py rename to tests/test_models/pose/test_top_down.py diff --git a/tests/models/segmentation/__init__.py b/tests/test_models/segmentation/__init__.py similarity index 100% rename from tests/models/segmentation/__init__.py rename to tests/test_models/segmentation/__init__.py diff --git a/tests/models/segmentation/heads/__init__.py b/tests/test_models/segmentation/heads/__init__.py similarity index 100% rename from tests/models/segmentation/heads/__init__.py rename to tests/test_models/segmentation/heads/__init__.py diff --git a/tests/models/segmentation/heads/test_fcn_head.py b/tests/test_models/segmentation/heads/test_fcn_head.py similarity index 100% rename from tests/models/segmentation/heads/test_fcn_head.py rename to tests/test_models/segmentation/heads/test_fcn_head.py diff --git a/tests/models/segmentation/heads/test_uper_head.py b/tests/test_models/segmentation/heads/test_uper_head.py similarity index 100% rename from tests/models/segmentation/heads/test_uper_head.py rename to tests/test_models/segmentation/heads/test_uper_head.py diff --git a/tests/models/segmentation/test_stdc.py b/tests/test_models/segmentation/test_stdc.py similarity index 100% rename from tests/models/segmentation/test_stdc.py rename to tests/test_models/segmentation/test_stdc.py diff --git a/tests/models/selfsup/__init__.py 
b/tests/test_models/selfsup/__init__.py similarity index 100% rename from tests/models/selfsup/__init__.py rename to tests/test_models/selfsup/__init__.py diff --git a/tests/models/selfsup/test_byol.py b/tests/test_models/selfsup/test_byol.py similarity index 100% rename from tests/models/selfsup/test_byol.py rename to tests/test_models/selfsup/test_byol.py diff --git a/tests/models/selfsup/test_dino.py b/tests/test_models/selfsup/test_dino.py similarity index 100% rename from tests/models/selfsup/test_dino.py rename to tests/test_models/selfsup/test_dino.py diff --git a/tests/models/selfsup/test_mae.py b/tests/test_models/selfsup/test_mae.py similarity index 100% rename from tests/models/selfsup/test_mae.py rename to tests/test_models/selfsup/test_mae.py diff --git a/tests/models/selfsup/test_mixco.py b/tests/test_models/selfsup/test_mixco.py similarity index 100% rename from tests/models/selfsup/test_mixco.py rename to tests/test_models/selfsup/test_mixco.py diff --git a/tests/models/selfsup/test_moby.py b/tests/test_models/selfsup/test_moby.py similarity index 100% rename from tests/models/selfsup/test_moby.py rename to tests/test_models/selfsup/test_moby.py diff --git a/tests/models/selfsup/test_moco.py b/tests/test_models/selfsup/test_moco.py similarity index 100% rename from tests/models/selfsup/test_moco.py rename to tests/test_models/selfsup/test_moco.py diff --git a/tests/models/selfsup/test_simclr.py b/tests/test_models/selfsup/test_simclr.py similarity index 100% rename from tests/models/selfsup/test_simclr.py rename to tests/test_models/selfsup/test_simclr.py diff --git a/tests/models/selfsup/test_swav.py b/tests/test_models/selfsup/test_swav.py similarity index 100% rename from tests/models/selfsup/test_swav.py rename to tests/test_models/selfsup/test_swav.py diff --git a/tests/models/video_recognition/__init__.py b/tests/test_models/video_recognition/__init__.py similarity index 100% rename from tests/models/video_recognition/__init__.py rename to tests/test_models/video_recognition/__init__.py diff --git a/tests/models/video_recognition/test_recognizer3d.py b/tests/test_models/video_recognition/test_recognizer3d.py similarity index 100% rename from tests/models/video_recognition/test_recognizer3d.py rename to tests/test_models/video_recognition/test_recognizer3d.py diff --git a/tests/models/video_recognition/test_stdcn.py b/tests/test_models/video_recognition/test_stdcn.py similarity index 100% rename from tests/models/video_recognition/test_stdcn.py rename to tests/test_models/video_recognition/test_stdcn.py diff --git a/tests/predictors/__init__.py b/tests/test_predictors/__init__.py similarity index 100% rename from tests/predictors/__init__.py rename to tests/test_predictors/__init__.py diff --git a/tests/predictors/test_bevformer_predictor.py b/tests/test_predictors/test_bevformer_predictor.py similarity index 100% rename from tests/predictors/test_bevformer_predictor.py rename to tests/test_predictors/test_bevformer_predictor.py diff --git a/tests/predictors/test_classifier.py b/tests/test_predictors/test_classifier.py similarity index 100% rename from tests/predictors/test_classifier.py rename to tests/test_predictors/test_classifier.py diff --git a/tests/predictors/test_detector.py b/tests/test_predictors/test_detector.py similarity index 100% rename from tests/predictors/test_detector.py rename to tests/test_predictors/test_detector.py diff --git a/tests/predictors/test_detector_blade.py b/tests/test_predictors/test_detector_blade.py similarity index 100% rename 
from tests/predictors/test_detector_blade.py rename to tests/test_predictors/test_detector_blade.py diff --git a/tests/predictors/test_detector_easy_infer.py b/tests/test_predictors/test_detector_easy_infer.py similarity index 100% rename from tests/predictors/test_detector_easy_infer.py rename to tests/test_predictors/test_detector_easy_infer.py diff --git a/tests/predictors/test_face_keypoints_predictor.py b/tests/test_predictors/test_face_keypoints_predictor.py similarity index 100% rename from tests/predictors/test_face_keypoints_predictor.py rename to tests/test_predictors/test_face_keypoints_predictor.py diff --git a/tests/predictors/test_feature_extractor.py b/tests/test_predictors/test_feature_extractor.py similarity index 100% rename from tests/predictors/test_feature_extractor.py rename to tests/test_predictors/test_feature_extractor.py diff --git a/tests/predictors/test_hand_keypoints_predictor.py b/tests/test_predictors/test_hand_keypoints_predictor.py similarity index 100% rename from tests/predictors/test_hand_keypoints_predictor.py rename to tests/test_predictors/test_hand_keypoints_predictor.py diff --git a/tests/predictors/test_mot_predictor.py b/tests/test_predictors/test_mot_predictor.py similarity index 100% rename from tests/predictors/test_mot_predictor.py rename to tests/test_predictors/test_mot_predictor.py diff --git a/tests/predictors/test_ocr_predictor.py b/tests/test_predictors/test_ocr_predictor.py similarity index 100% rename from tests/predictors/test_ocr_predictor.py rename to tests/test_predictors/test_ocr_predictor.py diff --git a/tests/predictors/test_pose_predictor.py b/tests/test_predictors/test_pose_predictor.py similarity index 100% rename from tests/predictors/test_pose_predictor.py rename to tests/test_predictors/test_pose_predictor.py diff --git a/tests/predictors/test_reid_predictor.py b/tests/test_predictors/test_reid_predictor.py similarity index 100% rename from tests/predictors/test_reid_predictor.py rename to tests/test_predictors/test_reid_predictor.py diff --git a/tests/predictors/test_segmentation.py b/tests/test_predictors/test_segmentation.py similarity index 100% rename from tests/predictors/test_segmentation.py rename to tests/test_predictors/test_segmentation.py diff --git a/tests/predictors/test_video_classifier.py b/tests/test_predictors/test_video_classifier.py similarity index 100% rename from tests/predictors/test_video_classifier.py rename to tests/test_predictors/test_video_classifier.py diff --git a/tests/predictors/test_wholebody_keypoints_predictor.py b/tests/test_predictors/test_wholebody_keypoints_predictor.py similarity index 100% rename from tests/predictors/test_wholebody_keypoints_predictor.py rename to tests/test_predictors/test_wholebody_keypoints_predictor.py diff --git a/tests/toolkit/__init__.py b/tests/test_toolkit/__init__.py similarity index 100% rename from tests/toolkit/__init__.py rename to tests/test_toolkit/__init__.py diff --git a/tests/toolkit/torchacc/__init__.py b/tests/test_toolkit/modelscope/__init__.py similarity index 100% rename from tests/toolkit/torchacc/__init__.py rename to tests/test_toolkit/modelscope/__init__.py diff --git a/tests/tools/__init__.py b/tests/test_toolkit/modelscope/msdatasets/__init__.py similarity index 100% rename from tests/tools/__init__.py rename to tests/test_toolkit/modelscope/msdatasets/__init__.py diff --git a/tests/test_toolkit/modelscope/msdatasets/test_ms_dataset.py b/tests/test_toolkit/modelscope/msdatasets/test_ms_dataset.py new file mode 100644 index 
00000000..bd11a5c7 --- /dev/null +++ b/tests/test_toolkit/modelscope/msdatasets/test_ms_dataset.py @@ -0,0 +1,25 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import unittest + +from modelscope.msdatasets import MsDataset +from modelscope.utils.constant import DownloadMode + + +class MsDatasetTest(unittest.TestCase): + + def setUp(self): + print(('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + + def test_streaming_load_coco(self): + small_coco_for_test = MsDataset.load( + dataset_name='EasyCV/small_coco_for_test', + split='train', + use_streaming=True, + download_mode=DownloadMode.FORCE_REDOWNLOAD) + dataset_sample_dict = next(iter(small_coco_for_test)) + print(dataset_sample_dict) + assert dataset_sample_dict.values() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/utils/__init__.py b/tests/test_toolkit/modelscope/pipelines/__init__.py similarity index 100% rename from tests/utils/__init__.py rename to tests/test_toolkit/modelscope/pipelines/__init__.py diff --git a/tests/test_toolkit/modelscope/pipelines/test_panoptic_segmentation_pipeline.py b/tests/test_toolkit/modelscope/pipelines/test_panoptic_segmentation_pipeline.py new file mode 100644 index 00000000..4de349b1 --- /dev/null +++ b/tests/test_toolkit/modelscope/pipelines/test_panoptic_segmentation_pipeline.py @@ -0,0 +1,41 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import os +import tempfile +import unittest + +import cv2 +from modelscope.outputs import OutputKeys +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks +from modelscope.utils.cv.image_utils import panoptic_seg_masks_to_image +from modelscope.utils.demo_utils import DemoCompatibilityCheck +from modelscope.utils.test_utils import test_level +from tests.ut_config import BASE_LOCAL_PATH + + +class EasyCVPanopticSegmentationPipelineTest(unittest.TestCase, + DemoCompatibilityCheck): + img_path = os.path.join( + BASE_LOCAL_PATH, 'data/test_images/image_semantic_segmentation.jpg') + + def setUp(self) -> None: + self.task = Tasks.image_segmentation + self.model_id = 'damo/cv_r50_panoptic-segmentation_cocopan' + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_r50(self): + segmentor = pipeline(task=self.task, model=self.model_id) + outputs = segmentor(self.img_path) + draw_img = panoptic_seg_masks_to_image(outputs[OutputKeys.MASKS]) + with tempfile.NamedTemporaryFile(suffix='.jpg') as tmp_file: + tmp_save_path = tmp_file.name + cv2.imwrite(tmp_save_path, draw_img) + print('print ' + self.model_id + ' success') + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_demo_compatibility(self): + self.compatibility_check() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/pipelines/test_segmentation_pipeline.py b/tests/test_toolkit/modelscope/pipelines/test_segmentation_pipeline.py new file mode 100644 index 00000000..5e2ac3ca --- /dev/null +++ b/tests/test_toolkit/modelscope/pipelines/test_segmentation_pipeline.py @@ -0,0 +1,91 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
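+# Smoke tests for the ModelScope image_segmentation pipeline backed by EasyCV: each case builds a SegFormer (b0-b5) pipeline, runs it on a local test image and renders the predicted semantic masks into a temporary jpg.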
+import os +import tempfile +import unittest + +import cv2 +import numpy as np +from modelscope.outputs import OutputKeys +from modelscope.pipelines import pipeline +from modelscope.utils.constant import Tasks +from modelscope.utils.cv.image_utils import semantic_seg_masks_to_image +from modelscope.utils.demo_utils import DemoCompatibilityCheck +from modelscope.utils.test_utils import test_level +from PIL import Image +from tests.ut_config import BASE_LOCAL_PATH + + +class EasyCVSegmentationPipelineTest(unittest.TestCase, + DemoCompatibilityCheck): + img_path = os.path.join(BASE_LOCAL_PATH, + 'data/test_images/image_segmentation.jpg') + + def setUp(self) -> None: + self.task = Tasks.image_segmentation + self.model_id = 'damo/cv_segformer-b0_image_semantic-segmentation_coco-stuff164k' + + def _internal_test_(self, model_id): + semantic_seg = pipeline(task=Tasks.image_segmentation, model=model_id) + outputs = semantic_seg(self.img_path) + + draw_img = semantic_seg_masks_to_image(outputs[OutputKeys.MASKS]) + with tempfile.NamedTemporaryFile(suffix='.jpg') as tmp_file: + tmp_save_path = tmp_file.name + cv2.imwrite(tmp_save_path, draw_img) + print('test ' + model_id + ' DONE') + + def _internal_test_batch_(self, model_id, num_samples=2, batch_size=2): + # TODO: support in the future + img = np.asarray(Image.open(self.img_path)) + num_samples = num_samples + batch_size = batch_size + semantic_seg = pipeline( + task=Tasks.image_segmentation, + model=model_id, + batch_size=batch_size) + outputs = semantic_seg([self.img_path] * num_samples) + + self.assertEqual(semantic_seg.predict_op.batch_size, batch_size) + self.assertEqual(len(outputs), num_samples) + + for output in outputs: + self.assertListEqual( + list(img.shape)[:2], list(output['seg_pred'].shape)) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_segformer_b0(self): + model_id = 'damo/cv_segformer-b0_image_semantic-segmentation_coco-stuff164k' + self._internal_test_(model_id) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_segformer_b1(self): + model_id = 'damo/cv_segformer-b1_image_semantic-segmentation_coco-stuff164k' + self._internal_test_(model_id) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_segformer_b2(self): + model_id = 'damo/cv_segformer-b2_image_semantic-segmentation_coco-stuff164k' + self._internal_test_(model_id) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_segformer_b3(self): + model_id = 'damo/cv_segformer-b3_image_semantic-segmentation_coco-stuff164k' + self._internal_test_(model_id) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_segformer_b4(self): + model_id = 'damo/cv_segformer-b4_image_semantic-segmentation_coco-stuff164k' + self._internal_test_(model_id) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_segformer_b5(self): + model_id = 'damo/cv_segformer-b5_image_semantic-segmentation_coco-stuff164k' + self._internal_test_(model_id) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_demo_compatibility(self): + self.compatibility_check() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/trainers/__init__.py b/tests/test_toolkit/modelscope/trainers/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/test_toolkit/modelscope/trainers/test_easycv_trainer.py 
b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer.py new file mode 100644 index 00000000..0d80b297 --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer.py @@ -0,0 +1,243 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import glob +import json +import os +import shutil +import tempfile +import unittest + +import torch +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.config import Config +from modelscope.utils.constant import LogKeys, ModeKeys, Tasks +from modelscope.utils.logger import get_logger +from modelscope.utils.test_utils import DistributedTestCase, test_level +from modelscope.utils.torch_utils import is_master + +from easycv.toolkit.modelscope.metainfo import EasyCVModels as Models +from easycv.toolkit.modelscope.metainfo import EasyCVPipelines as Pipelines + + +def train_func(work_dir, dist=False, log_interval=3, imgs_per_gpu=4): + import easycv + + easycv_dir = os.path.dirname(easycv.__file__) + config_file = 'configs/detection/yolox/yolox_s_8xb16_300e_coco.py' + if os.path.exists(os.path.join(easycv_dir, config_file)): + config_path = os.path.join(easycv_dir, config_file) + else: + config_path = os.path.join(os.path.dirname(easycv_dir), config_file) + + cfg = Config.from_file(config_path) + + cfg.log_config.update( + dict(hooks=[ + dict(type='TextLoggerHook'), + dict(type='TensorboardLoggerHook') + ])) # not support TensorboardLoggerHookV2 + + ms_cfg_file = os.path.join(work_dir, 'ms_yolox_s_8xb16_300e_coco.json') + from easycv.utils.ms_utils import to_ms_config + + if is_master(): + to_ms_config( + cfg, + dump=True, + task=Tasks.image_object_detection, + ms_model_name=Models.yolox, + pipeline_name=Pipelines.easycv_detection, + save_path=ms_cfg_file) + + trainer_name = 'easycv' + train_dataset = MsDataset.load( + dataset_name='small_coco_for_test', namespace='EasyCV', split='train') + eval_dataset = MsDataset.load( + dataset_name='small_coco_for_test', + namespace='EasyCV', + split='validation') + + cfg_options = { + 'train.max_epochs': + 2, + 'train.dataloader.batch_size_per_gpu': + imgs_per_gpu, + 'evaluation.dataloader.batch_size_per_gpu': + 2, + 'train.hooks': [ + { + 'type': 'CheckpointHook', + 'interval': 1 + }, + { + 'type': 'EvaluationHook', + 'interval': 1 + }, + { + 'type': 'TextLoggerHook', + 'ignore_rounding_keys': None, + 'interval': log_interval + }, + ] + } + kwargs = dict( + cfg_file=ms_cfg_file, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=work_dir, + cfg_options=cfg_options, + launcher='pytorch' if dist else None) + + trainer = build_trainer(trainer_name, kwargs) + trainer.train() + + +@unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest') +class EasyCVTrainerTestSingleGpu(unittest.TestCase): + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + self.tmp_dir = tempfile.TemporaryDirectory().name + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) + + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmp_dir, ignore_errors=True) + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_single_gpu(self): + train_func(self.tmp_dir) + + results_files = os.listdir(self.tmp_dir) + json_files = glob.glob(os.path.join(self.tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + + with open(json_files[0], 'r', encoding='utf-8') as f: + lines = [i.strip() for i in f.readlines()] 
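+        # the JSON log is expected to alternate train/eval records per epoch: train epoch 1, eval epoch 1, train epoch 2, eval epoch 2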
+ + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.TRAIN, + LogKeys.EPOCH: 1, + LogKeys.ITER: 3, + LogKeys.LR: 0.00029 + }, json.loads(lines[0])) + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.EVAL, + LogKeys.EPOCH: 1, + LogKeys.ITER: 10 + }, json.loads(lines[1])) + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.TRAIN, + LogKeys.EPOCH: 2, + LogKeys.ITER: 3, + LogKeys.LR: 0.00205 + }, json.loads(lines[2])) + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.EVAL, + LogKeys.EPOCH: 2, + LogKeys.ITER: 10 + }, json.loads(lines[3])) + self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files) + self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files) + for i in [0, 2]: + self.assertIn(LogKeys.DATA_LOAD_TIME, lines[i]) + self.assertIn(LogKeys.ITER_TIME, lines[i]) + self.assertIn(LogKeys.MEMORY, lines[i]) + self.assertIn('total_loss', lines[i]) + for i in [1, 3]: + self.assertIn( + 'CocoDetectionEvaluator_DetectionBoxes_Precision/mAP', + lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP', lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP@.50IOU', lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP@.75IOU', lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP (small)', lines[i]) + + +@unittest.skipIf(not torch.cuda.is_available() + or torch.cuda.device_count() <= 1, 'distributed unittest') +class EasyCVTrainerTestMultiGpus(DistributedTestCase): + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + self.tmp_dir = tempfile.TemporaryDirectory().name + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) + + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmp_dir, ignore_errors=True) + + @unittest.skipUnless(False, 'skip test in current test level') + def test_multi_gpus(self): + self.start( + train_func, + num_gpus=2, + work_dir=self.tmp_dir, + dist=True, + log_interval=2, + imgs_per_gpu=5) + + results_files = os.listdir(self.tmp_dir) + json_files = glob.glob(os.path.join(self.tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + + with open(json_files[0], 'r', encoding='utf-8') as f: + lines = [i.strip() for i in f.readlines()] + + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.TRAIN, + LogKeys.EPOCH: 1, + LogKeys.ITER: 2, + LogKeys.LR: 0.0002 + }, json.loads(lines[0])) + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.EVAL, + LogKeys.EPOCH: 1, + LogKeys.ITER: 5 + }, json.loads(lines[1])) + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.TRAIN, + LogKeys.EPOCH: 2, + LogKeys.ITER: 2, + LogKeys.LR: 0.0018 + }, json.loads(lines[2])) + self.assertDictContainsSubset( + { + LogKeys.MODE: ModeKeys.EVAL, + LogKeys.EPOCH: 2, + LogKeys.ITER: 5 + }, json.loads(lines[3])) + + self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files) + self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files) + + for i in [0, 2]: + self.assertIn(LogKeys.DATA_LOAD_TIME, lines[i]) + self.assertIn(LogKeys.ITER_TIME, lines[i]) + self.assertIn(LogKeys.MEMORY, lines[i]) + self.assertIn('total_loss', lines[i]) + for i in [1, 3]: + self.assertIn( + 'CocoDetectionEvaluator_DetectionBoxes_Precision/mAP', + lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP', lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP@.50IOU', lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP@.75IOU', lines[i]) + self.assertIn('DetectionBoxes_Precision/mAP (small)', lines[i]) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_detection_dino.py b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_detection_dino.py new file mode 100644 index 00000000..7717cc01 --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_detection_dino.py @@ -0,0 +1,67 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. +import glob +import os +import tempfile +import unittest + +import torch +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import LogKeys +from modelscope.utils.logger import get_logger +from modelscope.utils.test_utils import test_level + + +@unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest') +class EasyCVTrainerTestDetectionDino(unittest.TestCase): + model_id = 'damo/cv_swinl_image-object-detection_dino' + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + + def _train(self, tmp_dir): + cfg_options = {'train.max_epochs': 1} + + trainer_name = 'easycv' + + train_dataset = MsDataset.load( + dataset_name='small_coco_for_test', + namespace='EasyCV', + split='train') + eval_dataset = MsDataset.load( + dataset_name='small_coco_for_test', + namespace='EasyCV', + split='validation') + + kwargs = dict( + model=self.model_id, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=tmp_dir, + use_fp16=True, + cfg_options=cfg_options) + + trainer = build_trainer(trainer_name, kwargs) + trainer.train() + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_trainer_single_gpu(self): + temp_file_dir = tempfile.TemporaryDirectory() + tmp_dir = temp_file_dir.name + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + self._train(tmp_dir) + + results_files = os.listdir(tmp_dir) + json_files = glob.glob(os.path.join(tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files) + + temp_file_dir.cleanup() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_face_2d_keypoints.py b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_face_2d_keypoints.py new file mode 100644 index 00000000..037dd07d --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_face_2d_keypoints.py @@ -0,0 +1,68 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
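+# Fine-tunes the mobilenet face 2D keypoints model for 2 epochs; the test itself stays skipped while face_2d_keypoints_dataset remains private.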
+import glob +import os +import tempfile +import unittest + +import torch +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import DownloadMode, LogKeys +from modelscope.utils.logger import get_logger + + +@unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest') +class EasyCVTrainerTestFace2DKeypoints(unittest.TestCase): + model_id = 'damo/cv_mobilenet_face-2d-keypoints_alignment' + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + + def _train(self, tmp_dir): + cfg_options = {'train.max_epochs': 2} + + trainer_name = 'easycv' + + train_dataset = MsDataset.load( + dataset_name='face_2d_keypoints_dataset', + namespace='modelscope', + split='train', + download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS) + eval_dataset = MsDataset.load( + dataset_name='face_2d_keypoints_dataset', + namespace='modelscope', + split='train', + download_mode=DownloadMode.REUSE_DATASET_IF_EXISTS) + + kwargs = dict( + model=self.model_id, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=tmp_dir, + cfg_options=cfg_options) + + trainer = build_trainer(trainer_name, kwargs) + trainer.train() + + @unittest.skip( + 'skip since face_2d_keypoints_dataset is set to private for now') + def test_trainer_single_gpu(self): + temp_file_dir = tempfile.TemporaryDirectory() + tmp_dir = temp_file_dir.name + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + self._train(tmp_dir) + + results_files = os.listdir(tmp_dir) + json_files = glob.glob(os.path.join(tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files) + + temp_file_dir.cleanup() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_hand_2d_keypoints.py b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_hand_2d_keypoints.py new file mode 100644 index 00000000..1184747b --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_hand_2d_keypoints.py @@ -0,0 +1,70 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
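+# Trains the HRNet-w18 hand pose model for 20 epochs on a coco-wholebody subset and expects checkpoints to be saved at epoch 10 and epoch 20.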
+import glob +import os +import shutil +import tempfile +import unittest + +import torch +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import DownloadMode, LogKeys +from modelscope.utils.logger import get_logger +from modelscope.utils.test_utils import test_level + + +@unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest') +class EasyCVTrainerTestHand2dKeypoints(unittest.TestCase): + model_id = 'damo/cv_hrnetw18_hand-pose-keypoints_coco-wholebody' + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + self.tmp_dir = tempfile.TemporaryDirectory().name + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) + + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmp_dir, ignore_errors=True) + + def _train(self): + cfg_options = {'train.max_epochs': 20} + + trainer_name = 'easycv' + + train_dataset = MsDataset.load( + dataset_name='cv_hand_2d_keypoints_coco_wholebody', + namespace='chenhyer', + split='subtrain', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + eval_dataset = MsDataset.load( + dataset_name='cv_hand_2d_keypoints_coco_wholebody', + namespace='chenhyer', + split='subtrain', + download_mode=DownloadMode.FORCE_REDOWNLOAD) + + kwargs = dict( + model=self.model_id, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=self.tmp_dir, + cfg_options=cfg_options) + + trainer = build_trainer(trainer_name, kwargs) + trainer.train() + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_trainer_single_gpu(self): + self._train() + + results_files = os.listdir(self.tmp_dir) + json_files = glob.glob(os.path.join(self.tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + self.assertIn(f'{LogKeys.EPOCH}_10.pth', results_files) + self.assertIn(f'{LogKeys.EPOCH}_20.pth', results_files) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_hand_detection.py b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_hand_detection.py new file mode 100644 index 00000000..15ce84fb --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_hand_detection.py @@ -0,0 +1,58 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
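+# Fine-tunes the YOLOX-PAI hand detector for 2 epochs on hand_detection_dataset; only runs at test level >= 1.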
+import os +import tempfile +import unittest + +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import LogKeys +from modelscope.utils.logger import get_logger +from modelscope.utils.test_utils import test_level + + +class EasyCVTrainerTestHandDetection(unittest.TestCase): + model_id = 'damo/cv_yolox-pai_hand-detection' + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + + def _train(self, tmp_dir): + cfg_options = {'train.max_epochs': 2} + + trainer_name = 'easycv' + + train_dataset = MsDataset.load( + dataset_name='hand_detection_dataset', split='subtrain') + eval_dataset = MsDataset.load( + dataset_name='hand_detection_dataset', split='subtrain') + + kwargs = dict( + model=self.model_id, + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=tmp_dir, + cfg_options=cfg_options) + + trainer = build_trainer(trainer_name, kwargs) + trainer.train() + + @unittest.skipUnless(test_level() >= 1, 'skip test in current test level') + def test_trainer_single_gpu(self): + temp_file_dir = tempfile.TemporaryDirectory() + tmp_dir = temp_file_dir.name + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + self._train(tmp_dir) + + results_files = os.listdir(tmp_dir) + # json_files = glob.glob(os.path.join(tmp_dir, '*.log.json')) + self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files) + + temp_file_dir.cleanup() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_panoptic_mask2former.py b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_panoptic_mask2former.py new file mode 100644 index 00000000..eedf2e93 --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_panoptic_mask2former.py @@ -0,0 +1,68 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
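+# Trains the r50 panoptic Mask2Former model for one epoch; YOLOXLrUpdaterHook is popped from the mmcv hook registry around train() (presumably to avoid a duplicate registration between easycv and mmcv) and restored afterwards.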
+import glob +import os +import shutil +import tempfile +import unittest + +import torch +from mmcv.runner.hooks import HOOKS as MMCV_HOOKS +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import LogKeys +from modelscope.utils.logger import get_logger +from modelscope.utils.test_utils import test_level + + +@unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest') +class EasyCVTrainerTestPanopticMask2Former(unittest.TestCase): + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + self.tmp_dir = tempfile.TemporaryDirectory().name + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) + + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmp_dir, ignore_errors=True) + + def _train(self): + cfg_options = {'train.max_epochs': 1} + + trainer_name = 'easycv' + + train_dataset = MsDataset.load( + dataset_name='COCO2017_panopic_subset', split='train') + eval_dataset = MsDataset.load( + dataset_name='COCO2017_panopic_subset', split='validation') + kwargs = dict( + model='damo/cv_r50_panoptic-segmentation_cocopan', + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=self.tmp_dir, + cfg_options=cfg_options) + + trainer = build_trainer(trainer_name, kwargs) + + hook_name = 'YOLOXLrUpdaterHook' + mmcv_hook = MMCV_HOOKS._module_dict.pop(hook_name, None) + + trainer.train() + + MMCV_HOOKS._module_dict[hook_name] = mmcv_hook + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_single_gpu_mask2former_r50(self): + self._train() + + results_files = os.listdir(self.tmp_dir) + json_files = glob.glob(os.path.join(self.tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_realtime_object_detection.py b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_realtime_object_detection.py new file mode 100644 index 00000000..88f8f298 --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_easycv_trainer_realtime_object_detection.py @@ -0,0 +1,96 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
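+# Fine-tunes the CSPNet YOLOX detector on small_coco_for_test for 2 epochs, initializing weights via load_from from the pytorch_model.bin fetched with snapshot_download.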
+import glob +import os +import tempfile +import unittest + +import torch +from modelscope.hub.snapshot_download import snapshot_download +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import LogKeys +from modelscope.utils.logger import get_logger +from modelscope.utils.test_utils import test_level + + +@unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest') +class EasyCVTrainerTestRealtimeObjectDetection(unittest.TestCase): + model_id = 'damo/cv_cspnet_image-object-detection_yolox' + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + + def _train(self, tmp_dir): + self.cache_path = snapshot_download(self.model_id) + cfg_options = { + 'train.max_epochs': + 2, + 'train.dataloader.batch_size_per_gpu': + 4, + 'evaluation.dataloader.batch_size_per_gpu': + 2, + 'train.hooks': [ + { + 'type': 'CheckpointHook', + 'interval': 1 + }, + { + 'type': 'EvaluationHook', + 'interval': 1 + }, + { + 'type': 'TextLoggerHook', + 'ignore_rounding_keys': None, + 'interval': 2 + }, + ], + 'load_from': + os.path.join(self.cache_path, 'pytorch_model.bin') + } + + trainer_name = 'easycv' + + train_dataset = MsDataset.load( + dataset_name='small_coco_for_test', + namespace='EasyCV', + split='train') + eval_dataset = MsDataset.load( + dataset_name='small_coco_for_test', + namespace='EasyCV', + split='validation') + + kwargs = dict( + model=self.model_id, + # model_revision='v1.0.2', + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=tmp_dir, + cfg_options=cfg_options) + + trainer = build_trainer(trainer_name, kwargs) + trainer.train() + + @unittest.skipUnless( + test_level() >= 0, + 'skip test in current test level') + def test_trainer_single_gpu(self): + temp_file_dir = tempfile.TemporaryDirectory() + tmp_dir = temp_file_dir.name + if not os.path.exists(tmp_dir): + os.makedirs(tmp_dir) + + self._train(tmp_dir) + + results_files = os.listdir(tmp_dir) + json_files = glob.glob(os.path.join(tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files) + + temp_file_dir.cleanup() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/modelscope/trainers/test_segformer.py b/tests/test_toolkit/modelscope/trainers/test_segformer.py new file mode 100644 index 00000000..2c89e8c7 --- /dev/null +++ b/tests/test_toolkit/modelscope/trainers/test_segformer.py @@ -0,0 +1,70 @@ +# Copyright (c) Alibaba, Inc. and its affiliates.
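+# Trains SegFormer-b0 on small_coco_stuff164k for 2 epochs; the decode head norm layer is overridden to plain BN so the test can run on a single GPU (SyncBN needs distributed training).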
+import glob +import os +import shutil +import tempfile +import unittest + +import torch +from modelscope.msdatasets import MsDataset +from modelscope.trainers import build_trainer +from modelscope.utils.constant import LogKeys +from modelscope.utils.logger import get_logger +from modelscope.utils.test_utils import test_level + + +@unittest.skipIf(not torch.cuda.is_available(), 'cuda unittest') +class EasyCVTrainerTestSegformer(unittest.TestCase): + + def setUp(self): + self.logger = get_logger() + self.logger.info( + ('Testing %s.%s' % (type(self).__name__, self._testMethodName))) + self.tmp_dir = tempfile.TemporaryDirectory().name + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) + + def tearDown(self): + super().tearDown() + shutil.rmtree(self.tmp_dir, ignore_errors=True) + + def _train(self): + + cfg_options = { + 'train.max_epochs': 2, + 'model.decode_head.norm_cfg.type': 'BN' + } + + trainer_name = 'easycv' + train_dataset = MsDataset.load( + dataset_name='small_coco_stuff164k', + namespace='EasyCV', + split='train') + eval_dataset = MsDataset.load( + dataset_name='small_coco_stuff164k', + namespace='EasyCV', + split='validation') + kwargs = dict( + model= + 'damo/cv_segformer-b0_image_semantic-segmentation_coco-stuff164k', + train_dataset=train_dataset, + eval_dataset=eval_dataset, + work_dir=self.tmp_dir, + cfg_options=cfg_options) + + trainer = build_trainer(trainer_name, kwargs) + trainer.train() + + @unittest.skipUnless(test_level() >= 0, 'skip test in current test level') + def test_single_gpu_segformer(self): + self._train() + + results_files = os.listdir(self.tmp_dir) + json_files = glob.glob(os.path.join(self.tmp_dir, '*.log.json')) + self.assertEqual(len(json_files), 1) + self.assertIn(f'{LogKeys.EPOCH}_1.pth', results_files) + self.assertIn(f'{LogKeys.EPOCH}_2.pth', results_files) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/test_toolkit/torchacc/__init__.py b/tests/test_toolkit/torchacc/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/toolkit/torchacc/test_convert_ops.py b/tests/test_toolkit/torchacc/test_convert_ops.py similarity index 100% rename from tests/toolkit/torchacc/test_convert_ops.py rename to tests/test_toolkit/torchacc/test_convert_ops.py diff --git a/tests/test_tools/__init__.py b/tests/test_tools/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/tools/test_classification_train.py b/tests/test_tools/test_classification_train.py similarity index 95% rename from tests/tools/test_classification_train.py rename to tests/test_tools/test_classification_train.py index 0637ec7c..621dc188 100644 --- a/tests/tools/test_classification_train.py +++ b/tests/test_tools/test_classification_train.py @@ -98,7 +98,9 @@ def _base_train(self, train_cfgs, adapt_pai=False): (tmp_cfg_file, work_dir, args_str) logging.info('run command: %s' % cmd) - run_in_subprocess(cmd) + # run_in_subprocess(cmd)  # the pipe buffer fills up and subsequent writes hang, so run the command via subprocess.call instead + import subprocess + subprocess.call(cmd, shell=True) output_files = io.listdir(work_dir) self.assertIn('epoch_1.pth', output_files) diff --git a/tests/tools/test_eval.py b/tests/test_tools/test_eval.py similarity index 100% rename from tests/tools/test_eval.py rename to tests/test_tools/test_eval.py diff --git a/tests/tools/test_mae_train.py b/tests/test_tools/test_mae_train.py similarity index 100% rename from tests/tools/test_mae_train.py rename to tests/test_tools/test_mae_train.py diff --git a/tests/tools/test_mask2former_train.py
b/tests/test_tools/test_mask2former_train.py similarity index 100% rename from tests/tools/test_mask2former_train.py rename to tests/test_tools/test_mask2former_train.py diff --git a/tests/tools/test_pose_train.py b/tests/test_tools/test_pose_train.py similarity index 100% rename from tests/tools/test_pose_train.py rename to tests/test_tools/test_pose_train.py diff --git a/tests/tools/test_predict.py b/tests/test_tools/test_predict.py similarity index 68% rename from tests/tools/test_predict.py rename to tests/test_tools/test_predict.py index 47493001..74c163fd 100644 --- a/tests/tools/test_predict.py +++ b/tests/test_tools/test_predict.py @@ -13,7 +13,7 @@ from tests.ut_config import (PRETRAINED_MODEL_SEGFORMER, PRETRAINED_MODEL_YOLOXS_EXPORT, TEST_IMAGES_DIR) -from easycv.file import io +from easycv.file import get_oss_config, io from easycv.utils.test_util import run_in_subprocess sys.path.append(os.path.dirname(os.path.realpath(__file__))) @@ -75,6 +75,32 @@ def test_predict_dist(self): model_path = PRETRAINED_MODEL_YOLOXS_EXPORT self._base_predict(model_type, model_path, dist=True) + def test_predict_oss_path(self): + model_type = 'YoloXPredictor' + model_path = PRETRAINED_MODEL_YOLOXS_EXPORT + + os.environ['OSS_CONFIG_FILE'] = '~/.ossutilconfig.unittest' + oss_config = get_oss_config() + ak_id = oss_config['ak_id'] + ak_secret = oss_config['ak_secret'] + hosts = oss_config['hosts'] + ['oss-cn-hangzhou.aliyuncs.com'] + hosts = ','.join(_ for _ in hosts) + buckets = oss_config['buckets'] + ['easycv'] + buckets = ','.join(_ for _ in buckets) + + input_file = 'oss://easycv/data/small_test_data/test_images/http_image_list.txt' + output_file = tempfile.NamedTemporaryFile('w').name + cmd = f'PYTHONPATH=. python tools/predict.py \ + --input_file {input_file} \ + --output_file {output_file} \ + --model_type {model_type} \ + --model_path {model_path} \ + --oss_io_config ak_id={ak_id} ak_secret={ak_secret} hosts={hosts} buckets={buckets}' + + logging.info('run command: %s' % cmd) + run_in_subprocess(cmd) + io.remove(output_file) + if __name__ == '__main__': unittest.main() diff --git a/tests/tools/test_prune.py b/tests/test_tools/test_prune.py similarity index 100% rename from tests/tools/test_prune.py rename to tests/test_tools/test_prune.py diff --git a/tests/tools/test_quantize.py b/tests/test_tools/test_quantize.py similarity index 100% rename from tests/tools/test_quantize.py rename to tests/test_tools/test_quantize.py diff --git a/tests/tools/test_yolox_train.py b/tests/test_tools/test_yolox_train.py similarity index 100% rename from tests/tools/test_yolox_train.py rename to tests/test_tools/test_yolox_train.py diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/utils/test_bbox_util.py b/tests/test_utils/test_bbox_util.py similarity index 100% rename from tests/utils/test_bbox_util.py rename to tests/test_utils/test_bbox_util.py diff --git a/tests/utils/test_json_utils.py b/tests/test_utils/test_json_utils.py similarity index 100% rename from tests/utils/test_json_utils.py rename to tests/test_utils/test_json_utils.py diff --git a/tests/utils/test_mmlab_utils.py b/tests/test_utils/test_mmlab_utils.py similarity index 100% rename from tests/utils/test_mmlab_utils.py rename to tests/test_utils/test_mmlab_utils.py diff --git a/tests/utils/test_ms_utils.py b/tests/test_utils/test_ms_utils.py similarity index 100% rename from tests/utils/test_ms_utils.py rename to tests/test_utils/test_ms_utils.py diff --git 
a/tools/predict.py b/tools/predict.py index e395c85d..8585cb63 100644 --- a/tools/predict.py +++ b/tools/predict.py @@ -10,8 +10,9 @@ import os import threading import traceback - import torch +from mmcv import DictAction +from easycv.file import io try: import easy_predict @@ -115,6 +116,12 @@ def define_args(): type=str, choices=[None, 'pytorch'], help='if assigned pytorch, should be used in gpu environment') + parser.add_argument( + '--oss_io_config', + nargs='+', + action=DictAction, + help='oss config (key=value pairs: ak_id, ak_secret, hosts, buckets) needed to access data on oss') + args = parser.parse_args() if 'LOCAL_RANK' not in os.environ: os.environ['LOCAL_RANK'] = str(args.local_rank) @@ -255,7 +262,7 @@ def create_default_predictor_kwargs(model_dir): if config_path: return {'model_path': model_path, 'config_file': config_path} else: - return {'model_path': model_path} + return {'model_path': model_path, 'config_file': None} def create_predictor_kwargs(model_type, model_dir): @@ -288,7 +295,7 @@ def replace_oss_with_local_path(ori_file, dst_file, bucket_prefix, local_prefix): bucket_prefix = bucket_prefix.rstrip('/') + '/' local_prefix = local_prefix.rstrip('/') + '/' - with open(ori_file, 'r') as infile: + with io.open(ori_file, 'r') as infile: with open(dst_file, 'w') as ofile: for l in infile: if l.startswith('oss://'): @@ -301,9 +308,26 @@ def build_and_run_file_io(args): rank, world_size = get_dist_info() worker_id = rank - input_oss_file_new_host = args.input_file + '.tmp%d' % worker_id - replace_oss_with_local_path(args.input_file, input_oss_file_new_host, - args.oss_prefix, args.local_prefix) + # check oss_config and init oss io + if args.oss_io_config is not None: + io.access_oss(**args.oss_io_config) + + # acquire the temporary save path + if args.output_file: + io.makedirs(os.path.dirname(args.output_file)) + input_oss_file_new_host = os.path.join( + os.path.dirname(args.output_file), + os.path.basename(args.input_file + '.tmp%d' % worker_id)) + replace_oss_with_local_path(args.input_file, input_oss_file_new_host, + args.oss_prefix, args.local_prefix) + else: + io.makedirs(args.output_dir) + input_oss_file_new_host = os.path.join( + args.output_dir, + os.path.basename(args.input_file + '.tmp%d' % worker_id)) + replace_oss_with_local_path(args.input_file, input_oss_file_new_host, + args.oss_prefix, args.local_prefix) + args.input_file = input_oss_file_new_host num_worker = world_size print(f'worker num {num_worker}')