From ed53d94336bedbb385445d59bba31121951c5c37 Mon Sep 17 00:00:00 2001
From: gengenkai <30782254+gengenkai@users.noreply.github.com>
Date: Tue, 28 Dec 2021 15:22:59 +0800
Subject: [PATCH] [Docs] Docs revert (#1359)
* add README
* Revert "[Docs] Merge docs & docs_zh (#1342)"
This reverts commit 364b54d024a87ce1aa599aeccdbe83a7e2653fbe.
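For reference, a revert of this kind is produced with `git revert`, which auto-generates the "This reverts commit ..." line above; a minimal sketch, assuming a clean checkout of the target branch:

```bash
# Sketch only: recreate this revert locally. git revert creates a new
# commit whose message carries the "This reverts commit <sha>" line.
git revert 364b54d024a87ce1aa599aeccdbe83a7e2653fbe
```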
---
.github/workflows/build.yml | 4 +--
.gitignore | 3 +-
README.md | 30 +++++++++---------
README_zh-CN.md | 28 ++++++++--------
configs/detection/acrn/README.md | 6 ++--
configs/detection/acrn/README_zh-CN.md | 6 ++--
configs/detection/ava/README.md | 6 ++--
configs/detection/ava/README_zh-CN.md | 6 ++--
configs/detection/lfb/README.md | 4 +--
configs/detection/lfb/README_zh-CN.md | 4 +--
configs/localization/bmn/README.md | 6 ++--
configs/localization/bmn/README_zh-CN.md | 6 ++--
configs/localization/bsn/README.md | 6 ++--
configs/localization/bsn/README_zh-CN.md | 6 ++--
configs/localization/ssn/README.md | 6 ++--
configs/localization/ssn/README_zh-CN.md | 6 ++--
configs/recognition/c3d/README.md | 6 ++--
configs/recognition/c3d/README_zh-CN.md | 6 ++--
configs/recognition/csn/README.md | 6 ++--
configs/recognition/csn/README_zh-CN.md | 6 ++--
configs/recognition/i3d/README.md | 6 ++--
configs/recognition/i3d/README_zh-CN.md | 6 ++--
configs/recognition/r2plus1d/README.md | 6 ++--
configs/recognition/r2plus1d/README_zh-CN.md | 6 ++--
configs/recognition/slowfast/README.md | 6 ++--
configs/recognition/slowfast/README_zh-CN.md | 6 ++--
configs/recognition/slowonly/README.md | 6 ++--
configs/recognition/slowonly/README_zh-CN.md | 6 ++--
configs/recognition/tanet/README.md | 6 ++--
configs/recognition/tanet/README_zh-CN.md | 6 ++--
configs/recognition/timesformer/README.md | 6 ++--
.../recognition/timesformer/README_zh-CN.md | 6 ++--
configs/recognition/tin/README.md | 6 ++--
configs/recognition/tin/README_zh-CN.md | 6 ++--
configs/recognition/tpn/README.md | 6 ++--
configs/recognition/tpn/README_zh-CN.md | 4 +--
configs/recognition/trn/README.md | 4 +--
configs/recognition/trn/README_zh-CN.md | 4 +--
configs/recognition/tsm/README.md | 6 ++--
configs/recognition/tsm/README_zh-CN.md | 6 ++--
configs/recognition/tsn/README.md | 4 +--
configs/recognition/tsn/README_zh-CN.md | 4 +--
configs/recognition/x3d/README.md | 4 +--
configs/recognition/x3d/README_zh-CN.md | 4 +--
configs/recognition_audio/resnet/README.md | 6 ++--
.../recognition_audio/resnet/README_zh-CN.md | 6 ++--
configs/skeleton/2s-agcn/README.md | 4 +--
configs/skeleton/2s-agcn/README_zh-CN.md | 4 +--
configs/skeleton/posec3d/README.md | 4 +--
configs/skeleton/posec3d/README_zh-CN.md | 4 +--
configs/skeleton/stgcn/README.md | 4 +--
configs/skeleton/stgcn/README_zh-CN.md | 4 +--
demo/mmaction2_tutorial.ipynb | 2 +-
demo/mmaction2_tutorial_zh-CN.ipynb | 2 +-
docs/{en => }/Makefile | 0
docs/{en => }/_static/css/readthedocs.css | 0
docs/{en => }/_static/images/mmaction2.png | Bin
docs/{en => }/api.rst | 0
docs/{en => }/benchmark.md | 0
docs/{en => }/changelog.md | 0
docs/{en => }/conf.py | 4 +--
docs/{en => }/data_preparation.md | 0
docs/{en => }/faq.md | 6 ++--
docs/{en => }/feature_extraction.md | 0
docs/{en => }/getting_started.md | 0
docs/{en => }/index.rst | 0
docs/{en => }/install.md | 0
docs/{en => }/make.bat | 0
docs/{en => }/merge_docs.sh | 0
docs/{en => }/projects.md | 0
docs/{en => }/stat.py | 0
docs/{en => }/supported_datasets.md | 0
docs/{en => }/switch_language.md | 0
docs/{en => }/tutorials/1_config.md | 0
docs/{en => }/tutorials/2_finetune.md | 2 +-
docs/{en => }/tutorials/3_new_dataset.md | 0
docs/{en => }/tutorials/4_data_pipeline.md | 0
docs/{en => }/tutorials/5_new_modules.md | 0
docs/{en => }/tutorials/6_export_model.md | 0
.../{en => }/tutorials/7_customize_runtime.md | 0
docs/{en => }/useful_tools.md | 0
{docs/zh_cn => docs_zh_CN}/Makefile | 0
{docs/zh_cn => docs_zh_CN}/README.md | 0
{docs/zh_cn => docs_zh_CN}/api.rst | 0
{docs/zh_cn => docs_zh_CN}/benchmark.md | 0
{docs/zh_cn => docs_zh_CN}/conf.py | 4 +--
.../zh_cn => docs_zh_CN}/data_preparation.md | 0
{docs/zh_cn => docs_zh_CN}/demo.md | 0
{docs/zh_cn => docs_zh_CN}/faq.md | 4 +--
.../feature_extraction.md | 0
{docs/zh_cn => docs_zh_CN}/getting_started.md | 0
{docs/zh_cn => docs_zh_CN}/index.rst | 0
{docs/zh_cn => docs_zh_CN}/install.md | 0
{docs/zh_cn => docs_zh_CN}/make.bat | 0
{docs/zh_cn => docs_zh_CN}/merge_docs.sh | 14 ++++----
{docs/zh_cn => docs_zh_CN}/stat.py | 0
.../supported_datasets.md | 0
{docs/zh_cn => docs_zh_CN}/switch_language.md | 0
.../tutorials/1_config.md | 0
.../tutorials/2_finetune.md | 0
.../tutorials/3_new_dataset.md | 0
.../tutorials/4_data_pipeline.md | 0
.../tutorials/5_new_modules.md | 0
.../tutorials/6_export_model.md | 0
.../tutorials/7_customize_runtime.md | 0
{docs/zh_cn => docs_zh_CN}/useful_tools.md | 0
tools/data/activitynet/README.md | 6 ++--
tools/data/activitynet/README_zh-CN.md | 6 ++--
tools/data/ava/README.md | 4 +--
tools/data/ava/README_zh-CN.md | 4 +--
tools/data/diving48/README.md | 4 +--
tools/data/diving48/README_zh-CN.md | 4 +--
tools/data/gym/README.md | 4 +--
tools/data/gym/README_zh-CN.md | 4 +--
tools/data/hmdb51/README.md | 4 +--
tools/data/hmdb51/README_zh-CN.md | 4 +--
tools/data/hvu/README.md | 6 ++--
tools/data/hvu/README_zh-CN.md | 6 ++--
tools/data/jester/README.md | 4 +--
tools/data/jester/README_zh-CN.md | 4 +--
tools/data/kinetics/README.md | 6 ++--
tools/data/kinetics/README_zh-CN.md | 6 ++--
tools/data/mit/README.md | 4 +--
tools/data/mit/README_zh-CN.md | 4 +--
tools/data/mmit/README.md | 4 +--
tools/data/mmit/README_zh-CN.md | 4 +--
tools/data/sthv1/README.md | 4 +--
tools/data/sthv1/README_zh-CN.md | 4 +--
tools/data/sthv2/README.md | 4 +--
tools/data/sthv2/README_zh-CN.md | 4 +--
tools/data/thumos14/README.md | 4 +--
tools/data/thumos14/README_zh-CN.md | 4 +--
tools/data/ucf101/README.md | 4 +--
tools/data/ucf101/README_zh-CN.md | 4 +--
134 files changed, 242 insertions(+), 243 deletions(-)
rename docs/{en => }/Makefile (100%)
rename docs/{en => }/_static/css/readthedocs.css (100%)
rename docs/{en => }/_static/images/mmaction2.png (100%)
rename docs/{en => }/api.rst (100%)
rename docs/{en => }/benchmark.md (100%)
rename docs/{en => }/changelog.md (100%)
rename docs/{en => }/conf.py (97%)
rename docs/{en => }/data_preparation.md (100%)
rename docs/{en => }/faq.md (98%)
rename docs/{en => }/feature_extraction.md (100%)
rename docs/{en => }/getting_started.md (100%)
rename docs/{en => }/index.rst (100%)
rename docs/{en => }/install.md (100%)
rename docs/{en => }/make.bat (100%)
rename docs/{en => }/merge_docs.sh (100%)
rename docs/{en => }/projects.md (100%)
rename docs/{en => }/stat.py (100%)
rename docs/{en => }/supported_datasets.md (100%)
rename docs/{en => }/switch_language.md (100%)
rename docs/{en => }/tutorials/1_config.md (100%)
rename docs/{en => }/tutorials/2_finetune.md (96%)
rename docs/{en => }/tutorials/3_new_dataset.md (100%)
rename docs/{en => }/tutorials/4_data_pipeline.md (100%)
rename docs/{en => }/tutorials/5_new_modules.md (100%)
rename docs/{en => }/tutorials/6_export_model.md (100%)
rename docs/{en => }/tutorials/7_customize_runtime.md (100%)
rename docs/{en => }/useful_tools.md (100%)
rename {docs/zh_cn => docs_zh_CN}/Makefile (100%)
rename {docs/zh_cn => docs_zh_CN}/README.md (100%)
rename {docs/zh_cn => docs_zh_CN}/api.rst (100%)
rename {docs/zh_cn => docs_zh_CN}/benchmark.md (100%)
rename {docs/zh_cn => docs_zh_CN}/conf.py (97%)
rename {docs/zh_cn => docs_zh_CN}/data_preparation.md (100%)
rename {docs/zh_cn => docs_zh_CN}/demo.md (100%)
rename {docs/zh_cn => docs_zh_CN}/faq.md (98%)
rename {docs/zh_cn => docs_zh_CN}/feature_extraction.md (100%)
rename {docs/zh_cn => docs_zh_CN}/getting_started.md (100%)
rename {docs/zh_cn => docs_zh_CN}/index.rst (100%)
rename {docs/zh_cn => docs_zh_CN}/install.md (100%)
rename {docs/zh_cn => docs_zh_CN}/make.bat (100%)
rename {docs/zh_cn => docs_zh_CN}/merge_docs.sh (89%)
rename {docs/zh_cn => docs_zh_CN}/stat.py (100%)
rename {docs/zh_cn => docs_zh_CN}/supported_datasets.md (100%)
rename {docs/zh_cn => docs_zh_CN}/switch_language.md (100%)
rename {docs/zh_cn => docs_zh_CN}/tutorials/1_config.md (100%)
rename {docs/zh_cn => docs_zh_CN}/tutorials/2_finetune.md (100%)
rename {docs/zh_cn => docs_zh_CN}/tutorials/3_new_dataset.md (100%)
rename {docs/zh_cn => docs_zh_CN}/tutorials/4_data_pipeline.md (100%)
rename {docs/zh_cn => docs_zh_CN}/tutorials/5_new_modules.md (100%)
rename {docs/zh_cn => docs_zh_CN}/tutorials/6_export_model.md (100%)
rename {docs/zh_cn => docs_zh_CN}/tutorials/7_customize_runtime.md (100%)
rename {docs/zh_cn => docs_zh_CN}/useful_tools.md (100%)
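As a usage note, a mail-formatted patch such as this one is normally applied with `git am`, which replays the From/Date/Subject headers above as commit metadata; a minimal sketch (the `.patch` filename is a placeholder, not part of this series):

```bash
# Sketch: apply this format-patch email to a local checkout.
# git am preserves author, date, and message from the mail headers.
git am 0001-Docs-Docs-revert.patch  # placeholder filename
```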
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 11aa8b38db..e9db239da8 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -12,8 +12,8 @@ on:
- '!demo/**'
- '!docker/**'
- '!tools/**'
- - '!docs/en/**'
- - '!docs/zh_cn/**'
+ - '!docs/**'
+ - '!docs_zh_CN/**'
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
diff --git a/.gitignore b/.gitignore
index 68cb7f5941..587b296482 100644
--- a/.gitignore
+++ b/.gitignore
@@ -65,8 +65,7 @@ instance/
.scrapy
# Sphinx documentation
-docs/en/_build/
-docs/zh_cn/_build/
+docs/_build/
# PyBuilder
target/
diff --git a/README.md b/README.md
index 8991f5b018..eeee2b9bc1 100644
--- a/README.md
+++ b/README.md
@@ -51,24 +51,24 @@ The master branch works with **PyTorch 1.3+**.
- (2021-10-25) We provide a [guide](https://github.com/open-mmlab/mmaction2/blob/master/configs/skeleton/posec3d/custom_dataset_training.md) on how to train PoseC3D with custom datasets, [bit-scientist](https://github.com/bit-scientist) authored this PR!
- (2021-10-16) We support **PoseC3D** on UCF101 and HMDB51, achieves 87.0% and 69.3% Top-1 accuracy with 2D skeletons only. Pre-extracted 2D skeletons are also available.
-**Release**: v0.20.0 was released in 30/10/2021. Please refer to [changelog.md](docs/en/changelog.md) for details and release history.
+**Release**: v0.20.0 was released in 30/10/2021. Please refer to [changelog.md](docs/changelog.md) for details and release history.
## Installation
-Please refer to [install.md](docs/en/install.md) for installation.
+Please refer to [install.md](docs/install.md) for installation.
## Get Started
-Please see [getting_started.md](docs/en/getting_started.md) for the basic usage of MMAction2.
+Please see [getting_started.md](docs/getting_started.md) for the basic usage of MMAction2.
There are also tutorials:
-- [learn about configs](docs/en/tutorials/1_config.md)
-- [finetuning models](docs/en/tutorials/2_finetune.md)
-- [adding new dataset](docs/en/tutorials/3_new_dataset.md)
-- [designing data pipeline](docs/en/tutorials/4_data_pipeline.md)
-- [adding new modules](docs/en/tutorials/5_new_modules.md)
-- [exporting model to onnx](docs/en/tutorials/6_export_model.md)
-- [customizing runtime settings](docs/en/tutorials/7_customize_runtime.md)
+- [learn about configs](docs/tutorials/1_config.md)
+- [finetuning models](docs/tutorials/2_finetune.md)
+- [adding new dataset](docs/tutorials/3_new_dataset.md)
+- [designing data pipeline](docs/tutorials/4_data_pipeline.md)
+- [adding new modules](docs/tutorials/5_new_modules.md)
+- [exporting model to onnx](docs/tutorials/6_export_model.md)
+- [customizing runtime settings](docs/tutorials/7_customize_runtime.md)
A Colab tutorial is also provided. You may preview the notebook [here](demo/mmaction2_tutorial.ipynb) or directly [run](https://colab.research.google.com/github/open-mmlab/mmaction2/blob/master/demo/mmaction2_tutorial.ipynb) on Colab.
@@ -207,16 +207,16 @@ Datasets marked with * are not fully supported yet, but related dataset preparat
## Benchmark
-To demonstrate the efficacy and efficiency of our framework, we compare MMAction2 with some other popular frameworks and official releases in terms of speed. Details can be found in [benchmark](docs/en/benchmark.md).
+To demonstrate the efficacy and efficiency of our framework, we compare MMAction2 with some other popular frameworks and official releases in terms of speed. Details can be found in [benchmark](docs/benchmark.md).
## Data Preparation
-Please refer to [data_preparation.md](docs/en/data_preparation.md) for a general knowledge of data preparation.
-The supported datasets are listed in [supported_datasets.md](docs/en/supported_datasets.md)
+Please refer to [data_preparation.md](docs/data_preparation.md) for a general knowledge of data preparation.
+The supported datasets are listed in [supported_datasets.md](docs/supported_datasets.md)
## FAQ
-Please refer to [FAQ](docs/en/faq.md) for frequently asked questions.
+Please refer to [FAQ](docs/faq.md) for frequently asked questions.
## Projects built on MMAction2
@@ -226,7 +226,7 @@ Currently, there are many research works and projects built on MMAction2 by user
- Evidential Deep Learning for Open Set Action Recognition, ICCV 2021 **Oral**. [[paper]](https://arxiv.org/abs/2107.10161)[[github]](https://github.com/Cogito2012/DEAR)
- Rethinking Self-supervised Correspondence Learning: A Video Frame-level Similarity Perspective, ICCV 2021 **Oral**. [[paper]](https://arxiv.org/abs/2103.17263)[[github]](https://github.com/xvjiarui/VFS)
-etc., check [projects.md](docs/en/projects.md) to see all related projects.
+etc., check [projects.md](docs/projects.md) to see all related projects.
## License
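The README.md hunks above only retarget documentation links from `docs/en/` back to `docs/`; one hypothetical way to confirm that no stale links survive the revert is a repo-wide grep, sketched below:

```bash
# Sketch: list any Markdown links still pointing at the pre-revert layout.
# git grep exits non-zero when nothing matches, so the echo only fires
# on a clean tree.
git grep -nE '\(/?docs/(en|zh_cn)/' -- '*.md' || echo "no stale docs links"
```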
diff --git a/README_zh-CN.md b/README_zh-CN.md
index bf0a554a95..2a7690e897 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -50,23 +50,23 @@ MMAction2 是一款基于 PyTorch 的视频理解开源工具箱,是 [OpenMMLa
- (2021-10-25) 提供使用自定义数据集训练 PoseC3D 的 [教程](https://github.com/open-mmlab/mmaction2/blob/master/configs/skeleton/posec3d/custom_dataset_training.md),此 PR 由用户 [bit-scientist](https://github.com/bit-scientist) 完成!
- (2021-10-16) 在 UCF101, HMDB51 上支持 **PoseC3D**,仅用 2D 关键点就可分别达到 87.0% 和 69.3% 的识别准确率。两数据集的预提取骨架特征可以公开下载。
-v0.20.0 版本已于 2021 年 10 月 30 日发布,可通过查阅 [更新日志](/docs/en/changelog.md) 了解更多细节以及发布历史
+v0.20.0 版本已于 2021 年 10 月 30 日发布,可通过查阅 [更新日志](/docs/changelog.md) 了解更多细节以及发布历史
## 安装
-请参考 [安装指南](/docs/zh_cn/install.md) 进行安装
+请参考 [安装指南](/docs_zh_CN/install.md) 进行安装
## 教程
-请参考 [基础教程](/docs/zh_cn/getting_started.md) 了解 MMAction2 的基本使用。MMAction2也提供了其他更详细的教程:
+请参考 [基础教程](/docs_zh_CN/getting_started.md) 了解 MMAction2 的基本使用。MMAction2也提供了其他更详细的教程:
-- [如何编写配置文件](/docs/zh_cn/tutorials/1_config.md)
-- [如何微调模型](/docs/zh_cn/tutorials/2_finetune.md)
-- [如何增加新数据集](/docs/zh_cn/tutorials/3_new_dataset.md)
-- [如何设计数据处理流程](/docs/zh_cn/tutorials/4_data_pipeline.md)
-- [如何增加新模块](/docs/zh_cn/tutorials/5_new_modules.md)
-- [如何导出模型为 onnx 格式](/docs/zh_cn/tutorials/6_export_model.md)
-- [如何自定义模型运行参数](/docs/zh_cn/tutorials/7_customize_runtime.md)
+- [如何编写配置文件](/docs_zh_CN/tutorials/1_config.md)
+- [如何微调模型](/docs_zh_CN/tutorials/2_finetune.md)
+- [如何增加新数据集](/docs_zh_CN/tutorials/3_new_dataset.md)
+- [如何设计数据处理流程](/docs_zh_CN/tutorials/4_data_pipeline.md)
+- [如何增加新模块](/docs_zh_CN/tutorials/5_new_modules.md)
+- [如何导出模型为 onnx 格式](/docs_zh_CN/tutorials/6_export_model.md)
+- [如何自定义模型运行参数](/docs_zh_CN/tutorials/7_customize_runtime.md)
MMAction2 也提供了相应的中文 Colab 教程,可以点击 [这里](https://colab.research.google.com/github/open-mmlab/mmaction2/blob/master/demo/mmaction2_tutorial_zh-CN.ipynb) 进行体验!
@@ -203,15 +203,15 @@ MMAction2 将跟进学界的最新进展,并支持更多算法和框架。如
## 基准测试
-为了验证 MMAction2 框架的高精度和高效率,开发成员将其与当前其他主流框架进行速度对比。更多详情可见 [基准测试](/docs/zh_cn/benchmark.md)
+为了验证 MMAction2 框架的高精度和高效率,开发成员将其与当前其他主流框架进行速度对比。更多详情可见 [基准测试](/docs_zh_CN/benchmark.md)
## 数据集准备
-请参考 [数据准备](/docs/zh_cn/data_preparation.md) 了解数据集准备概况。所有支持的数据集都列于 [数据集清单](/docs/zh_cn/supported_datasets.md) 中
+请参考 [数据准备](/docs_zh_CN/data_preparation.md) 了解数据集准备概况。所有支持的数据集都列于 [数据集清单](/docs_zh_CN/supported_datasets.md) 中
## 常见问题
-请参考 [FAQ](/docs/zh_cn/faq.md) 了解其他用户的常见问题
+请参考 [FAQ](/docs_zh_CN/faq.md) 了解其他用户的常见问题
## 相关工作
@@ -221,7 +221,7 @@ MMAction2 将跟进学界的最新进展,并支持更多算法和框架。如
- Rethinking Self-supervised Correspondence Learning: A Video Frame-level Similarity Perspective, ICCV 2021 **Oral**. [[论文]](https://arxiv.org/abs/2103.17263)[[代码]](https://github.com/xvjiarui/VFS)
- Video Swin Transformer. [[论文]](https://arxiv.org/abs/2106.13230)[[代码]](https://github.com/SwinTransformer/Video-Swin-Transformer)
-更多详情可见 [相关工作](docs/en/projects.md)
+更多详情可见 [相关工作](docs/projects.md)
## 许可
diff --git a/configs/detection/acrn/README.md b/configs/detection/acrn/README.md
index 75d18765c1..4f34bec2c4 100644
--- a/configs/detection/acrn/README.md
+++ b/configs/detection/acrn/README.md
@@ -59,7 +59,7 @@ Current state-of-the-art approaches for spatio-temporal action localization rely
:::
-For more details on data preparation, you can refer to AVA in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to AVA in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -75,7 +75,7 @@ Example: train ACRN with SlowFast backbone on AVA with periodic validation.
python tools/train.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py --validate
```
-For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -91,4 +91,4 @@ Example: test ACRN with SlowFast backbone on AVA and dump the result to a csv fi
python tools/test.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
```
-For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset) .
+For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset) .
diff --git a/configs/detection/acrn/README_zh-CN.md b/configs/detection/acrn/README_zh-CN.md
index 13d7837d1d..3ec59cc495 100644
--- a/configs/detection/acrn/README_zh-CN.md
+++ b/configs/detection/acrn/README_zh-CN.md
@@ -46,7 +46,7 @@
依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
-对于数据集准备的细节,用户可参考 [数据准备](/docs/zh_cn/data_preparation.md)。
+对于数据集准备的细节,用户可参考 [数据准备](/docs_zh_CN/data_preparation.md)。
## 如何训练
@@ -62,7 +62,7 @@ python tools/train.py ${CONFIG_FILE} [optional arguments]
python tools/train.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py --validate
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -78,4 +78,4 @@ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
python tools/test.py configs/detection/acrn/slowfast_acrn_kinetics_pretrained_r50_8x8x1_cosine_10e_ava22_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/detection/ava/README.md b/configs/detection/ava/README.md
index c46895f007..5fa66a4c18 100644
--- a/configs/detection/ava/README.md
+++ b/configs/detection/ava/README.md
@@ -86,7 +86,7 @@ AVA, with its realistic scene and action complexity, exposes the intrinsic diffi
:::
-For more details on data preparation, you can refer to AVA in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to AVA in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -102,7 +102,7 @@ Example: train SlowOnly model on AVA with periodic validation.
python tools/train.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py --validate
```
-For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting) .
+For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting) .
### Train Custom Classes From Ava Dataset
@@ -140,4 +140,4 @@ Example: test SlowOnly model on AVA and dump the result to a csv file.
python tools/test.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
```
-For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset) .
+For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset) .
diff --git a/configs/detection/ava/README_zh-CN.md b/configs/detection/ava/README_zh-CN.md
index a682a039ac..6cd82f4a3e 100644
--- a/configs/detection/ava/README_zh-CN.md
+++ b/configs/detection/ava/README_zh-CN.md
@@ -72,7 +72,7 @@
如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
2. **Context** 表示同时使用 RoI 特征与全局特征进行分类,可带来约 1% mAP 的提升。
-对于数据集准备的细节,用户可参考 [数据准备](/docs/zh_cn/data_preparation.md)。
+对于数据集准备的细节,用户可参考 [数据准备](/docs_zh_CN/data_preparation.md)。
## 如何训练
@@ -88,7 +88,7 @@ python tools/train.py ${CONFIG_FILE} [optional arguments]
python tools/train.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py --validate
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
### 训练 AVA 数据集中的自定义类别
@@ -126,4 +126,4 @@ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
python tools/test.py configs/detection/ava/slowonly_kinetics_pretrained_r50_8x8x1_20e_ava_rgb.py checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/detection/lfb/README.md b/configs/detection/lfb/README.md
index ea88419dd8..2bd9a2a233 100644
--- a/configs/detection/lfb/README.md
+++ b/configs/detection/lfb/README.md
@@ -98,7 +98,7 @@ python tools/train.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_
--validate --seed 0 --deterministic
```
-For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -123,4 +123,4 @@ python tools/test.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r
checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/detection/lfb/README_zh-CN.md b/configs/detection/lfb/README_zh-CN.md
index 3cb8bf5186..4c90a66bd5 100644
--- a/configs/detection/lfb/README_zh-CN.md
+++ b/configs/detection/lfb/README_zh-CN.md
@@ -75,7 +75,7 @@ python tools/train.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 测试
@@ -100,4 +100,4 @@ python tools/test.py configs/detection/lfb/lfb_nl_kinetics_pretrained_slowonly_r
checkpoints/SOME_CHECKPOINT.pth --eval mAP --out results.csv
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/localization/bmn/README.md b/configs/localization/bmn/README.md
index 8c4cfcf8eb..43147c2109 100644
--- a/configs/localization/bmn/README.md
+++ b/configs/localization/bmn/README.md
@@ -60,7 +60,7 @@ Temporal action proposal generation is an challenging and promising task which a
*We train BMN with the [official repo](https://github.com/JJBOY/BMN-Boundary-Matching-Network), evaluate its proposal generation and action detection performance with [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) for label assigning.
-For more details on data preparation, you can refer to ActivityNet feature in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to ActivityNet feature in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -76,7 +76,7 @@ Example: train BMN model on ActivityNet features dataset.
python tools/train.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
```
-For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting) .
+For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting) .
## Test
@@ -109,4 +109,4 @@ python tools/analysis/report_map.py --proposal path/to/proposal_file
:::
-For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset) .
+For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset) .
diff --git a/configs/localization/bmn/README_zh-CN.md b/configs/localization/bmn/README_zh-CN.md
index 5e15f6b06e..3778f390fa 100644
--- a/configs/localization/bmn/README_zh-CN.md
+++ b/configs/localization/bmn/README_zh-CN.md
@@ -48,7 +48,7 @@
*MMAction2 在 [原始代码库](https://github.com/JJBOY/BMN-Boundary-Matching-Network) 上训练 BMN,并且在 [anet_cuhk_2017](https://download.openmmlab.com/mmaction/localization/cuhk_anet17_pred.json) 的对应标签上评估时序动作候选生成和时序检测的结果。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 ActivityNet 特征部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 ActivityNet 特征部分。
## 如何训练
@@ -64,7 +64,7 @@ python tools/train.py ${CONFIG_FILE} [optional arguments]
python tools/train.py configs/localization/bmn/bmn_400x100_2x8_9e_activitynet_feature.py
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -95,4 +95,4 @@ python tools/analysis/report_map.py --proposal path/to/proposal_file
python tools/data/activitynet/convert_proposal_format.py
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/localization/bsn/README.md b/configs/localization/bsn/README.md
index b87eebcb9f..d15b6361c7 100644
--- a/configs/localization/bsn/README.md
+++ b/configs/localization/bsn/README.md
@@ -44,7 +44,7 @@ Temporal action proposal generation is an important yet challenging problem, sin
:::
-For more details on data preparation, you can refer to ActivityNet feature in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to ActivityNet feature in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -68,7 +68,7 @@ Examples:
python tools/train.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
```
-For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Inference
@@ -167,4 +167,4 @@ Examples:
:::
-For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/localization/bsn/README_zh-CN.md b/configs/localization/bsn/README_zh-CN.md
index 7271bc1cf4..6d0ddfc2df 100644
--- a/configs/localization/bsn/README_zh-CN.md
+++ b/configs/localization/bsn/README_zh-CN.md
@@ -32,7 +32,7 @@
2. 对于 **特征** 这一列,`cuhk_mean_100` 表示所使用的特征为利用 [anet2016-cuhk](https://github.com/yjxiong/anet2016-cuhk) 代码库抽取的,被广泛利用的 CUHK ActivityNet 特征,
`mmaction_video` 和 `mmaction_clip` 分布表示所使用的特征为利用 MMAction 抽取的,视频级别 ActivityNet 预训练模型的特征;视频片段级别 ActivityNet 预训练模型的特征。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 ActivityNet 特征部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 ActivityNet 特征部分。
## 如何训练
@@ -56,7 +56,7 @@ python tools/train.py ${CONFIG_FILE} [optional arguments]
python tools/train.py configs/localization/bsn/bsn_pem_400x100_1x16_20e_activitynet_feature.py
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何进行推理
@@ -153,4 +153,4 @@ python tools/train.py ${CONFIG_FILE} [optional arguments]
python tools/data/activitynet/convert_proposal_format.py
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/localization/ssn/README.md b/configs/localization/ssn/README.md
index b5c2a68257..c5e5dc09fa 100644
--- a/configs/localization/ssn/README.md
+++ b/configs/localization/ssn/README.md
@@ -37,7 +37,7 @@ year = {2017}
According to the [Linear Scaling Rule](https://arxiv.org/abs/1706.02677), you may set the learning rate proportional to the batch size if you use different GPUs or videos per GPU,
e.g., lr=0.01 for 4 GPUs x 2 video/gpu and lr=0.08 for 16 GPUs x 4 video/gpu.
2. Since SSN utilizes different structured temporal pyramid pooling methods at training and testing, please refer to [ssn_r50_450e_thumos14_rgb_train](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) at training and [ssn_r50_450e_thumos14_rgb_test](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py) at testing.
-3. We evaluate the action detection performance of SSN, using action proposals of TAG. For more details on data preparation, you can refer to thumos14 TAG proposals in [Data Preparation](/docs/en/data_preparation.md).
+3. We evaluate the action detection performance of SSN, using action proposals of TAG. For more details on data preparation, you can refer to thumos14 TAG proposals in [Data Preparation](/docs/data_preparation.md).
4. The reference SSN in is evaluated with `ResNet50` backbone in MMAction, which is the same backbone with ours. Note that the original setting of MMAction SSN uses the `BNInception` backbone.
:::
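The Linear Scaling Rule cited in the notes above is plain proportionality: multiply the base learning rate by the ratio of your total batch size (GPUs x videos per GPU) to the reference batch size. A one-line check reproducing the quoted numbers:

```bash
# lr=0.01 at 4 GPUs x 2 videos/gpu (batch 8) should scale to lr=0.08
# at 16 GPUs x 4 videos/gpu (batch 64): 0.01 * 64 / 8 = 0.08.
python -c "print(0.01 * (16 * 4) / (4 * 2))"  # -> 0.08
```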
@@ -56,7 +56,7 @@ Example: train SSN model on thumos14 dataset.
python tools/train.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
```
-For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details and optional arguments infos, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -73,4 +73,4 @@ Example: test BMN on ActivityNet feature dataset.
python tools/test.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py checkpoints/SOME_CHECKPOINT.pth --eval mAP
```
-For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details and optional arguments infos, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/localization/ssn/README_zh-CN.md b/configs/localization/ssn/README_zh-CN.md
index 3b85c61ad1..d1ec5bbcee 100644
--- a/configs/localization/ssn/README_zh-CN.md
+++ b/configs/localization/ssn/README_zh-CN.md
@@ -26,7 +26,7 @@ year = {2017}
依据 [线性缩放规则](https://arxiv.org/abs/1706.02677),当用户使用不同数量的 GPU 或者每块 GPU 处理不同视频个数时,需要根据批大小等比例地调节学习率。
如,lr=0.01 对应 4 GPUs x 2 video/gpu,以及 lr=0.08 对应 16 GPUs x 4 video/gpu。
2. 由于 SSN 在训练和测试阶段使用不同的结构化时序金字塔池化方法(structured temporal pyramid pooling methods),请分别参考 [ssn_r50_450e_thumos14_rgb_train](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py) 和 [ssn_r50_450e_thumos14_rgb_test](/configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py)。
-3. MMAction2 使用 TAG 的时序动作候选进行 SSN 模型的精度验证。关于数据准备的更多细节,用户可参考 [Data 数据集准备文档](/docs/zh_cn/data_preparation.md) 准备 thumos14 的 TAG 时序动作候选。
+3. MMAction2 使用 TAG 的时序动作候选进行 SSN 模型的精度验证。关于数据准备的更多细节,用户可参考 [Data 数据集准备文档](/docs_zh_CN/data_preparation.md) 准备 thumos14 的 TAG 时序动作候选。
4. 参考代码的 SSN 模型是和 MMAction2 一样在 `ResNet50` 主干网络上验证的。注意,这里的 SSN 的初始设置与原代码库的 `BNInception` 骨干网络的设置相同。
## 如何训练
@@ -43,7 +43,7 @@ python tools/train.py ${CONFIG_FILE} [optional arguments]
python tools/train.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_train.py
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -60,4 +60,4 @@ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [optional arguments]
python tools/test.py configs/localization/ssn/ssn_r50_450e_thumos14_rgb_test.py checkpoints/SOME_CHECKPOINT.pth --eval mAP
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/c3d/README.md b/configs/recognition/c3d/README.md
index 0ea46809cb..067097fdbe 100644
--- a/configs/recognition/c3d/README.md
+++ b/configs/recognition/c3d/README.md
@@ -45,7 +45,7 @@ eid = {arXiv:1412.0767}
:::
-For more details on data preparation, you can refer to UCF-101 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to UCF-101 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -62,7 +62,7 @@ python tools/train.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -79,4 +79,4 @@ python tools/test.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.
checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/c3d/README_zh-CN.md b/configs/recognition/c3d/README_zh-CN.md
index 6f1965bee0..c4f02c16f2 100644
--- a/configs/recognition/c3d/README_zh-CN.md
+++ b/configs/recognition/c3d/README_zh-CN.md
@@ -32,7 +32,7 @@ eid = {arXiv:1412.0767}
3. 这里的 **推理时间** 是根据 [基准测试脚本](/tools/analysis/benchmark.py) 获得的,采用测试时的采帧策略,且只考虑模型的推理时间,
并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 UCF-101 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 UCF-101 部分。
## 如何训练
@@ -49,7 +49,7 @@ python tools/train.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -66,4 +66,4 @@ python tools/test.py configs/recognition/c3d/c3d_sports1m_16x1x1_45e_ucf101_rgb.
checkpoints/SOME_CHECKPOINT.pth --eval top_k_accuracy
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/csn/README.md b/configs/recognition/csn/README.md
index 5e3d4e4f57..3a48f6bbda 100644
--- a/configs/recognition/csn/README.md
+++ b/configs/recognition/csn/README.md
@@ -66,7 +66,7 @@ doi = {10.1109/ICCV.2019.00565}
:::
-For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -84,7 +84,7 @@ python tools/train.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -102,4 +102,4 @@ python tools/test.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_
--out result.json --average-clips prob
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/csn/README_zh-CN.md b/configs/recognition/csn/README_zh-CN.md
index 4ad92b64fd..06a28cd5c5 100644
--- a/configs/recognition/csn/README_zh-CN.md
+++ b/configs/recognition/csn/README_zh-CN.md
@@ -53,7 +53,7 @@ doi = {10.1109/ICCV.2019.00565}
3. 这里使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
4. 这里的 **infer_ckpt** 表示该模型权重文件是从 [VMZ](https://github.com/facebookresearch/VMZ) 导入的。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何训练
@@ -71,7 +71,7 @@ python tools/train.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -89,4 +89,4 @@ python tools/test.py configs/recognition/csn/ircsn_ig65m_pretrained_r152_32x2x1_
--out result.json --average-clips prob
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/i3d/README.md b/configs/recognition/i3d/README.md
index bf67c6f189..5a2bfd7a33 100644
--- a/configs/recognition/i3d/README.md
+++ b/configs/recognition/i3d/README.md
@@ -64,7 +64,7 @@ The paucity of videos in current action classification datasets (UCF-101 and HMD
:::
-For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -82,7 +82,7 @@ python tools/train.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rg
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -100,4 +100,4 @@ python tools/test.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb
--out result.json --average-clips prob
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/i3d/README_zh-CN.md b/configs/recognition/i3d/README_zh-CN.md
index 6e778cd7c9..ac10732615 100644
--- a/configs/recognition/i3d/README_zh-CN.md
+++ b/configs/recognition/i3d/README_zh-CN.md
@@ -52,7 +52,7 @@
并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
3. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何训练
@@ -70,7 +70,7 @@ python tools/train.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rg
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -88,4 +88,4 @@ python tools/test.py configs/recognition/i3d/i3d_r50_32x2x1_100e_kinetics400_rgb
--out result.json --average-clips prob
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/r2plus1d/README.md b/configs/recognition/r2plus1d/README.md
index 9671e88cb4..f9cd05cca1 100644
--- a/configs/recognition/r2plus1d/README.md
+++ b/configs/recognition/r2plus1d/README.md
@@ -46,7 +46,7 @@ In this paper we discuss several forms of spatiotemporal convolutions for video
:::
-For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -64,7 +64,7 @@ python tools/train.py configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinet
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -82,4 +82,4 @@ python tools/test.py configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kineti
--out result.json --average-clips=prob
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/r2plus1d/README_zh-CN.md b/configs/recognition/r2plus1d/README_zh-CN.md
index 5df080c927..d720508dc0 100644
--- a/configs/recognition/r2plus1d/README_zh-CN.md
+++ b/configs/recognition/r2plus1d/README_zh-CN.md
@@ -34,7 +34,7 @@
并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
3. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何训练
@@ -52,7 +52,7 @@ python tools/train.py configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kinet
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -70,4 +70,4 @@ python tools/test.py configs/recognition/r2plus1d/r2plus1d_r34_8x8x1_180e_kineti
--out result.json --average-clips=prob
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/slowfast/README.md b/configs/recognition/slowfast/README.md
index 61f461c98e..4bbdbd4f0c 100644
--- a/configs/recognition/slowfast/README.md
+++ b/configs/recognition/slowfast/README.md
@@ -56,7 +56,7 @@ We present SlowFast networks for video recognition. Our model involves (i) a Slo
:::
-For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -74,7 +74,7 @@ python tools/train.py configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kine
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -92,4 +92,4 @@ python tools/test.py configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinet
--out result.json --average-clips=prob
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/slowfast/README_zh-CN.md b/configs/recognition/slowfast/README_zh-CN.md
index 95e9383140..7605871d2d 100644
--- a/configs/recognition/slowfast/README_zh-CN.md
+++ b/configs/recognition/slowfast/README_zh-CN.md
@@ -44,7 +44,7 @@
并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
3. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何训练
@@ -62,7 +62,7 @@ python tools/train.py configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kine
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -80,4 +80,4 @@ python tools/test.py configs/recognition/slowfast/slowfast_r50_4x16x1_256e_kinet
--out result.json --average-clips=prob
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/slowonly/README.md b/configs/recognition/slowonly/README.md
index 622c3fde7a..d5846782ae 100644
--- a/configs/recognition/slowonly/README.md
+++ b/configs/recognition/slowonly/README.md
@@ -118,7 +118,7 @@ In data benchmark, we compare two different data preprocessing methods: (1) Resi
:::
-For more details on data preparation, you can refer to corresponding parts in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to corresponding parts in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -136,7 +136,7 @@ python tools/train.py configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kine
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -154,4 +154,4 @@ python tools/test.py configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinet
--out result.json --average-clips=prob
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/slowonly/README_zh-CN.md b/configs/recognition/slowonly/README_zh-CN.md
index 29109d2c5d..917be85500 100644
--- a/configs/recognition/slowonly/README_zh-CN.md
+++ b/configs/recognition/slowonly/README_zh-CN.md
@@ -106,7 +106,7 @@
并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
3. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何训练
@@ -124,7 +124,7 @@ python tools/train.py configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kine
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -142,4 +142,4 @@ python tools/test.py configs/recognition/slowonly/slowonly_r50_4x16x1_256e_kinet
--out result.json --average-clips=prob
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/tanet/README.md b/configs/recognition/tanet/README.md
index 10f76d2aa9..37760e5042 100644
--- a/configs/recognition/tanet/README.md
+++ b/configs/recognition/tanet/README.md
@@ -50,7 +50,7 @@ Video data is with complex temporal dynamics due to various factors such as came
:::
-For more details on data preparation, you can refer to corresponding parts in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to corresponding parts in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -68,7 +68,7 @@ python tools/train.py configs/recognition/tanet/tanet_r50_dense_1x1x8_100e_kinet
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -86,4 +86,4 @@ python tools/test.py configs/recognition/tanet/tanet_r50_dense_1x1x8_100e_kineti
--out result.json
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/tanet/README_zh-CN.md b/configs/recognition/tanet/README_zh-CN.md
index 02e42201c3..4902cf8430 100644
--- a/configs/recognition/tanet/README_zh-CN.md
+++ b/configs/recognition/tanet/README_zh-CN.md
@@ -38,7 +38,7 @@
3. 参考代码的结果是通过使用相同的模型配置在原来的代码库上训练得到的。对应的模型权重文件可从 [这里](https://drive.google.com/drive/folders/1sFfmP3yrfc7IzRshEELOby7-aEoymIFL?usp=sharing) 下载。
4. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何训练
@@ -56,7 +56,7 @@ python tools/train.py configs/recognition/tanet/tanet_r50_dense_1x1x8_100e_kinet
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -74,4 +74,4 @@ python tools/test.py configs/recognition/tanet/tanet_r50_dense_1x1x8_100e_kineti
--out result.json
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/timesformer/README.md b/configs/recognition/timesformer/README.md
index 2ee361d949..54b4f25443 100644
--- a/configs/recognition/timesformer/README.md
+++ b/configs/recognition/timesformer/README.md
@@ -46,7 +46,7 @@ We present a convolution-free approach to video classification built exclusively
:::
-For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -64,7 +64,7 @@ python tools/train.py configs/recognition/timesformer/timesformer_divST_8x32x1_1
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -82,4 +82,4 @@ python tools/test.py configs/recognition/timesformer/timesformer_divST_8x32x1_15
--out result.json
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/timesformer/README_zh-CN.md b/configs/recognition/timesformer/README_zh-CN.md
index d84d2fe2e0..c844917e01 100644
--- a/configs/recognition/timesformer/README_zh-CN.md
+++ b/configs/recognition/timesformer/README_zh-CN.md
@@ -33,7 +33,7 @@
2. MMAction2 保持与 [原代码](https://github.com/facebookresearch/TimeSformer) 的测试设置一致(three crop x 1 clip)。
3. TimeSformer 使用的预训练模型 `vit_base_patch16_224.pth` 转换自 [vision_transformer](https://github.com/google-research/vision_transformer)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何训练
@@ -51,7 +51,7 @@ python tools/train.py configs/recognition/timesformer/timesformer_divST_8x32x1_1
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -69,4 +69,4 @@ python tools/test.py configs/recognition/timesformer/timesformer_divST_8x32x1_15
--out result.json
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/tin/README.md b/configs/recognition/tin/README.md
index eb7fc375d7..cf57eed749 100644
--- a/configs/recognition/tin/README.md
+++ b/configs/recognition/tin/README.md
@@ -60,7 +60,7 @@ Here, we use `finetune` to indicate that we use [TSM model](https://download.ope
:::
-For more details on data preparation, you can refer to Kinetics400, Something-Something V1 and Something-Something V2 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400, Something-Something V1 and Something-Something V2 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -78,7 +78,7 @@ python tools/train.py configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py \
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -96,4 +96,4 @@ python tools/test.py configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py \
--out result.json
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/tin/README_zh-CN.md b/configs/recognition/tin/README_zh-CN.md
index 431769addb..2747fa6c94 100644
--- a/configs/recognition/tin/README_zh-CN.md
+++ b/configs/recognition/tin/README_zh-CN.md
@@ -46,7 +46,7 @@
4. 参考代码的结果是通过使用相同的模型配置在原来的代码库上训练得到的。
5. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400, Something-Something V1 and Something-Something V2 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400, Something-Something V1 and Something-Something V2 部分。
## 如何训练
@@ -64,7 +64,7 @@ python tools/train.py configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py \
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -82,4 +82,4 @@ python tools/test.py configs/recognition/tin/tin_r50_1x1x8_40e_sthv1_rgb.py \
--out result.json
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/tpn/README.md b/configs/recognition/tpn/README.md
index 304bc5ecad..7ce9ce6f63 100644
--- a/configs/recognition/tpn/README.md
+++ b/configs/recognition/tpn/README.md
@@ -51,7 +51,7 @@ Visual tempo characterizes the dynamics and the temporal scale of an action. Mod
:::
-For more details on data preparation, you can refer to Kinetics400, Something-Something V1 and Something-Something V2 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400, Something-Something V1 and Something-Something V2 in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -68,7 +68,7 @@ python tools/train.py configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kineti
--work-dir work_dirs/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb [--validate --seed 0 --deterministic]
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -86,4 +86,4 @@ python tools/test.py configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetic
--out result.json --average-clips prob
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
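Several of the test commands in this patch add `--average-clips prob`, i.e. clip-level scores are converted to probabilities before being averaged into the video-level prediction, instead of averaging raw scores. A toy illustration of why the two can differ (made-up logits, not taken from this patch):

```python
import numpy as np


def softmax(x, axis=-1):
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)


clip_logits = np.array([[3.0, 0.0],   # clip 1 is very confident in class 0
                        [0.0, 1.0],   # clips 2 and 3 lean towards class 1
                        [0.0, 1.0]])

score_avg = clip_logits.mean(axis=0)           # average raw scores
prob_avg = softmax(clip_logits).mean(axis=0)   # 'prob': average probabilities
print(score_avg.argmax(), prob_avg.argmax())   # 0 vs 1 -- they disagree here
```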
diff --git a/configs/recognition/tpn/README_zh-CN.md b/configs/recognition/tpn/README_zh-CN.md
index e525c2140d..ec66656d1d 100644
--- a/configs/recognition/tpn/README_zh-CN.md
+++ b/configs/recognition/tpn/README_zh-CN.md
@@ -53,7 +53,7 @@ python tools/train.py configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kineti
--work-dir work_dirs/tpn_slowonly_r50_8x8x1_150e_kinetics_rgb [--validate --seed 0 --deterministic]
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -71,4 +71,4 @@ python tools/test.py configs/recognition/tpn/tpn_slowonly_r50_8x8x1_150e_kinetic
--out result.json --average-clips prob
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/trn/README.md b/configs/recognition/trn/README.md
index 75ad603e63..ff2f4d8785 100644
--- a/configs/recognition/trn/README.md
+++ b/configs/recognition/trn/README.md
@@ -70,7 +70,7 @@ python tools/train.py configs/recognition/trn/trn_r50_1x1x8_50e_sthv1_rgb.py \
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -88,4 +88,4 @@ python tools/test.py configs/recognition/trn/trn_r50_1x1x8_50e_sthv1_rgb.py \
--out result.json
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/trn/README_zh-CN.md b/configs/recognition/trn/README_zh-CN.md
index beb575159f..d0e85f015c 100644
--- a/configs/recognition/trn/README_zh-CN.md
+++ b/configs/recognition/trn/README_zh-CN.md
@@ -57,7 +57,7 @@ python tools/train.py configs/recognition/trn/trn_r50_1x1x8_50e_sthv1_rgb.py \
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -75,4 +75,4 @@ python tools/test.py configs/recognition/trn/trn_r50_1x1x8_50e_sthv1_rgb.py \
--out result.json
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/tsm/README.md b/configs/recognition/tsm/README.md
index 994723a308..c3528ee5be 100644
--- a/configs/recognition/tsm/README.md
+++ b/configs/recognition/tsm/README.md
@@ -151,7 +151,7 @@ test_pipeline = [
:::
-For more details on data preparation, you can refer to corresponding parts in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to corresponding parts in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -169,7 +169,7 @@ python tools/train.py configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -187,4 +187,4 @@ python tools/test.py configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.p
--out result.json
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/tsm/README_zh-CN.md b/configs/recognition/tsm/README_zh-CN.md
index ccf584d57a..f95876fd9e 100644
--- a/configs/recognition/tsm/README_zh-CN.md
+++ b/configs/recognition/tsm/README_zh-CN.md
@@ -145,7 +145,7 @@ test_pipeline = [
6. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
7. 这里的 **infer_ckpt** 表示该模型权重文件是从 [TSM](https://github.com/mit-han-lab/temporal-shift-module/blob/master/test_models.py) 导入的。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400, Something-Something V1 and Something-Something V2 部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400, Something-Something V1 and Something-Something V2 部分。
## 如何训练
@@ -163,7 +163,7 @@ python tools/train.py configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -181,4 +181,4 @@ python tools/test.py configs/recognition/tsm/tsm_r50_1x1x8_50e_kinetics400_rgb.p
--out result.json
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/tsn/README.md b/configs/recognition/tsn/README.md
index 276234f280..f3f5811ef5 100644
--- a/configs/recognition/tsn/README.md
+++ b/configs/recognition/tsn/README.md
@@ -224,7 +224,7 @@ python tools/train.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -242,4 +242,4 @@ python tools/test.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.
--out result.json
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/tsn/README_zh-CN.md b/configs/recognition/tsn/README_zh-CN.md
index 5cee3ea365..69e95459a5 100644
--- a/configs/recognition/tsn/README_zh-CN.md
+++ b/configs/recognition/tsn/README_zh-CN.md
@@ -213,7 +213,7 @@ python tools/train.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -231,4 +231,4 @@ python tools/test.py configs/recognition/tsn/tsn_r50_1x1x3_100e_kinetics400_rgb.
--out result.json
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition/x3d/README.md b/configs/recognition/x3d/README.md
index cea5789cfc..a7a3c7e715 100644
--- a/configs/recognition/x3d/README.md
+++ b/configs/recognition/x3d/README.md
@@ -44,7 +44,7 @@ This paper presents X3D, a family of efficient video networks that progressively
:::
-For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to Kinetics400 in [Data Preparation](/docs/data_preparation.md).
## Test
@@ -62,4 +62,4 @@ python tools/test.py configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_r
--out result.json --average-clips prob
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/recognition/x3d/README_zh-CN.md b/configs/recognition/x3d/README_zh-CN.md
index 947b5bf093..3b09e5276b 100644
--- a/configs/recognition/x3d/README_zh-CN.md
+++ b/configs/recognition/x3d/README_zh-CN.md
@@ -31,7 +31,7 @@
1. 参考代码的结果是通过使用相同的数据和原来的代码库所提供的模型进行测试得到的。
2. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的 Kinetics400 部分
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的 Kinetics400 部分。
## 如何测试
@@ -49,4 +49,4 @@ python tools/test.py configs/recognition/x3d/x3d_s_13x6x1_facebook_kinetics400_r
--out result.json --average-clips prob
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/recognition_audio/resnet/README.md b/configs/recognition_audio/resnet/README.md
index 1a5b718490..4c9ee539c8 100644
--- a/configs/recognition_audio/resnet/README.md
+++ b/configs/recognition_audio/resnet/README.md
@@ -44,7 +44,7 @@ tecture for integrated audiovisual perception. AVSlowFast has Slow and Fast visu
:::
-For more details on data preparation, you can refer to ``Prepare audio`` in [Data Preparation](/docs/en/data_preparation.md).
+For more details on data preparation, you can refer to ``Prepare audio`` in [Data Preparation](/docs/data_preparation.md).
## Train
@@ -62,7 +62,7 @@ python tools/train.py configs/audio_recognition/tsn_r50_64x1x1_100e_kinetics400_
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -80,7 +80,7 @@ python tools/test.py configs/audio_recognition/tsn_r50_64x1x1_100e_kinetics400_a
--out result.json
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
## Fusion
diff --git a/configs/recognition_audio/resnet/README_zh-CN.md b/configs/recognition_audio/resnet/README_zh-CN.md
index c3a38dd0e1..bf1188ff46 100644
--- a/configs/recognition_audio/resnet/README_zh-CN.md
+++ b/configs/recognition_audio/resnet/README_zh-CN.md
@@ -31,7 +31,7 @@
并不包括 IO 时间以及预处理时间。对于每个配置,MMAction2 使用 1 块 GPU 并设置批大小(每块 GPU 处理的视频个数)为 1 来计算推理时间。
3. 我们使用的 Kinetics400 验证集包含 19796 个视频,用户可以从 [验证集视频](https://mycuhk-my.sharepoint.com/:u:/g/personal/1155136485_link_cuhk_edu_hk/EbXw2WX94J1Hunyt3MWNDJUBz-nHvQYhO9pvKqm6g39PMA?e=a9QldB) 下载这些视频。同时也提供了对应的 [数据列表](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_val_list.txt) (每行格式为:视频 ID,视频帧数目,类别序号)以及 [标签映射](https://download.openmmlab.com/mmaction/dataset/k400_val/kinetics_class2ind.txt) (类别序号到类别名称)。
-对于数据集准备的细节,用户可参考 [数据集准备文档](/docs/zh_cn/data_preparation.md) 中的准备音频部分。
+对于数据集准备的细节,用户可参考 [数据集准备文档](/docs_zh_CN/data_preparation.md) 中的准备音频部分。
## 如何训练
@@ -49,7 +49,7 @@ python tools/train.py configs/audio_recognition/tsn_r50_64x1x1_100e_kinetics400_
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -67,7 +67,7 @@ python tools/test.py configs/audio_recognition/tsn_r50_64x1x1_100e_kinetics400_a
--out result.json
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
## 融合
diff --git a/configs/skeleton/2s-agcn/README.md b/configs/skeleton/2s-agcn/README.md
index 5013f4e7c4..d6049c735c 100644
--- a/configs/skeleton/2s-agcn/README.md
+++ b/configs/skeleton/2s-agcn/README.md
@@ -58,7 +58,7 @@ python tools/train.py configs/skeleton/2s-agcn/2sagcn_80e_ntu60_xsub_bone_3d.py
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -84,4 +84,4 @@ python tools/test.py configs/skeleton/2s-agcn/2sagcn_80e_ntu60_xsub_bone_3d.py \
--out bone_result.pkl
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
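The skeleton pipelines above write raw predictions to a pickle file (`--out bone_result.pkl`), which is what two-stream methods such as 2s-AGCN fuse afterwards. A rough sketch of that score fusion follows; `joint_result.pkl` and the per-sample score layout are assumptions for illustration:

```python
import pickle

import numpy as np

# Assumption: each file holds an array-like of per-class scores per sample,
# as produced by `tools/test.py ... --out xxx_result.pkl`.
with open('joint_result.pkl', 'rb') as f:   # hypothetical joint-stream output
    joint = np.array(pickle.load(f))
with open('bone_result.pkl', 'rb') as f:
    bone = np.array(pickle.load(f))

fused = joint + bone                 # simple two-stream score fusion
print(fused.argmax(axis=1)[:10])     # fused top-1 predictions
```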
diff --git a/configs/skeleton/2s-agcn/README_zh-CN.md b/configs/skeleton/2s-agcn/README_zh-CN.md
index 5e5f0f4092..ae7cc00a20 100644
--- a/configs/skeleton/2s-agcn/README_zh-CN.md
+++ b/configs/skeleton/2s-agcn/README_zh-CN.md
@@ -47,7 +47,7 @@ python tools/train.py configs/skeleton/2s-agcn/2sagcn_80e_ntu60_xsub_bone_3d.py
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -73,4 +73,4 @@ python tools/test.py configs/skeleton/2s-agcn/2sagcn_80e_ntu60_xsub_bone_3d.py \
--out bone_result.pkl
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/skeleton/posec3d/README.md b/configs/skeleton/posec3d/README.md
index 7cc5c22f16..3b8b686db6 100644
--- a/configs/skeleton/posec3d/README.md
+++ b/configs/skeleton/posec3d/README.md
@@ -125,7 +125,7 @@ python tools/train.py configs/skeleton/posec3d/slowonly_r50_u48_240e_gym_keypoin
For training with your custom dataset, you can refer to [Custom Dataset Training](https://github.com/open-mmlab/mmaction2/blob/master/configs/skeleton/posec3d/custom_dataset_training.md).
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -143,4 +143,4 @@ python tools/test.py configs/skeleton/posec3d/slowonly_r50_u48_240e_gym_keypoint
--out result.pkl
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/skeleton/posec3d/README_zh-CN.md b/configs/skeleton/posec3d/README_zh-CN.md
index 734adb9213..4c4cdf8d46 100644
--- a/configs/skeleton/posec3d/README_zh-CN.md
+++ b/configs/skeleton/posec3d/README_zh-CN.md
@@ -112,7 +112,7 @@ python tools/train.py configs/skeleton/posec3d/slowonly_r50_u48_240e_gym_keypoin
有关自定义数据集上的训练,可以参考 [Custom Dataset Training](https://github.com/open-mmlab/mmaction2/blob/master/configs/skeleton/posec3d/custom_dataset_training.md)。
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -130,4 +130,4 @@ python tools/test.py configs/skeleton/posec3d/slowonly_r50_u48_240e_gym_keypoint
--out result.pkl
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/configs/skeleton/stgcn/README.md b/configs/skeleton/stgcn/README.md
index 6ca35a4e20..98b95a5cf9 100644
--- a/configs/skeleton/stgcn/README.md
+++ b/configs/skeleton/stgcn/README.md
@@ -60,7 +60,7 @@ python tools/train.py configs/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint.py \
--validate --seed 0 --deterministic
```
-For more details, you can refer to **Training setting** part in [getting_started](/docs/en/getting_started.md#training-setting).
+For more details, you can refer to **Training setting** part in [getting_started](/docs/getting_started.md#training-setting).
## Test
@@ -78,4 +78,4 @@ python tools/test.py configs/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint.py \
--out result.pkl
```
-For more details, you can refer to **Test a dataset** part in [getting_started](/docs/en/getting_started.md#test-a-dataset).
+For more details, you can refer to **Test a dataset** part in [getting_started](/docs/getting_started.md#test-a-dataset).
diff --git a/configs/skeleton/stgcn/README_zh-CN.md b/configs/skeleton/stgcn/README_zh-CN.md
index 48fc4f6d90..c7e57077cd 100644
--- a/configs/skeleton/stgcn/README_zh-CN.md
+++ b/configs/skeleton/stgcn/README_zh-CN.md
@@ -49,7 +49,7 @@ python tools/train.py configs/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint.py \
--validate --seed 0 --deterministic
```
-更多训练细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#训练配置) 中的 **训练配置** 部分。
+更多训练细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 中的 **训练配置** 部分。
## 如何测试
@@ -67,4 +67,4 @@ python tools/test.py configs/skeleton/stgcn/stgcn_80e_ntu60_xsub_keypoint.py \
--out result.pkl
```
-更多测试细节,可参考 [基础教程](/docs/zh_cn/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
+更多测试细节,可参考 [基础教程](/docs_zh_CN/getting_started.md#测试某个数据集) 中的 **测试某个数据集** 部分。
diff --git a/demo/mmaction2_tutorial.ipynb b/demo/mmaction2_tutorial.ipynb
index 72c0639b27..14441ab79b 100644
--- a/demo/mmaction2_tutorial.ipynb
+++ b/demo/mmaction2_tutorial.ipynb
@@ -430,7 +430,7 @@
"source": [
"### Support a new dataset\n",
"\n",
- "In this tutorial, we gives an example to convert the data into the format of existing datasets. Other methods and more advanced usages can be found in the [doc](/docs/en/tutorials/new_dataset.md)\n",
+ "In this tutorial, we gives an example to convert the data into the format of existing datasets. Other methods and more advanced usages can be found in the [doc](/docs/tutorials/new_dataset.md)\n",
"\n",
"Firstly, let's download a tiny dataset obtained from [Kinetics-400](https://deepmind.com/research/open-source/open-source-datasets/kinetics/). We select 30 videos with their labels as train dataset and 10 videos with their labels as test dataset."
]
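For context on "the format of existing datasets" mentioned in this notebook cell: rawframe-style annotation lists in this codebase are plain text, one clip per line, commonly `<frame_dir> <total_frames> <label>` (the same triple described in the Chinese READMEs above). A hypothetical parser for that layout:

```python
# Assumption: one "<frame_dir> <total_frames> <label>" triple per line.
def load_annotations(path):
    samples = []
    with open(path) as f:
        for line in f:
            frame_dir, total_frames, label = line.split()
            samples.append((frame_dir, int(total_frames), int(label)))
    return samples

# e.g. load_annotations('kinetics400_tiny_train_list.txt')  # hypothetical file
```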
diff --git a/demo/mmaction2_tutorial_zh-CN.ipynb b/demo/mmaction2_tutorial_zh-CN.ipynb
index c5b545893b..28940ce931 100644
--- a/demo/mmaction2_tutorial_zh-CN.ipynb
+++ b/demo/mmaction2_tutorial_zh-CN.ipynb
@@ -405,7 +405,7 @@
"source": [
"### 支持新数据集\n",
"\n",
- "这里我们给出将数据转换为已有数据集格式的示例。其他方法可以参考[doc](/docs/en/tutorials/new_dataset.md)\n",
+ "这里我们给出将数据转换为已有数据集格式的示例。其他方法可以参考[doc](/docs/tutorials/new_dataset.md)\n",
"\n",
"用到的是一个从[Kinetics-400](https://deepmind.com/research/open-source/open-source-datasets/kinetics/)中获取的tiny数据集。包含30个训练视频,10个测试视频。"
]
diff --git a/docs/en/Makefile b/docs/Makefile
similarity index 100%
rename from docs/en/Makefile
rename to docs/Makefile
diff --git a/docs/en/_static/css/readthedocs.css b/docs/_static/css/readthedocs.css
similarity index 100%
rename from docs/en/_static/css/readthedocs.css
rename to docs/_static/css/readthedocs.css
diff --git a/docs/en/_static/images/mmaction2.png b/docs/_static/images/mmaction2.png
similarity index 100%
rename from docs/en/_static/images/mmaction2.png
rename to docs/_static/images/mmaction2.png
diff --git a/docs/en/api.rst b/docs/api.rst
similarity index 100%
rename from docs/en/api.rst
rename to docs/api.rst
diff --git a/docs/en/benchmark.md b/docs/benchmark.md
similarity index 100%
rename from docs/en/benchmark.md
rename to docs/benchmark.md
diff --git a/docs/en/changelog.md b/docs/changelog.md
similarity index 100%
rename from docs/en/changelog.md
rename to docs/changelog.md
diff --git a/docs/en/conf.py b/docs/conf.py
similarity index 97%
rename from docs/en/conf.py
rename to docs/conf.py
index 40bc92fc8d..3248b1f326 100644
--- a/docs/en/conf.py
+++ b/docs/conf.py
@@ -17,14 +17,14 @@
import pytorch_sphinx_theme
-sys.path.insert(0, os.path.abspath('../..'))
+sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'MMAction2'
copyright = '2020, OpenMMLab'
author = 'MMAction2 Authors'
-version_file = '../../mmaction/version.py'
+version_file = '../mmaction/version.py'
def get_version():
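The `conf.py` hunks above only shorten the relative paths after the move out of `docs/en/`; the `get_version()` helper they feed is, in OpenMMLab Sphinx configs, typically a small exec-and-read of `__version__`. A sketch of that pattern (not necessarily the exact body in this repo):

```python
version_file = '../mmaction/version.py'


def get_version():
    # Execute version.py in an isolated namespace and read __version__.
    namespace = {}
    with open(version_file) as f:
        exec(compile(f.read(), version_file, 'exec'), namespace)
    return namespace['__version__']
```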
diff --git a/docs/en/data_preparation.md b/docs/data_preparation.md
similarity index 100%
rename from docs/en/data_preparation.md
rename to docs/data_preparation.md
diff --git a/docs/en/faq.md b/docs/faq.md
similarity index 98%
rename from docs/en/faq.md
rename to docs/faq.md
index 583cdc31af..0a462b7b8a 100644
--- a/docs/en/faq.md
+++ b/docs/faq.md
@@ -22,7 +22,7 @@ If the contents here do not cover your issue, please create an issue using the [
- **"OSError: MoviePy Error: creation of None failed because of the following error"**
- Refer to [install.md](https://github.com/open-mmlab/mmaction2/blob/master/docs/en/install.md#requirements)
+ Refer to [install.md](https://github.com/open-mmlab/mmaction2/blob/master/docs/install.md#requirements)
1. For Windows users, [ImageMagick](https://www.imagemagick.org/script/index.php) will not be automatically detected by MoviePy; there is a need to modify the `moviepy/config_defaults.py` file by providing the path to the ImageMagick binary called `magick`, like `IMAGEMAGICK_BINARY = "C:\\Program Files\\ImageMagick_VERSION\\magick.exe"`
2. For Linux users, there is a need to modify the `/etc/ImageMagick-6/policy.xml` file by commenting out `<policy domain="path" rights="none" pattern="@*" />`, i.e. changing it to `<!-- <policy domain="path" rights="none" pattern="@*" /> -->`, if ImageMagick is not detected by moviepy.
@@ -56,7 +56,7 @@ If the contents here do not cover your issue, please create an issue using the [
- **How to just use trained recognizer models for backbone pre-training?**
- Refer to [Use Pre-Trained Model](https://github.com/open-mmlab/mmaction2/blob/master/docs/en/tutorials/2_finetune.md#use-pre-trained-model),
+ Refer to [Use Pre-Trained Model](https://github.com/open-mmlab/mmaction2/blob/master/docs/tutorials/2_finetune.md#use-pre-trained-model),
in order to use the pre-trained model for the whole network, the new config adds the link of pre-trained models in the `load_from`.
And to use backbone for pre-training, you can change `pretrained` value in the backbone dict of config files to the checkpoint path / url.
@@ -106,7 +106,7 @@ If the contents here do not cover your issue, please create an issue using the [
- **How to set `load_from` value in config files to finetune models?**
- In MMAction2, We set `load_from=None` as default in `configs/_base_/default_runtime.py` and owing to [inheritance design](/docs/en/tutorials/1_config.md),
+ In MMAction2, we set `load_from=None` as default in `configs/_base_/default_runtime.py` and owing to [inheritance design](/docs/tutorials/1_config.md),
users can directly change it by setting `load_from` in their configs.
## Testing
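To make the `load_from` answer above concrete: since `configs/_base_/default_runtime.py` sets `load_from=None`, a user config that inherits it can simply override the field. A hypothetical finetuning config (placeholder path and URL, not taken from this patch):

```python
# my_finetune_config.py -- hypothetical user config
_base_ = ['../../_base_/default_runtime.py']  # placeholder relative path

# default_runtime.py sets load_from=None; inheritance lets us override it here.
load_from = 'https://download.openmmlab.com/mmaction/some_checkpoint.pth'  # placeholder
```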
diff --git a/docs/en/feature_extraction.md b/docs/feature_extraction.md
similarity index 100%
rename from docs/en/feature_extraction.md
rename to docs/feature_extraction.md
diff --git a/docs/en/getting_started.md b/docs/getting_started.md
similarity index 100%
rename from docs/en/getting_started.md
rename to docs/getting_started.md
diff --git a/docs/en/index.rst b/docs/index.rst
similarity index 100%
rename from docs/en/index.rst
rename to docs/index.rst
diff --git a/docs/en/install.md b/docs/install.md
similarity index 100%
rename from docs/en/install.md
rename to docs/install.md
diff --git a/docs/en/make.bat b/docs/make.bat
similarity index 100%
rename from docs/en/make.bat
rename to docs/make.bat
diff --git a/docs/en/merge_docs.sh b/docs/merge_docs.sh
similarity index 100%
rename from docs/en/merge_docs.sh
rename to docs/merge_docs.sh
diff --git a/docs/en/projects.md b/docs/projects.md
similarity index 100%
rename from docs/en/projects.md
rename to docs/projects.md
diff --git a/docs/en/stat.py b/docs/stat.py
similarity index 100%
rename from docs/en/stat.py
rename to docs/stat.py
diff --git a/docs/en/supported_datasets.md b/docs/supported_datasets.md
similarity index 100%
rename from docs/en/supported_datasets.md
rename to docs/supported_datasets.md
diff --git a/docs/en/switch_language.md b/docs/switch_language.md
similarity index 100%
rename from docs/en/switch_language.md
rename to docs/switch_language.md
diff --git a/docs/en/tutorials/1_config.md b/docs/tutorials/1_config.md
similarity index 100%
rename from docs/en/tutorials/1_config.md
rename to docs/tutorials/1_config.md
diff --git a/docs/en/tutorials/2_finetune.md b/docs/tutorials/2_finetune.md
similarity index 96%
rename from docs/en/tutorials/2_finetune.md
rename to docs/tutorials/2_finetune.md
index 91d075f3c3..f29263601e 100644
--- a/docs/en/tutorials/2_finetune.md
+++ b/docs/tutorials/2_finetune.md
@@ -91,7 +91,7 @@ checkpoint_config = dict(interval=5)
## Use Pre-Trained Model
To use the pre-trained model for the whole network, the new config adds the link of pre-trained models in the `load_from`.
-We set `load_from=None` as default in `configs/_base_/default_runtime.py` and owing to [inheritance design](/docs/en/tutorials/1_config.md), users can directly change it by setting `load_from` in their configs.
+We set `load_from=None` as default in `configs/_base_/default_runtime.py` and owing to [inheritance design](/docs/tutorials/1_config.md), users can directly change it by setting `load_from` in their configs.
```python
# use the pre-trained model for the whole TSN network
diff --git a/docs/en/tutorials/3_new_dataset.md b/docs/tutorials/3_new_dataset.md
similarity index 100%
rename from docs/en/tutorials/3_new_dataset.md
rename to docs/tutorials/3_new_dataset.md
diff --git a/docs/en/tutorials/4_data_pipeline.md b/docs/tutorials/4_data_pipeline.md
similarity index 100%
rename from docs/en/tutorials/4_data_pipeline.md
rename to docs/tutorials/4_data_pipeline.md
diff --git a/docs/en/tutorials/5_new_modules.md b/docs/tutorials/5_new_modules.md
similarity index 100%
rename from docs/en/tutorials/5_new_modules.md
rename to docs/tutorials/5_new_modules.md
diff --git a/docs/en/tutorials/6_export_model.md b/docs/tutorials/6_export_model.md
similarity index 100%
rename from docs/en/tutorials/6_export_model.md
rename to docs/tutorials/6_export_model.md
diff --git a/docs/en/tutorials/7_customize_runtime.md b/docs/tutorials/7_customize_runtime.md
similarity index 100%
rename from docs/en/tutorials/7_customize_runtime.md
rename to docs/tutorials/7_customize_runtime.md
diff --git a/docs/en/useful_tools.md b/docs/useful_tools.md
similarity index 100%
rename from docs/en/useful_tools.md
rename to docs/useful_tools.md
diff --git a/docs/zh_cn/Makefile b/docs_zh_CN/Makefile
similarity index 100%
rename from docs/zh_cn/Makefile
rename to docs_zh_CN/Makefile
diff --git a/docs/zh_cn/README.md b/docs_zh_CN/README.md
similarity index 100%
rename from docs/zh_cn/README.md
rename to docs_zh_CN/README.md
diff --git a/docs/zh_cn/api.rst b/docs_zh_CN/api.rst
similarity index 100%
rename from docs/zh_cn/api.rst
rename to docs_zh_CN/api.rst
diff --git a/docs/zh_cn/benchmark.md b/docs_zh_CN/benchmark.md
similarity index 100%
rename from docs/zh_cn/benchmark.md
rename to docs_zh_CN/benchmark.md
diff --git a/docs/zh_cn/conf.py b/docs_zh_CN/conf.py
similarity index 97%
rename from docs/zh_cn/conf.py
rename to docs_zh_CN/conf.py
index fe1b066f29..7949166dc9 100644
--- a/docs/zh_cn/conf.py
+++ b/docs_zh_CN/conf.py
@@ -17,14 +17,14 @@
import pytorch_sphinx_theme
-sys.path.insert(0, os.path.abspath('../../'))
+sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'MMAction2'
copyright = '2020, OpenMMLab'
author = 'MMAction2 Authors'
-version_file = '../../mmaction/version.py'
+version_file = '../mmaction/version.py'
def get_version():
diff --git a/docs/zh_cn/data_preparation.md b/docs_zh_CN/data_preparation.md
similarity index 100%
rename from docs/zh_cn/data_preparation.md
rename to docs_zh_CN/data_preparation.md
diff --git a/docs/zh_cn/demo.md b/docs_zh_CN/demo.md
similarity index 100%
rename from docs/zh_cn/demo.md
rename to docs_zh_CN/demo.md
diff --git a/docs/zh_cn/faq.md b/docs_zh_CN/faq.md
similarity index 98%
rename from docs/zh_cn/faq.md
rename to docs_zh_CN/faq.md
index 1a4f722f33..2f328792f0 100644
--- a/docs/zh_cn/faq.md
+++ b/docs_zh_CN/faq.md
@@ -13,7 +13,7 @@
- **"OSError: MoviePy Error: creation of None failed because of the following error"**
- 参照 [MMAction2 安装文档](https://github.com/open-mmlab/mmaction2/blob/master/docs/zh_cn/install.md#安装依赖包)
+ 参照 [MMAction2 安装文档](https://github.com/open-mmlab/mmaction2/blob/master/docs_zh_CN/install.md#安装依赖包)
1. 对于 Windows 用户,[ImageMagick](https://www.imagemagick.org/script/index.php) 不再被 MoviePy 自动检测,
需要获取名为 `magick` 的 ImageMagick 二进制包的路径,来修改 `moviepy/config_defaults.py` 文件中的 `IMAGEMAGICK_BINARY`,如 `IMAGEMAGICK_BINARY = "C:\\Program Files\\ImageMagick_VERSION\\magick.exe"`
2. 对于 Linux 用户,如果 ImageMagick 没有被 moviepy 检测,需要注释掉 `/etc/ImageMagick-6/policy.xml` 文件中的 `<policy domain="path" rights="none" pattern="@*" />`,即改为 `<!-- <policy domain="path" rights="none" pattern="@*" /> -->`。
@@ -48,7 +48,7 @@
- **如何使用训练过的识别器作为主干网络的预训练模型?**
- 参照 [使用预训练模型](https://github.com/open-mmlab/mmaction2/blob/master/docs/zh_cn/tutorials/2_finetune.md#使用预训练模型),
+ 参照 [使用预训练模型](https://github.com/open-mmlab/mmaction2/blob/master/docs_zh_CN/tutorials/2_finetune.md#使用预训练模型),
如果想对整个网络使用预训练模型,可以在配置文件中,将 `load_from` 设置为预训练模型的链接。
如果只想对主干网络使用预训练模型,可以在配置文件中,将主干网络 `backbone` 中的 `pretrained` 设置为预训练模型的地址或链接。
diff --git a/docs/zh_cn/feature_extraction.md b/docs_zh_CN/feature_extraction.md
similarity index 100%
rename from docs/zh_cn/feature_extraction.md
rename to docs_zh_CN/feature_extraction.md
diff --git a/docs/zh_cn/getting_started.md b/docs_zh_CN/getting_started.md
similarity index 100%
rename from docs/zh_cn/getting_started.md
rename to docs_zh_CN/getting_started.md
diff --git a/docs/zh_cn/index.rst b/docs_zh_CN/index.rst
similarity index 100%
rename from docs/zh_cn/index.rst
rename to docs_zh_CN/index.rst
diff --git a/docs/zh_cn/install.md b/docs_zh_CN/install.md
similarity index 100%
rename from docs/zh_cn/install.md
rename to docs_zh_CN/install.md
diff --git a/docs/zh_cn/make.bat b/docs_zh_CN/make.bat
similarity index 100%
rename from docs/zh_cn/make.bat
rename to docs_zh_CN/make.bat
diff --git a/docs/zh_cn/merge_docs.sh b/docs_zh_CN/merge_docs.sh
similarity index 89%
rename from docs/zh_cn/merge_docs.sh
rename to docs_zh_CN/merge_docs.sh
index 187b8bd419..1265731a97 100755
--- a/docs/zh_cn/merge_docs.sh
+++ b/docs_zh_CN/merge_docs.sh
@@ -1,10 +1,10 @@
#!/usr/bin/env bash
# gather models
-cat ../configs/localization/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 时序动作检测模型' | sed 's/](\/docs/zh_cn\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' | sed "s/getting_started.html##/getting_started.html#/g" > localization_models.md
-cat ../configs/recognition/*/README_zh-CN.md | sed "s/md#测/html#t测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 动作识别模型' | sed 's/](\/docs/zh_cn\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" > recognition_models.md
-cat ../configs/recognition_audio/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed 's/](\/docs/zh_cn\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" >> recognition_models.md
-cat ../configs/detection/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 时空动作检测模型' | sed 's/](\/docs/zh_cn\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" > detection_models.md
-cat ../configs/skeleton/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 骨骼动作识别模型' | sed 's/](\/docs/zh_cn\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" > skeleton_models.md
+cat ../configs/localization/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 时序动作检测模型' | sed 's/](\/docs_zh_CN\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' | sed "s/getting_started.html##/getting_started.html#/g" > localization_models.md
+cat ../configs/recognition/*/README_zh-CN.md | sed "s/md#测/html#t测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 动作识别模型' | sed 's/](\/docs_zh_CN\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" > recognition_models.md
+cat ../configs/recognition_audio/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed 's/](\/docs_zh_CN\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" >> recognition_models.md
+cat ../configs/detection/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 时空动作检测模型' | sed 's/](\/docs_zh_CN\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" > detection_models.md
+cat ../configs/skeleton/*/README_zh-CN.md | sed "s/md#测/html#测/g" | sed "s/md#训/html#训/g" | sed "s/#/#&/" | sed '1i\# 骨骼动作识别模型' | sed 's/](\/docs_zh_CN\//](/g' | sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'| sed "s/getting_started.html##/getting_started.html#/g" > skeleton_models.md
# gather datasets
cat ../tools/data/*/README_zh-CN.md | sed 's/# 准备/# /g' | sed 's/#/#&/' > prepare_data.md
@@ -29,7 +29,7 @@ sed -i 's/(\/tools\/data\/diving48\/README_zh-CN.md/(#diving48/g' supported_data
sed -i 's/(\/tools\/data\/skeleton\/README_zh-CN.md/(#skeleton/g' supported_datasets.md
cat prepare_data.md >> supported_datasets.md
-sed -i 's/](\/docs/zh_cn\//](/g' supported_datasets.md
+sed -i 's/](\/docs_zh_CN\//](/g' supported_datasets.md
sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' supported_datasets.md
sed -i "s/md###t/html#t/g" demo.md
@@ -37,5 +37,5 @@ sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' demo.md
sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' benchmark.md
sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' getting_started.md
sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' install.md
-sed -i 's/](\/docs/zh_cn\//](/g' ./tutorials/*.md
+sed -i 's/](\/docs_zh_CN\//](/g' ./tutorials/*.md
sed -i 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g' ./tutorials/*.md
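All of these sed edits rewrite absolute in-repo links when the per-model READMEs are concatenated for the docs build: links into `docs_zh_CN/` become page-relative, and any remaining root-relative link is redirected to the GitHub tree. The same two substitutions expressed in Python, for readability:

```python
import re

line = '可参考 [基础教程](/docs_zh_CN/getting_started.md#训练配置) 和 [脚本](/tools/train.py)'

# sed 's/](\/docs_zh_CN\//](/g' -- make docs-internal links page-relative
line = re.sub(r'\]\(/docs_zh_CN/', '](', line)
# sed 's=](/=](https://github.com/open-mmlab/mmaction2/tree/master/=g'
line = re.sub(r'\]\(/', '](https://github.com/open-mmlab/mmaction2/tree/master/',
              line)
print(line)
```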
diff --git a/docs/zh_cn/stat.py b/docs_zh_CN/stat.py
similarity index 100%
rename from docs/zh_cn/stat.py
rename to docs_zh_CN/stat.py
diff --git a/docs/zh_cn/supported_datasets.md b/docs_zh_CN/supported_datasets.md
similarity index 100%
rename from docs/zh_cn/supported_datasets.md
rename to docs_zh_CN/supported_datasets.md
diff --git a/docs/zh_cn/switch_language.md b/docs_zh_CN/switch_language.md
similarity index 100%
rename from docs/zh_cn/switch_language.md
rename to docs_zh_CN/switch_language.md
diff --git a/docs/zh_cn/tutorials/1_config.md b/docs_zh_CN/tutorials/1_config.md
similarity index 100%
rename from docs/zh_cn/tutorials/1_config.md
rename to docs_zh_CN/tutorials/1_config.md
diff --git a/docs/zh_cn/tutorials/2_finetune.md b/docs_zh_CN/tutorials/2_finetune.md
similarity index 100%
rename from docs/zh_cn/tutorials/2_finetune.md
rename to docs_zh_CN/tutorials/2_finetune.md
diff --git a/docs/zh_cn/tutorials/3_new_dataset.md b/docs_zh_CN/tutorials/3_new_dataset.md
similarity index 100%
rename from docs/zh_cn/tutorials/3_new_dataset.md
rename to docs_zh_CN/tutorials/3_new_dataset.md
diff --git a/docs/zh_cn/tutorials/4_data_pipeline.md b/docs_zh_CN/tutorials/4_data_pipeline.md
similarity index 100%
rename from docs/zh_cn/tutorials/4_data_pipeline.md
rename to docs_zh_CN/tutorials/4_data_pipeline.md
diff --git a/docs/zh_cn/tutorials/5_new_modules.md b/docs_zh_CN/tutorials/5_new_modules.md
similarity index 100%
rename from docs/zh_cn/tutorials/5_new_modules.md
rename to docs_zh_CN/tutorials/5_new_modules.md
diff --git a/docs/zh_cn/tutorials/6_export_model.md b/docs_zh_CN/tutorials/6_export_model.md
similarity index 100%
rename from docs/zh_cn/tutorials/6_export_model.md
rename to docs_zh_CN/tutorials/6_export_model.md
diff --git a/docs/zh_cn/tutorials/7_customize_runtime.md b/docs_zh_CN/tutorials/7_customize_runtime.md
similarity index 100%
rename from docs/zh_cn/tutorials/7_customize_runtime.md
rename to docs_zh_CN/tutorials/7_customize_runtime.md
diff --git a/docs/zh_cn/useful_tools.md b/docs_zh_CN/useful_tools.md
similarity index 100%
rename from docs/zh_cn/useful_tools.md
rename to docs_zh_CN/useful_tools.md
diff --git a/tools/data/activitynet/README.md b/tools/data/activitynet/README.md
index 8c36a9eec9..f3286f6fc1 100644
--- a/tools/data/activitynet/README.md
+++ b/tools/data/activitynet/README.md
@@ -78,7 +78,7 @@ For this case, the downloading scripts update the annotation file after download
### Step 3. Extract RGB and Flow
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
Use following scripts to extract both RGB and Flow.
@@ -87,7 +87,7 @@ bash extract_frames.sh
```
The command above can generate images with new short edge 256. If you want to generate images with short edge 320 (320p), or with fix size 340x256, you can change the args `--new-short 256` to `--new-short 320` or `--new-width 340 --new-height 256`.
-More details can be found in [data_preparation](/docs/en/data_preparation.md)
+More details can be found in [data_preparation](/docs/data_preparation.md)
### Step 4. Generate File List for ActivityNet Finetuning
@@ -168,4 +168,4 @@ mmaction2
```
-For training and evaluating on ActivityNet, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on ActivityNet, please refer to [getting_started.md](/docs/getting_started.md).
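A note on the resizing options referenced here and in the other data-preparation READMEs: `--new-short 256` rescales each frame so the shorter edge becomes 256 while the aspect ratio is kept, whereas `--new-width 340 --new-height 256` forces a fixed size. The short-edge arithmetic:

```python
def short_edge_size(width, height, new_short=256):
    # Scale so the shorter edge equals new_short, preserving aspect ratio.
    scale = new_short / min(width, height)
    return round(width * scale), round(height * scale)


print(short_edge_size(1920, 1080))  # (455, 256)
print(short_edge_size(1080, 1920))  # (256, 455)
```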
diff --git a/tools/data/activitynet/README_zh-CN.md b/tools/data/activitynet/README_zh-CN.md
index 5007d0a2ad..7687b948db 100644
--- a/tools/data/activitynet/README_zh-CN.md
+++ b/tools/data/activitynet/README_zh-CN.md
@@ -78,7 +78,7 @@ bash download_bsn_videos.sh
### 步骤 3. 抽取 RGB 帧和光流
-在抽取视频帧和光流之前,请参考 [安装指南](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在抽取视频帧和光流之前,请参考 [安装指南](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
可使用以下命令抽取视频帧和光流。
@@ -87,7 +87,7 @@ bash extract_frames.sh
```
以上脚本将会生成短边 256 分辨率的视频。如果用户想生成短边 320 分辨率的视频(即 320p),或者 340x256 的固定分辨率,用户可以通过改变参数由 `--new-short 256` 至 `--new-short 320`,或者 `--new-width 340 --new-height 256` 进行设置
-更多细节可参考 [数据准备指南](/docs/zh_cn/data_preparation.md)
+更多细节可参考 [数据准备指南](/docs_zh_CN/data_preparation.md)
### 步骤 4. 生成用于 ActivityNet 微调的文件列表
@@ -166,4 +166,4 @@ mmaction2
```
-关于对 ActivityNet 进行训练和验证,可以参考 [基础教程](/docs/zh_cn/getting_started.md).
+关于对 ActivityNet 进行训练和验证,可以参考 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/ava/README.md b/tools/data/ava/README.md
index 4e297716de..a416eb2632 100644
--- a/tools/data/ava/README.md
+++ b/tools/data/ava/README.md
@@ -64,7 +64,7 @@ bash cut_videos.sh
## Step 4. Extract RGB and Flow
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. And you can run the following script to soft link the extracted frames.
@@ -141,7 +141,7 @@ mmaction2
| │ │ │ ├── ...
```
-For training and evaluating on AVA, please refer to [getting_started](/docs/en/getting_started.md).
+For training and evaluating on AVA, please refer to [getting_started](/docs/getting_started.md).
## Reference
diff --git a/tools/data/ava/README_zh-CN.md b/tools/data/ava/README_zh-CN.md
index 6a922f5e1b..5a7b96da88 100644
--- a/tools/data/ava/README_zh-CN.md
+++ b/tools/data/ava/README_zh-CN.md
@@ -56,7 +56,7 @@ bash cut_videos.sh
## 4. 提取 RGB 帧和光流
-在提取之前,请参考 [安装教程](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在提取之前,请参考 [安装教程](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
如果用户有足够的 SSD 空间,那么建议将视频抽取为 RGB 帧以提升 I/O 性能。用户可以使用以下脚本为抽取得到的帧文件夹建立软连接:
@@ -131,4 +131,4 @@ mmaction2
| │ │ │ ├── ...
```
-关于 AVA 数据集上的训练与测试,请参照 [基础教程](/docs/zh_cn/getting_started.md)。
+关于 AVA 数据集上的训练与测试,请参照 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/diving48/README.md b/tools/data/diving48/README.md
index 1cbdbcdb27..588cddd173 100644
--- a/tools/data/diving48/README.md
+++ b/tools/data/diving48/README.md
@@ -39,7 +39,7 @@ This part is **optional** if you only want to use the video loader.
The frames provided in official compressed file are not complete. You may need to go through the following extraction steps to get the complete frames.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance.
@@ -120,4 +120,4 @@ mmaction2
│ | | ├── ...
```
-For training and evaluating on Diving48, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on Diving48, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/diving48/README_zh-CN.md b/tools/data/diving48/README_zh-CN.md
index 3210d06b9d..e91f8729a5 100644
--- a/tools/data/diving48/README_zh-CN.md
+++ b/tools/data/diving48/README_zh-CN.md
@@ -39,7 +39,7 @@ bash download_videos.sh
官网提供的帧压缩包并不完整。若想获取完整的数据,可以使用以下步骤解帧。
-在抽取视频帧和光流之前,请参考 [安装指南](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在抽取视频帧和光流之前,请参考 [安装指南](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
如果拥有大量的 SSD 存储空间,则推荐将抽取的帧存储至 I/O 性能更优秀的 SSD 中。
@@ -120,4 +120,4 @@ mmaction2
│ | | ├── ...
```
-关于对 Diving48 进行训练和验证,可以参考 [基础教程](/docs/zh_cn/getting_started.md)。
+关于对 Diving48 进行训练和验证,可以参考 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/gym/README.md b/tools/data/gym/README.md
index 22b09f66f9..a39eda6fd4 100644
--- a/tools/data/gym/README.md
+++ b/tools/data/gym/README.md
@@ -55,7 +55,7 @@ python trim_subaction.py
This part is **optional** if you only want to use the video loader for RGB model training.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
Run the following script to extract both rgb and flow using "tvl1" algorithm.
@@ -106,4 +106,4 @@ mmaction2
| | └── subaction_frames
```
-For training and evaluating on GYM, please refer to [getting_started](/docs/en/getting_started.md).
+For training and evaluating on GYM, please refer to [getting_started](/docs/getting_started.md).
diff --git a/tools/data/gym/README_zh-CN.md b/tools/data/gym/README_zh-CN.md
index 9fff9dd20a..cb3a796ec7 100644
--- a/tools/data/gym/README_zh-CN.md
+++ b/tools/data/gym/README_zh-CN.md
@@ -55,7 +55,7 @@ python trim_subaction.py
如果用户仅使用 video loader,则可以跳过本步。
-在提取之前,请参考 [安装教程](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在提取之前,请参考 [安装教程](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
用户可使用如下脚本同时抽取 RGB 帧和光流(提取光流时使用 tvl1 算法):
@@ -106,4 +106,4 @@ mmaction2
| | └── subaction_frames
```
-关于 GYM 数据集上的训练与测试,请参照 [基础教程](/docs/zh_cn/getting_started.md)。
+关于 GYM 数据集上的训练与测试,请参照 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/hmdb51/README.md b/tools/data/hmdb51/README.md
index f003e58b97..206b548764 100644
--- a/tools/data/hmdb51/README.md
+++ b/tools/data/hmdb51/README.md
@@ -41,7 +41,7 @@ bash download_videos.sh
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance.
@@ -122,4 +122,4 @@ mmaction2
```
-For training and evaluating on HMDB51, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on HMDB51, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/hmdb51/README_zh-CN.md b/tools/data/hmdb51/README_zh-CN.md
index f82f397b5c..a34c4b9ce9 100644
--- a/tools/data/hmdb51/README_zh-CN.md
+++ b/tools/data/hmdb51/README_zh-CN.md
@@ -39,7 +39,7 @@ bash download_videos.sh
如果用户只想使用视频加载训练,则该部分是 **可选项**。
-在抽取视频帧和光流之前,请参考 [安装指南](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在抽取视频帧和光流之前,请参考 [安装指南](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
如果用户有大量的 SSD 存储空间,则推荐将抽取的帧存储至 I/O 性能更优秀的 SSD 上。
用户可使用以下命令为 SSD 建立软链接。
@@ -118,4 +118,4 @@ mmaction2
```
-关于对 HMDB51 进行训练和验证,可以参照 [基础教程](/docs/zh_cn/getting_started.md)。
+关于对 HMDB51 进行训练和验证,可以参照 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/hvu/README.md b/tools/data/hvu/README.md
index f668f52788..755e71dbb3 100644
--- a/tools/data/hvu/README.md
+++ b/tools/data/hvu/README.md
@@ -43,7 +43,7 @@ bash download_videos.sh
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
You can use the following script to extract both RGB and Flow frames.
@@ -52,7 +52,7 @@ bash extract_frames.sh
```
By default, we generate frames with short edge resized to 256.
-More details can be found in [data_preparation](/docs/en/data_preparation.md)
+More details can be found in [data_preparation](/docs/data_preparation.md)
## Step 4. Generate File List
@@ -120,4 +120,4 @@ mmaction2
```
-For training and evaluating on HVU, please refer to [getting_started](/docs/en/getting_started.md).
+For training and evaluating on HVU, please refer to [getting_started](/docs/getting_started.md).
diff --git a/tools/data/hvu/README_zh-CN.md b/tools/data/hvu/README_zh-CN.md
index a83f85c571..5b3ffa1ea3 100644
--- a/tools/data/hvu/README_zh-CN.md
+++ b/tools/data/hvu/README_zh-CN.md
@@ -43,7 +43,7 @@ bash download_videos.sh
如果用户仅使用 video loader,则可以跳过本步。
-在提取之前,请参考 [安装教程](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在提取之前,请参考 [安装教程](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
用户可使用如下脚本同时抽取 RGB 帧和光流:
@@ -51,7 +51,7 @@ bash download_videos.sh
bash extract_frames.sh
```
-该脚本默认生成短边长度为 256 的帧,可参考 [数据准备](/docs/zh_cn/data_preparation.md) 获得更多细节。
+该脚本默认生成短边长度为 256 的帧,可参考 [数据准备](/docs_zh_CN/data_preparation.md) 获得更多细节。
## 4. 生成文件列表
@@ -107,4 +107,4 @@ mmaction2
```
-关于 HVU 数据集上的训练与测试,请参照 [基础教程](/docs/zh_cn/getting_started.md)。
+关于 HVU 数据集上的训练与测试,请参照 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/jester/README.md b/tools/data/jester/README.md
index 26161e78bb..2e054ab33d 100644
--- a/tools/data/jester/README.md
+++ b/tools/data/jester/README.md
@@ -64,7 +64,7 @@ data = dict(
This part is **optional** if you only want to use RGB frames.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance.
@@ -140,4 +140,4 @@ mmaction2
```
-For training and evaluating on Jester, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on Jester, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/jester/README_zh-CN.md b/tools/data/jester/README_zh-CN.md
index 86f37badf2..4b3fb17f0b 100644
--- a/tools/data/jester/README_zh-CN.md
+++ b/tools/data/jester/README_zh-CN.md
@@ -64,7 +64,7 @@ data = dict(
如果用户只想使用 RGB 帧训练,则该部分是 **可选项**。
-在抽取视频帧和光流之前,请参考 [安装指南](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在抽取视频帧和光流之前,请参考 [安装指南](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
如果拥有大量的 SSD 存储空间,则推荐将抽取的帧存储至 I/O 性能更优秀的 SSD 中。
@@ -140,4 +140,4 @@ mmaction2
```
-关于对 jester 进行训练和验证,可以参考 [基础教程](/docs/zh_cn/getting_started.md)。
+关于对 jester 进行训练和验证,可以参考 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/kinetics/README.md b/tools/data/kinetics/README.md
index 7351d1b128..725190ee41 100644
--- a/tools/data/kinetics/README.md
+++ b/tools/data/kinetics/README.md
@@ -72,7 +72,7 @@ You can also download from [Academic Torrents](https://academictorrents.com/) ([
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. And you can run the following script to soft link the extracted frames.
@@ -103,7 +103,7 @@ bash extract_frames.sh ${DATASET}
```
The commands above can generate images with new short edge 256. If you want to generate images with short edge 320 (320p), or with fix size 340x256, you can change the args `--new-short 256` to `--new-short 320` or `--new-width 340 --new-height 256`.
-More details can be found in [data_preparation](/docs/en/data_preparation.md)
+More details can be found in [data_preparation](/docs/data_preparation.md)
## Step 4. Generate File List
@@ -147,4 +147,4 @@ mmaction2
```
-For training and evaluating on Kinetics, please refer to [getting_started](/docs/en/getting_started.md).
+For training and evaluating on Kinetics, please refer to [getting_started](/docs/getting_started.md).
diff --git a/tools/data/kinetics/README_zh-CN.md b/tools/data/kinetics/README_zh-CN.md
index 1fa8741e22..ef49ba8e8a 100644
--- a/tools/data/kinetics/README_zh-CN.md
+++ b/tools/data/kinetics/README_zh-CN.md
@@ -66,7 +66,7 @@ python ../resize_videos.py ../../../data/${DATASET}/videos_train/ ../../../data/
如果用户仅使用 video loader,则可以跳过本步。
-在提取之前,请参考 [安装教程](/docs/zh_cn/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
+在提取之前,请参考 [安装教程](/docs_zh_CN/install.md) 安装 [denseflow](https://github.com/open-mmlab/denseflow)。
如果用户有足够的 SSD 空间,那么建议将视频抽取为 RGB 帧以提升 I/O 性能。用户可以使用以下脚本为抽取得到的帧文件夹建立软连接:
@@ -97,7 +97,7 @@ bash extract_frames.sh ${DATASET}
```
以上的命令生成短边长度为 256 的 RGB 帧和光流帧。如果用户需要生成短边长度为 320 的帧 (320p),或是固定分辨率为 340 x 256 的帧,可改变参数 `--new-short 256` 为 `--new-short 320` 或 `--new-width 340 --new-height 256`。
-更多细节可以参考 [数据准备](/docs/zh_cn/data_preparation.md)。
+更多细节可以参考 [数据准备](/docs_zh_CN/data_preparation.md)。
## 4. 生成文件列表
@@ -139,4 +139,4 @@ mmaction2
```
-关于 Kinetics 数据集上的训练与测试,请参照 [基础教程](/docs/zh_cn/getting_started.md)。
+关于 Kinetics 数据集上的训练与测试,请参照 [基础教程](/docs_zh_CN/getting_started.md)。
diff --git a/tools/data/mit/README.md b/tools/data/mit/README.md
index 6e4ef0d37d..e67ca45335 100644
--- a/tools/data/mit/README.md
+++ b/tools/data/mit/README.md
@@ -34,7 +34,7 @@ python ../resize_videos.py ../../../data/mit/videos/ ../../../data/mit/videos_25
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, we recommend extracting frames there for better I/O performance. You can run the following script to soft link the extracted frames.
@@ -125,4 +125,4 @@ mmaction2
```
-For training and evaluating on Moments in Time, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on Moments in Time, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/mit/README_zh-CN.md b/tools/data/mit/README_zh-CN.md
index 21289e34e1..74a3d0c247 100644
--- a/tools/data/mit/README_zh-CN.md
+++ b/tools/data/mit/README_zh-CN.md
@@ -36,7 +36,7 @@ python ../resize_videos.py ../../../data/mit/videos/ ../../../data/mit/videos_25
This part is **optional** if you only want to train with the video loader.
-Before extracting video frames and optical flow, please refer to the [installation guide](/docs/zh_cn/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting video frames and optical flow, please refer to the [installation guide](/docs_zh_CN/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD storage, we recommend storing the extracted frames on the SSD for better I/O performance.
You can use the following commands to create a soft link to the SSD.
@@ -127,4 +127,4 @@ mmaction2
```
-For training and validation on Moments in Time, please refer to the [getting started guide](/docs/zh_cn/getting_started.md).
+For training and validation on Moments in Time, please refer to the [getting started guide](/docs_zh_CN/getting_started.md).
diff --git a/tools/data/mmit/README.md b/tools/data/mmit/README.md
index 3f6b618977..5deedf71d0 100644
--- a/tools/data/mmit/README.md
+++ b/tools/data/mmit/README.md
@@ -32,7 +32,7 @@ python ../resize_videos.py ../../../data/mmit/videos/ ../../../data/mmit/videos_
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
First, you can run the following script to soft link the SSD.
@@ -110,4 +110,4 @@ mmaction2/
└── ...
```
-For training and evaluating on Multi-Moments in Time, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on Multi-Moments in Time, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/mmit/README_zh-CN.md b/tools/data/mmit/README_zh-CN.md
index 31d5cddcde..e070505e34 100644
--- a/tools/data/mmit/README_zh-CN.md
+++ b/tools/data/mmit/README_zh-CN.md
@@ -34,7 +34,7 @@ python ../resize_videos.py ../../../data/mmit/videos/ ../../../data/mmit/videos_
This part is **optional** if you only want to train with the video loader.
-Before extracting video frames and optical flow, please refer to the [installation guide](/docs/zh_cn/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting video frames and optical flow, please refer to the [installation guide](/docs_zh_CN/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD storage, we recommend storing the extracted frames on the SSD for better I/O performance.
You can use the following commands to create a soft link to the SSD.
@@ -112,4 +112,4 @@ mmaction2/
└── ...
```
-For training and validation on Multi-Moments in Time, please refer to the [getting started guide](/docs/zh_cn/getting_started.md).
+For training and validation on Multi-Moments in Time, please refer to the [getting started guide](/docs_zh_CN/getting_started.md).
diff --git a/tools/data/sthv1/README.md b/tools/data/sthv1/README.md
index eb837d435e..75f4c11134 100644
--- a/tools/data/sthv1/README.md
+++ b/tools/data/sthv1/README.md
@@ -65,7 +65,7 @@ data = dict(
This part is **optional** if you only want to use RGB frames.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance.
@@ -141,4 +141,4 @@ mmaction2
```
-For training and evaluating on Something-Something V1, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on Something-Something V1, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/sthv1/README_zh-CN.md b/tools/data/sthv1/README_zh-CN.md
index 7506b4ad5c..11cc9318be 100644
--- a/tools/data/sthv1/README_zh-CN.md
+++ b/tools/data/sthv1/README_zh-CN.md
@@ -63,7 +63,7 @@ data = dict(
This part is **optional** if you only want to train with the raw RGB frames.
-Before extracting video frames and optical flow, please refer to the [installation guide](/docs/zh_cn/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting video frames and optical flow, please refer to the [installation guide](/docs_zh_CN/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD storage, we recommend storing the extracted frames on the SSD for better I/O performance.
@@ -139,4 +139,4 @@ mmaction2
```
-For training and validation on Something-Something V1, please refer to the [getting started guide](/docs/zh_cn/getting_started.md).
+For training and validation on Something-Something V1, please refer to the [getting started guide](/docs_zh_CN/getting_started.md).
diff --git a/tools/data/sthv2/README.md b/tools/data/sthv2/README.md
index ea4c66e270..af112872da 100644
--- a/tools/data/sthv2/README.md
+++ b/tools/data/sthv2/README.md
@@ -36,7 +36,7 @@ cd $MMACTION2/tools/data/sthv2/
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance.
@@ -115,4 +115,4 @@ mmaction2
```
-For training and evaluating on Something-Something V2, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on Something-Something V2, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/sthv2/README_zh-CN.md b/tools/data/sthv2/README_zh-CN.md
index 87cd3558f6..7d8080c5a4 100644
--- a/tools/data/sthv2/README_zh-CN.md
+++ b/tools/data/sthv2/README_zh-CN.md
@@ -36,7 +36,7 @@ cd $MMACTION2/tools/data/sthv2/
This part is **optional** if you only want to train with the video loader.
-Before extracting video frames and optical flow, please refer to the [installation guide](/docs/zh_cn/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting video frames and optical flow, please refer to the [installation guide](/docs_zh_CN/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD storage, we recommend storing the extracted frames on the SSD for better I/O performance.
@@ -115,4 +115,4 @@ mmaction2
```
-For training and validation on Something-Something V2, please refer to the [getting started guide](/docs/zh_cn/getting_started.md).
+For training and validation on Something-Something V2, please refer to the [getting started guide](/docs_zh_CN/getting_started.md).
diff --git a/tools/data/thumos14/README.md b/tools/data/thumos14/README.md
index 8b52284951..eaddb60cbe 100644
--- a/tools/data/thumos14/README.md
+++ b/tools/data/thumos14/README.md
@@ -40,7 +40,7 @@ bash download_videos.sh
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance.
@@ -139,4 +139,4 @@ mmaction2
│ │ │ │ ├── video_test_0000001
```
-For training and evaluating on THUMOS'14, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on THUMOS'14, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/thumos14/README_zh-CN.md b/tools/data/thumos14/README_zh-CN.md
index 05bd862316..fb7140a24e 100644
--- a/tools/data/thumos14/README_zh-CN.md
+++ b/tools/data/thumos14/README_zh-CN.md
@@ -40,7 +40,7 @@ bash download_videos.sh
This part is **optional** if you only want to train with the video loader.
-Before extracting video frames and optical flow, please refer to the [installation guide](/docs/zh_cn/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting video frames and optical flow, please refer to the [installation guide](/docs_zh_CN/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD storage, we recommend storing the extracted frames on the SSD for better I/O performance.
You can use the following commands to create a soft link to the SSD.
@@ -136,4 +136,4 @@ mmaction2
│ │ │ │ ├── video_test_0000001
```
-For training and validation on THUMOS'14, please refer to the [getting started guide](/docs/zh_cn/getting_started.md).
+For training and validation on THUMOS'14, please refer to the [getting started guide](/docs_zh_CN/getting_started.md).
diff --git a/tools/data/ucf101/README.md b/tools/data/ucf101/README.md
index 4d71c1e9f8..abac25f0c7 100644
--- a/tools/data/ucf101/README.md
+++ b/tools/data/ucf101/README.md
@@ -43,7 +43,7 @@ python ../resize_videos.py ../../../data/ucf101/videos/ ../../../data/ucf101/vid
This part is **optional** if you only want to use the video loader.
-Before extracting, please refer to [install.md](/docs/en/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting, please refer to [install.md](/docs/install.md) for installing [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD space, then we recommend extracting frames there for better I/O performance. The extracted frames (RGB + Flow) will take up about 100GB.
@@ -124,4 +124,4 @@ mmaction2
```
-For training and evaluating on UCF-101, please refer to [getting_started.md](/docs/en/getting_started.md).
+For training and evaluating on UCF-101, please refer to [getting_started.md](/docs/getting_started.md).
diff --git a/tools/data/ucf101/README_zh-CN.md b/tools/data/ucf101/README_zh-CN.md
index 28c696a059..96e9453ff4 100644
--- a/tools/data/ucf101/README_zh-CN.md
+++ b/tools/data/ucf101/README_zh-CN.md
@@ -41,7 +41,7 @@ python ../resize_videos.py ../../../data/ucf101/videos/ ../../../data/ucf101/vid
This part is **optional** if you only want to train with the video loader.
-Before extracting video frames and optical flow, please refer to the [installation guide](/docs/zh_cn/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
+Before extracting video frames and optical flow, please refer to the [installation guide](/docs_zh_CN/install.md) to install [denseflow](https://github.com/open-mmlab/denseflow).
If you have plenty of SSD storage, we recommend storing the extracted frames on the SSD for better I/O performance. The extracted frames and optical flow take up about 100 GB of storage.
@@ -122,4 +122,4 @@ mmaction2
```
-For training and validation on UCF-101, please refer to the [getting started guide](/docs/zh_cn/getting_started.md).
+For training and validation on UCF-101, please refer to the [getting started guide](/docs_zh_CN/getting_started.md).