diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1101f7f --- /dev/null +++ b/.gitignore @@ -0,0 +1,51 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Visual Studio Code files +.vscode +.vs + +# PyCharm files +.idea + +# Eclipse Project settings +*.*project +.settings + +# Sublime Text settings +*.sublime-workspace +*.sublime-project + +# Editor temporaries +*.swn +*.swo +*.swp +*.swm +*~ + +# IPython notebook checkpoints +.ipynb_checkpoints + +# macOS dir files +.DS_Store + +exp +data +raw_wav +tensorboard +**/*build* + +# Clangd files +.cache +compile_commands.json + +# train/inference files +*.wav +*.pt +pretrained_models/* +*_pb2_grpc.py +*_pb2.py + +py311/ \ No newline at end of file diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..66c2a4c --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at mikelei@mobvoi.com. 
All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git "a/GPU\350\257\212\346\226\255.bat" "b/GPU\350\257\212\346\226\255.bat" new file mode 100644 index 0000000..d4e7725 --- /dev/null +++ "b/GPU\350\257\212\346\226\255.bat" @@ -0,0 +1,2 @@ +py311\python.exe gpu_diagnostics.py +pause diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..261eeb9 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..db06cb7 --- /dev/null +++ b/README.md @@ -0,0 +1,142 @@ +# CosyVoice +## 👉🏻 [CosyVoice Demos](https://fun-audio-llm.github.io/) 👈🏻 +[[CosyVoice Paper](https://fun-audio-llm.github.io/pdf/CosyVoice_v1.pdf)][[CosyVoice Studio](https://www.modelscope.cn/studios/iic/CosyVoice-300M)][[CosyVoice Code](https://github.com/FunAudioLLM/CosyVoice)] + +For `SenseVoice`, visit [SenseVoice repo](https://github.com/FunAudioLLM/SenseVoice) and [SenseVoice space](https://www.modelscope.cn/studios/iic/SenseVoice). 
+
+## Install
+
+**Clone and install**
+
+- Clone the repo
+``` sh
+git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git
+# If cloning the submodules fails due to network issues, run the following command until it succeeds
+cd CosyVoice
+git submodule update --init --recursive
+```
+
+- Install Conda: please see https://docs.conda.io/en/latest/miniconda.html
+- Create Conda env:
+
+``` sh
+conda create -n cosyvoice python=3.11
+conda activate cosyvoice
+pip install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ --trusted-host=mirrors.aliyun.com
+
+# On Windows, there is no need to install sox
+```
+
+**Model download**
+
+We strongly recommend that you download our pretrained `CosyVoice-300M`, `CosyVoice-300M-SFT` and `CosyVoice-300M-Instruct` models, as well as the `speech_kantts_ttsfrd` resource.
+
+If you are an expert in this field and are only interested in training your own CosyVoice model from scratch, you can skip this step.
+
+``` python
+# Download the models via the ModelScope SDK
+from modelscope import snapshot_download
+snapshot_download('iic/CosyVoice-300M', local_dir='pretrained_models/CosyVoice-300M')
+snapshot_download('iic/CosyVoice-300M-SFT', local_dir='pretrained_models/CosyVoice-300M-SFT')
+snapshot_download('iic/CosyVoice-300M-Instruct', local_dir='pretrained_models/CosyVoice-300M-Instruct')
+snapshot_download('speech_tts/speech_kantts_ttsfrd', local_dir='pretrained_models/speech_kantts_ttsfrd')
+```
+
+``` sh
+# Download the models via git; make sure git lfs is installed first
+mkdir -p pretrained_models
+git clone https://www.modelscope.cn/iic/CosyVoice-300M.git pretrained_models/CosyVoice-300M
+git clone https://www.modelscope.cn/iic/CosyVoice-300M-SFT.git pretrained_models/CosyVoice-300M-SFT
+git clone https://www.modelscope.cn/iic/CosyVoice-300M-Instruct.git pretrained_models/CosyVoice-300M-Instruct
+git clone https://www.modelscope.cn/speech_tts/speech_kantts_ttsfrd.git pretrained_models/speech_kantts_ttsfrd
+```
+
+**Basic Usage**
+
+For zero_shot/cross_lingual inference, please use the `CosyVoice-300M` model.
+For sft inference, please use the `CosyVoice-300M-SFT` model.
+For instruct inference, please use the `CosyVoice-300M-Instruct` model.
+First, add `third_party/AcademiCodec` and `third_party/Matcha-TTS` to your `PYTHONPATH`.
+
+``` sh
+set PYTHONPATH=third_party/AcademiCodec;third_party/Matcha-TTS
+```
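+
+The `set` command above is Windows `cmd` syntax. On a Linux/macOS shell, the equivalent (assuming the same `third_party` submodule layout) should be:
+
+``` sh
+export PYTHONPATH=third_party/AcademiCodec:third_party/Matcha-TTS
+```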
+
+``` python
+from cosyvoice.cli.cosyvoice import CosyVoice
+from cosyvoice.utils.file_utils import load_wav
+import torchaudio
+
+cosyvoice = CosyVoice('speech_tts/CosyVoice-300M-SFT')
+# sft usage
+print(cosyvoice.list_avaliable_spks())
+output = cosyvoice.inference_sft('你好,我是通义生成式语音大模型,请问有什么可以帮您的吗?', '中文女')
+torchaudio.save('sft.wav', output['tts_speech'], 22050)
+
+cosyvoice = CosyVoice('speech_tts/CosyVoice-300M')
+# zero_shot usage
+prompt_speech_16k = load_wav('zero_shot_prompt.wav', 16000)
+output = cosyvoice.inference_zero_shot('收到好友从远方寄来的生日礼物,那份意外的惊喜与深深的祝福让我心中充满了甜蜜的快乐,笑容如花儿般绽放。', '希望你以后能够做的比我还好呦。', prompt_speech_16k)
+torchaudio.save('zero_shot.wav', output['tts_speech'], 22050)
+# cross_lingual usage
+prompt_speech_16k = load_wav('cross_lingual_prompt.wav', 16000)
+output = cosyvoice.inference_cross_lingual('<|en|>And then later on, fully acquiring that company. So keeping management in line, interest in line with the asset that\'s coming into the family is a reason why sometimes we don\'t buy the whole thing.', prompt_speech_16k)
+torchaudio.save('cross_lingual.wav', output['tts_speech'], 22050)
+
+cosyvoice = CosyVoice('speech_tts/CosyVoice-300M-Instruct')
+# instruct usage
+output = cosyvoice.inference_instruct('在面对挑战时,他展现了非凡的勇气智慧。', '中文男', 'Theo \'Crimson\', is a fiery, passionate rebel leader. Fights with fervor for justice, but struggles with impulsiveness.')
+torchaudio.save('instruct.wav', output['tts_speech'], 22050)
+```
+
+**Start web demo**
+
+You can use our web demo page to get familiar with CosyVoice quickly.
+We support sft/zero_shot/cross_lingual/instruct inference in the web demo.
+
+Please see the demo website for details.
+
+``` sh
+# change speech_tts/CosyVoice-300M-SFT for sft inference, or speech_tts/CosyVoice-300M-Instruct for instruct inference
+python3 webui.py --port 9886 --model_dir ./pretrained_models/CosyVoice-300M
+```
+
+**Advanced Usage**
+
+For advanced users, we have provided training and inference scripts in `examples/libritts/cosyvoice/run.sh`.
+You can get familiar with CosyVoice by following this recipe.
+
+**Build for deployment**
+
+Optionally, if you want to use grpc for service deployment,
+you can run the following steps. Otherwise, you can skip this step.
+
+``` sh
+cd runtime/python
+docker build -t cosyvoice:v1.0 .
+# change speech_tts/CosyVoice-300M to speech_tts/CosyVoice-300M-Instruct if you want to use instruct inference
+docker run -d --runtime=nvidia -p 50000:50000 cosyvoice:v1.0 /bin/bash -c "cd /opt/CosyVoice/CosyVoice/runtime/python && python3 server.py --port 50000 --max_conc 4 --model_dir speech_tts/CosyVoice-300M && sleep infinity"
+python3 client.py --port 50000 --mode 
+```
+
+## Discussion & Communication
+
+You can directly discuss on [Github Issues](https://github.com/FunAudioLLM/CosyVoice/issues).
+
+You can also scan the QR code to join our official Dingding chat group.
+
+
+
+## Acknowledgements
+
+1. We borrowed a lot of code from [FunASR](https://github.com/modelscope/FunASR).
+2. We borrowed a lot of code from [FunCodec](https://github.com/modelscope/FunCodec).
+3. We borrowed a lot of code from [Matcha-TTS](https://github.com/shivammehta25/Matcha-TTS).
+4. We borrowed a lot of code from [AcademiCodec](https://github.com/yangdongchao/AcademiCodec).
+5. We borrowed a lot of code from [WeNet](https://github.com/wenet-e2e/wenet).
+
+## Disclaimer
+The content provided above is for academic purposes only and is intended to demonstrate technical capabilities. Some examples are sourced from the internet. If any content infringes on your rights, please contact us to request its removal.
diff --git a/asset/dingding.png b/asset/dingding.png
new file mode 100644
index 0000000..9a64400
Binary files /dev/null and b/asset/dingding.png differ
diff --git a/cosyvoice/__init__.py b/cosyvoice/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/cosyvoice/bin/inference.py b/cosyvoice/bin/inference.py
new file mode 100644
index 0000000..6b777fa
--- /dev/null
+++ b/cosyvoice/bin/inference.py
@@ -0,0 +1,114 @@
+# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from __future__ import print_function + +import argparse +import logging +logging.getLogger('matplotlib').setLevel(logging.WARNING) +import os + +import torch +from torch.utils.data import DataLoader +import torchaudio +from hyperpyyaml import load_hyperpyyaml +from tqdm import tqdm +from cosyvoice.cli.model import CosyVoiceModel + +from cosyvoice.dataset.dataset import Dataset + +def get_args(): + parser = argparse.ArgumentParser(description='inference with your model') + parser.add_argument('--config', required=True, help='config file') + parser.add_argument('--prompt_data', required=True, help='prompt data file') + parser.add_argument('--prompt_utt2data', required=True, help='prompt data file') + parser.add_argument('--tts_text', required=True, help='tts input file') + parser.add_argument('--llm_model', required=True, help='llm model file') + parser.add_argument('--flow_model', required=True, help='flow model file') + parser.add_argument('--hifigan_model', required=True, help='hifigan model file') + parser.add_argument('--gpu', + type=int, + default=-1, + help='gpu id for this rank, -1 for cpu') + parser.add_argument('--mode', + default='sft', + choices=['sft', 'zero_shot'], + help='inference mode') + parser.add_argument('--result_dir', required=True, help='asr result file') + args = parser.parse_args() + print(args) + return args + + +def main(): + args = get_args() + logging.basicConfig(level=logging.DEBUG, + format='%(asctime)s %(levelname)s %(message)s') + os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu) + + # Init cosyvoice models from configs + use_cuda = args.gpu >= 0 and torch.cuda.is_available() + device = torch.device('cuda' if use_cuda else 'cpu') + with open(args.config, 'r') as f: + configs = load_hyperpyyaml(f) + + model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift']) + model.load(args.llm_model, args.flow_model, args.hifigan_model) + + test_dataset = Dataset(args.prompt_data, data_pipeline=configs['data_pipeline'], mode='inference', shuffle=False, partition=False, tts_file=args.tts_text, prompt_utt2data=args.prompt_utt2data) + test_data_loader = DataLoader(test_dataset, batch_size=None, num_workers=0) + + del configs + os.makedirs(args.result_dir, exist_ok=True) + fn = os.path.join(args.result_dir, 'wav.scp') + f = open(fn, 'w') + with torch.no_grad(): + for batch_idx, batch in tqdm(enumerate(test_data_loader)): + utts = batch["utts"] + assert len(utts) == 1, "inference mode only support batchsize 1" + text = batch["text"] + text_token = batch["text_token"].to(device) + text_token_len = batch["text_token_len"].to(device) + tts_text = batch["tts_text"] + tts_index = batch["tts_index"] + tts_text_token = batch["tts_text_token"].to(device) + tts_text_token_len = batch["tts_text_token_len"].to(device) + speech_token = batch["speech_token"].to(device) + speech_token_len = batch["speech_token_len"].to(device) + speech_feat = batch["speech_feat"].to(device) + speech_feat_len = batch["speech_feat_len"].to(device) + utt_embedding = batch["utt_embedding"].to(device) + spk_embedding = batch["spk_embedding"].to(device) + if args.mode == 'sft': + 
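+                # sft: condition the llm/flow only on the tts text tokens plus the
+                # speaker-level embedding; the zero_shot branch below additionally passes
+                # the prompt text, prompt speech tokens and prompt features, and uses the
+                # utterance-level embedding instead.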
model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, + 'llm_embedding': spk_embedding, 'flow_embedding': spk_embedding} + else: + model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, + 'prompt_text': text_token, 'prompt_text_len': text_token_len, + 'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len, + 'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len, + 'prompt_speech_feat': speech_feat, 'prompt_speech_feat_len': speech_feat_len, + 'llm_embedding': utt_embedding, 'flow_embedding': utt_embedding} + model_output = model.inference(**model_input) + tts_key = '{}_{}'.format(utts[0], tts_index[0]) + tts_fn = os.path.join(args.result_dir, '{}.wav'.format(tts_key)) + torchaudio.save(tts_fn, model_output['tts_speech'], sample_rate=22050) + f.write('{} {}\n'.format(tts_key, tts_fn)) + f.flush() + f.close() + logging.info('Result wav.scp saved in {}'.format(fn)) + + +if __name__ == '__main__': + main() diff --git a/cosyvoice/bin/train.py b/cosyvoice/bin/train.py new file mode 100644 index 0000000..a9d0e05 --- /dev/null +++ b/cosyvoice/bin/train.py @@ -0,0 +1,136 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
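+
+# This script trains a single CosyVoice module ('llm', 'flow' or 'hift', chosen via
+# --model) with either torch DDP or DeepSpeed. The other two modules are overridden
+# to None when the hyperpyyaml config is loaded, so only the selected one is built.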
+ +from __future__ import print_function +import argparse +import datetime +import logging +logging.getLogger('matplotlib').setLevel(logging.WARNING) +from copy import deepcopy +import torch +import torch.distributed as dist +import deepspeed + +from hyperpyyaml import load_hyperpyyaml + +from torch.distributed.elastic.multiprocessing.errors import record + +from cosyvoice.utils.executor import Executor +from cosyvoice.utils.train_utils import ( + init_distributed, + init_dataset_and_dataloader, + init_optimizer_and_scheduler, + init_summarywriter, save_model, + wrap_cuda_model, check_modify_and_save_config) + + +def get_args(): + parser = argparse.ArgumentParser(description='training your network') + parser.add_argument('--train_engine', + default='torch_ddp', + choices=['torch_ddp', 'deepspeed'], + help='Engine for paralleled training') + parser.add_argument('--model', required=True, help='model which will be trained') + parser.add_argument('--config', required=True, help='config file') + parser.add_argument('--train_data', required=True, help='train data file') + parser.add_argument('--cv_data', required=True, help='cv data file') + parser.add_argument('--checkpoint', help='checkpoint model') + parser.add_argument('--model_dir', required=True, help='save model dir') + parser.add_argument('--tensorboard_dir', + default='tensorboard', + help='tensorboard log dir') + parser.add_argument('--ddp.dist_backend', + dest='dist_backend', + default='nccl', + choices=['nccl', 'gloo'], + help='distributed backend') + parser.add_argument('--num_workers', + default=0, + type=int, + help='num of subprocess workers for reading') + parser.add_argument('--prefetch', + default=100, + type=int, + help='prefetch number') + parser.add_argument('--pin_memory', + action='store_true', + default=False, + help='Use pinned memory buffers used for reading') + parser.add_argument('--deepspeed.save_states', + dest='save_states', + default='model_only', + choices=['model_only', 'model+optimizer'], + help='save model/optimizer states') + parser.add_argument('--timeout', + default=30, + type=int, + help='timeout (in seconds) of cosyvoice_join.') + parser = deepspeed.add_config_arguments(parser) + args = parser.parse_args() + return args + + +@record +def main(): + args = get_args() + logging.basicConfig(level=logging.DEBUG, + format='%(asctime)s %(levelname)s %(message)s') + + override_dict = {k: None for k in ['llm', 'flow', 'hift'] if k != args.model} + with open(args.config, 'r') as f: + configs = load_hyperpyyaml(f, overrides=override_dict) + configs['train_conf'].update(vars(args)) + + # Init env for ddp + init_distributed(args) + + # Get dataset & dataloader + train_dataset, cv_dataset, train_data_loader, cv_data_loader = \ + init_dataset_and_dataloader(args, configs) + + # Do some sanity checks and save config to arsg.model_dir + configs = check_modify_and_save_config(args, configs) + + # Tensorboard summary + writer = init_summarywriter(args) + + # load checkpoint + model = configs[args.model] + if args.checkpoint is not None: + model.load_state_dict(torch.load(args.checkpoint, map_location='cpu')) + + # Dispatch model from cpu to gpu + model = wrap_cuda_model(args, model) + + # Get optimizer & scheduler + model, optimizer, scheduler = init_optimizer_and_scheduler(args, configs, model) + + # Save init checkpoints + info_dict = deepcopy(configs['train_conf']) + save_model(model, 'init', info_dict) + + # Get executor + executor = Executor() + + # Start training loop + for epoch in range(info_dict['max_epoch']): + 
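+        # Each epoch: re-seed the dataset shuffle, sync all ranks, then create a
+        # short-lived gloo group with the configured --timeout, which is passed to the
+        # executor (see cosyvoice_join) so ranks whose data runs out at different times
+        # can still coordinate; the group is destroyed at the end of the epoch.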
executor.epoch = epoch + train_dataset.set_epoch(epoch) + dist.barrier() + group_join = dist.new_group(backend="gloo", timeout=datetime.timedelta(seconds=args.timeout)) + executor.train_one_epoc(model, optimizer, scheduler, train_data_loader, cv_data_loader, writer, info_dict, group_join) + dist.destroy_process_group(group_join) + +if __name__ == '__main__': + main() diff --git a/cosyvoice/cli/__init__.py b/cosyvoice/cli/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cosyvoice/cli/cosyvoice.py b/cosyvoice/cli/cosyvoice.py new file mode 100644 index 0000000..9ee7d93 --- /dev/null +++ b/cosyvoice/cli/cosyvoice.py @@ -0,0 +1,87 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import torch +import torchaudio +torchaudio.set_audio_backend('soundfile') +from hyperpyyaml import load_hyperpyyaml +from modelscope import snapshot_download +from cosyvoice.cli.frontend import CosyVoiceFrontEnd +from cosyvoice.cli.model import CosyVoiceModel + +class CosyVoice: + + def __init__(self, model_dir): + instruct = True if '-Instruct' in model_dir else False + self.model_dir = model_dir + if not os.path.exists(model_dir): + model_dir = snapshot_download(model_dir) + with open('{}/cosyvoice.yaml'.format(model_dir), 'r') as f: + configs = load_hyperpyyaml(f) + self.frontend = CosyVoiceFrontEnd(configs['get_tokenizer'], + configs['feat_extractor'], + '{}/campplus.onnx'.format(model_dir), + '{}/speech_tokenizer_v1.onnx'.format(model_dir), + '{}/spk2info.pt'.format(model_dir), + instruct, + configs['allowed_special']) + self.model = CosyVoiceModel(configs['llm'], configs['flow'], configs['hift']) + self.model.load('{}/llm.pt'.format(model_dir), + '{}/flow.pt'.format(model_dir), + '{}/hift.pt'.format(model_dir)) + del configs + + def list_avaliable_spks(self): + spks = list(self.frontend.spk2info.keys()) + return spks + + def inference_sft(self, tts_text, spk_id): + tts_speeches = [] + for i in self.frontend.text_normalize(tts_text, split=True): + model_input = self.frontend.frontend_sft(i, spk_id) + print(i) + model_output = self.model.inference(**model_input) + print(model_output) + tts_speeches.append(model_output['tts_speech']) + return {'tts_speech': torch.concat(tts_speeches, dim=1)} + + def inference_zero_shot(self, tts_text, prompt_text, prompt_speech_16k): + prompt_text = self.frontend.text_normalize(prompt_text, split=False) + tts_speeches = [] + for i in self.frontend.text_normalize(tts_text, split=True): + model_input = self.frontend.frontend_zero_shot(i, prompt_text, prompt_speech_16k) + model_output = self.model.inference(**model_input) + tts_speeches.append(model_output['tts_speech']) + return {'tts_speech': torch.concat(tts_speeches, dim=1)} + + def inference_cross_lingual(self, tts_text, prompt_speech_16k): + if self.frontend.instruct is True: + raise ValueError('{} do not support cross_lingual inference'.format(self.model_dir)) + tts_speeches = [] + for i in self.frontend.text_normalize(tts_text, split=True): + 
model_input = self.frontend.frontend_cross_lingual(i, prompt_speech_16k) + model_output = self.model.inference(**model_input) + tts_speeches.append(model_output['tts_speech']) + return {'tts_speech': torch.concat(tts_speeches, dim=1)} + + def inference_instruct(self, tts_text, spk_id, instruct_text): + if self.frontend.instruct is False: + raise ValueError('{} do not support instruct inference'.format(self.model_dir)) + instruct_text = self.frontend.text_normalize(instruct_text, split=False) + tts_speeches = [] + for i in self.frontend.text_normalize(tts_text, split=True): + model_input = self.frontend.frontend_instruct(i, spk_id, instruct_text) + model_output = self.model.inference(**model_input) + tts_speeches.append(model_output['tts_speech']) + return {'tts_speech': torch.concat(tts_speeches, dim=1)} diff --git a/cosyvoice/cli/frontend.py b/cosyvoice/cli/frontend.py new file mode 100644 index 0000000..9b833bc --- /dev/null +++ b/cosyvoice/cli/frontend.py @@ -0,0 +1,146 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from functools import partial +import onnxruntime +import torch +import numpy as np +import whisper +from typing import Callable +import torchaudio.compliance.kaldi as kaldi +import torchaudio +import os +import inflect +# import ttsfrd +from cosyvoice.utils.frontend_utils import contains_chinese, replace_blank, replace_corner_mark, remove_bracket, spell_out_number, split_paragraph + + +class CosyVoiceFrontEnd: + + def __init__(self, + get_tokenizer: Callable, + feat_extractor: Callable, + campplus_model: str, + speech_tokenizer_model: str, + spk2info: str = '', + instruct: bool = False, + allowed_special: str = 'all'): + self.tokenizer = get_tokenizer() + self.feat_extractor = feat_extractor + self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + option = onnxruntime.SessionOptions() + option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL + option.intra_op_num_threads = 1 + self.campplus_session = onnxruntime.InferenceSession(campplus_model, sess_options=option, providers=["CPUExecutionProvider"]) + self.speech_tokenizer_session = onnxruntime.InferenceSession(speech_tokenizer_model, sess_options=option, providers=["CUDAExecutionProvider"if torch.cuda.is_available() else "CPUExecutionProvider"]) + if os.path.exists(spk2info): + self.spk2info = torch.load(spk2info, map_location=self.device) + self.instruct = instruct + self.allowed_special = allowed_special + self.inflect_parser = inflect.engine() + self.frd = None + ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) + # assert self.frd.initialize('{}/../../pretrained_models/speech_kantts_ttsfrd/resource'.format(ROOT_DIR)) is True, 'failed to initialize ttsfrd resource' + # self.frd.set_lang_type('pinyin') + # self.frd.enable_pinyin_mix(True) + # self.frd.set_breakmodel_index(1) + + def _extract_text_token(self, text): + text_token = self.tokenizer.encode(text, allowed_special=self.allowed_special) + text_token = 
torch.tensor([text_token], dtype=torch.int32).to(self.device) + text_token_len = torch.tensor([text_token.shape[1]], dtype=torch.int32).to(self.device) + return text_token, text_token_len + + def _extract_speech_token(self, speech): + feat = whisper.log_mel_spectrogram(speech, n_mels=128) + speech_token = self.speech_tokenizer_session.run(None, {self.speech_tokenizer_session.get_inputs()[0].name: feat.detach().cpu().numpy(), + self.speech_tokenizer_session.get_inputs()[1].name: np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist() + speech_token = torch.tensor([speech_token], dtype=torch.int32).to(self.device) + speech_token_len = torch.tensor([speech_token.shape[1]], dtype=torch.int32).to(self.device) + return speech_token, speech_token_len + + def _extract_spk_embedding(self, speech): + feat = kaldi.fbank(speech, + num_mel_bins=80, + dither=0, + sample_frequency=16000) + feat = feat - feat.mean(dim=0, keepdim=True) + embedding = self.campplus_session.run(None, {self.campplus_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist() + embedding = torch.tensor([embedding]).to(self.device) + return embedding + + def _extract_speech_feat(self, speech): + speech_feat = self.feat_extractor(speech).squeeze(dim=0).transpose(0, 1).to(self.device) + speech_feat = speech_feat.unsqueeze(dim=0) + speech_feat_len = torch.tensor([speech_feat.shape[1]], dtype=torch.int32).to(self.device) + return speech_feat, speech_feat_len + + def text_normalize(self, text, split=True): + text = text.strip() + if contains_chinese(text): + # text = self.frd.get_frd_extra_info(text, 'input').replace("\n", "") + text = replace_blank(text) + text = replace_corner_mark(text) + text = text.replace(".", "、") + text = text.replace(" - ", ",") + text = remove_bracket(text) + texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "zh", token_max_n=80, + token_min_n=60, merge_len=20, + comma_split=False)] + else: + text = spell_out_number(text, self.inflect_parser) + texts = [i for i in split_paragraph(text, partial(self.tokenizer.encode, allowed_special=self.allowed_special), "en", token_max_n=80, + token_min_n=60, merge_len=20, + comma_split=False)] + if split is False: + return text + return texts + + def frontend_sft(self, tts_text, spk_id): + tts_text_token, tts_text_token_len = self._extract_text_token(tts_text) + embedding = self.spk2info[spk_id]['embedding'] + model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, 'llm_embedding': embedding, 'flow_embedding': embedding} + return model_input + + def frontend_zero_shot(self, tts_text, prompt_text, prompt_speech_16k): + tts_text_token, tts_text_token_len = self._extract_text_token(tts_text) + prompt_text_token, prompt_text_token_len = self._extract_text_token(prompt_text) + prompt_speech_22050 = torchaudio.transforms.Resample(orig_freq=16000, new_freq=22050)(prompt_speech_16k) + speech_feat, speech_feat_len = self._extract_speech_feat(prompt_speech_22050) + speech_token, speech_token_len = self._extract_speech_token(prompt_speech_16k) + embedding = self._extract_spk_embedding(prompt_speech_16k) + model_input = {'text': tts_text_token, 'text_len': tts_text_token_len, + 'prompt_text': prompt_text_token, 'prompt_text_len': prompt_text_token_len, + 'llm_prompt_speech_token': speech_token, 'llm_prompt_speech_token_len': speech_token_len, + 'flow_prompt_speech_token': speech_token, 'flow_prompt_speech_token_len': speech_token_len, + 'prompt_speech_feat': 
speech_feat, 'prompt_speech_feat_len': speech_feat_len, + 'llm_embedding': embedding, 'flow_embedding': embedding} + return model_input + + def frontend_cross_lingual(self, tts_text, prompt_speech_16k): + model_input = self.frontend_zero_shot(tts_text, '', prompt_speech_16k) + # in cross lingual mode, we remove prompt in llm + del model_input['prompt_text'] + del model_input['prompt_text_len'] + del model_input['llm_prompt_speech_token'] + del model_input['llm_prompt_speech_token_len'] + return model_input + + def frontend_instruct(self, tts_text, spk_id, instruct_text): + model_input = self.frontend_sft(tts_text, spk_id) + # in instruct mode, we remove spk_embedding in llm due to information leakage + del model_input['llm_embedding'] + instruct_text_token, instruct_text_token_len = self._extract_text_token(instruct_text + '') + model_input['prompt_text'] = instruct_text_token + model_input['prompt_text_len'] = instruct_text_token_len + return model_input diff --git a/cosyvoice/cli/model.py b/cosyvoice/cli/model.py new file mode 100644 index 0000000..98f19b2 --- /dev/null +++ b/cosyvoice/cli/model.py @@ -0,0 +1,59 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch + +class CosyVoiceModel: + + def __init__(self, + llm: torch.nn.Module, + flow: torch.nn.Module, + hift: torch.nn.Module): + self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + self.llm = llm + self.flow = flow + self.hift = hift + + def load(self, llm_model, flow_model, hift_model): + self.llm.load_state_dict(torch.load(llm_model, map_location=self.device)) + self.llm.to(self.device).eval() + self.flow.load_state_dict(torch.load(flow_model, map_location=self.device)) + self.flow.to(self.device).eval() + self.hift.load_state_dict(torch.load(hift_model, map_location=self.device)) + self.hift.to(self.device).eval() + + def inference(self, text, text_len, flow_embedding, llm_embedding=torch.zeros(0, 192), + prompt_text=torch.zeros(1, 0, dtype=torch.int32), prompt_text_len=torch.zeros(1, dtype=torch.int32), + llm_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), llm_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32), + flow_prompt_speech_token=torch.zeros(1, 0, dtype=torch.int32), flow_prompt_speech_token_len=torch.zeros(1, dtype=torch.int32), + prompt_speech_feat=torch.zeros(1, 0, 80), prompt_speech_feat_len=torch.zeros(1, dtype=torch.int32)): + tts_speech_token = self.llm.inference(text=text.to(self.device), + text_len=text_len.to(self.device), + prompt_text=prompt_text.to(self.device), + prompt_text_len=prompt_text_len.to(self.device), + prompt_speech_token=llm_prompt_speech_token.to(self.device), + prompt_speech_token_len=llm_prompt_speech_token_len.to(self.device), + embedding=llm_embedding.to(self.device), + beam_size=1, + sampling=25, + max_token_text_ratio=30, + min_token_text_ratio=3) + tts_mel = self.flow.inference(token=tts_speech_token, + token_len=torch.tensor([tts_speech_token.size(1)], 
dtype=torch.int32).to(self.device), + prompt_token=flow_prompt_speech_token.to(self.device), + prompt_token_len=flow_prompt_speech_token_len.to(self.device), + prompt_feat=prompt_speech_feat.to(self.device), + prompt_feat_len=prompt_speech_feat_len.to(self.device), + embedding=flow_embedding.to(self.device)) + tts_speech = self.hift.inference(mel=tts_mel).cpu() + return {'tts_speech': tts_speech} diff --git a/cosyvoice/dataset/__init__.py b/cosyvoice/dataset/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cosyvoice/dataset/dataset.py b/cosyvoice/dataset/dataset.py new file mode 100644 index 0000000..431fae1 --- /dev/null +++ b/cosyvoice/dataset/dataset.py @@ -0,0 +1,160 @@ +# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang) +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import random +import json +import math +from functools import partial + +import torch +import torch.distributed as dist +from torch.utils.data import IterableDataset +from cosyvoice.utils.file_utils import read_lists, read_json_lists + + +class Processor(IterableDataset): + + def __init__(self, source, f, *args, **kw): + assert callable(f) + self.source = source + self.f = f + self.args = args + self.kw = kw + + def set_epoch(self, epoch): + self.source.set_epoch(epoch) + + def __iter__(self): + """ Return an iterator over the source dataset processed by the + given processor. 
+ """ + assert self.source is not None + assert callable(self.f) + return self.f(iter(self.source), *self.args, **self.kw) + + def apply(self, f): + assert callable(f) + return Processor(self, f, *self.args, **self.kw) + + +class DistributedSampler: + + def __init__(self, shuffle=True, partition=True): + self.epoch = -1 + self.update() + self.shuffle = shuffle + self.partition = partition + + def update(self): + assert dist.is_available() + if dist.is_initialized(): + self.rank = dist.get_rank() + self.world_size = dist.get_world_size() + else: + self.rank = 0 + self.world_size = 1 + worker_info = torch.utils.data.get_worker_info() + if worker_info is None: + self.worker_id = 0 + self.num_workers = 1 + else: + self.worker_id = worker_info.id + self.num_workers = worker_info.num_workers + return dict(rank=self.rank, + world_size=self.world_size, + worker_id=self.worker_id, + num_workers=self.num_workers) + + def set_epoch(self, epoch): + self.epoch = epoch + + def sample(self, data): + """ Sample data according to rank/world_size/num_workers + + Args: + data(List): input data list + + Returns: + List: data list after sample + """ + data = list(range(len(data))) + # force datalist even + if self.partition: + if self.shuffle: + random.Random(self.epoch).shuffle(data) + if len(data) < self.world_size: + data = data * math.ceil(self.world_size / len(data)) + data = data[:self.world_size] + data = data[self.rank::self.world_size] + if len(data) < self.num_workers: + data = data * math.ceil(self.num_workers / len(data)) + data = data[:self.num_workers] + data = data[self.worker_id::self.num_workers] + return data + + +class DataList(IterableDataset): + + def __init__(self, lists, shuffle=True, partition=True): + self.lists = lists + self.sampler = DistributedSampler(shuffle, partition) + + def set_epoch(self, epoch): + self.sampler.set_epoch(epoch) + + def __iter__(self): + sampler_info = self.sampler.update() + indexes = self.sampler.sample(self.lists) + for index in indexes: + data = dict(src=self.lists[index]) + data.update(sampler_info) + yield data + + +def Dataset(data_list_file, + data_pipeline, + mode='train', + shuffle=True, + partition=True, + tts_file='', + prompt_utt2data=''): + """ Construct dataset from arguments + + We have two shuffle stage in the Dataset. The first is global + shuffle at shards tar/raw file level. The second is global shuffle + at training samples level. 
+ + Args: + data_type(str): raw/shard + tokenizer (BaseTokenizer): tokenizer to tokenize + partition(bool): whether to do data partition in terms of rank + """ + assert mode in ['train', 'inference'] + lists = read_lists(data_list_file) + if mode == 'inference': + with open(tts_file) as f: + tts_data = json.load(f) + utt2lists = read_json_lists(prompt_utt2data) + # filter unnecessary file in inference mode + lists = list(set([utt2lists[utt] for utt in tts_data.keys() if utt2lists[utt] in lists])) + dataset = DataList(lists, + shuffle=shuffle, + partition=partition) + if mode == 'inference': + # map partial arg tts_data in inference mode + data_pipeline[0] = partial(data_pipeline[0], tts_data=tts_data) + for func in data_pipeline: + dataset = Processor(dataset, func, mode=mode) + return dataset diff --git a/cosyvoice/dataset/processor.py b/cosyvoice/dataset/processor.py new file mode 100644 index 0000000..d177cdc --- /dev/null +++ b/cosyvoice/dataset/processor.py @@ -0,0 +1,366 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import logging +import random + +import pyarrow.parquet as pq +from io import BytesIO +import torch +import torchaudio +from torch.nn.utils.rnn import pad_sequence +import torch.nn.functional as F + +torchaudio.set_audio_backend('soundfile') +# torchaudio.utils.sox_utils.set_buffer_size(16500) + +AUDIO_FORMAT_SETS = set(['flac', 'mp3', 'm4a', 'ogg', 'opus', 'wav', 'wma']) + + +def parquet_opener(data, mode='train', tts_data={}): + """ Give url or local file, return file descriptor + Inplace operation. + + Args: + data(Iterable[str]): url or local file list + + Returns: + Iterable[{src, stream}] + """ + for sample in data: + assert 'src' in sample + url = sample['src'] + try: + df = pq.read_table(url).to_pandas() + for i in range(len(df)): + if mode == 'inference' and df.loc[i, 'utt'] not in tts_data: + continue + sample.update(dict(df.loc[i])) + if mode == 'train': + # NOTE do not return sample directly, must initialize a new dict + yield {**sample} + else: + for index, text in enumerate(tts_data[df.loc[i, 'utt']]): + yield {**sample, 'tts_index': index, 'tts_text': text} + except Exception as ex: + logging.warning('Failed to open {}, ex info {}'.format(url, ex)) + +def filter(data, + max_length=10240, + min_length=10, + token_max_length=200, + token_min_length=1, + min_output_input_ratio=0.0005, + max_output_input_ratio=1, + mode='train'): + """ Filter sample according to feature and label length + Inplace operation. 
+ + Args:: + data: Iterable[{key, wav, label, sample_rate}] + max_length: drop utterance which is greater than max_length(10ms) + min_length: drop utterance which is less than min_length(10ms) + token_max_length: drop utterance which is greater than + token_max_length, especially when use char unit for + english modeling + token_min_length: drop utterance which is + less than token_max_length + min_output_input_ratio: minimal ration of + token_length / feats_length(10ms) + max_output_input_ratio: maximum ration of + token_length / feats_length(10ms) + + Returns: + Iterable[{key, wav, label, sample_rate}] + """ + for sample in data: + sample['speech'], sample['sample_rate'] = torchaudio.load(BytesIO(sample['audio_data'])) + del sample['audio_data'] + # sample['wav'] is torch.Tensor, we have 100 frames every second + num_frames = sample['speech'].size(1) / sample['sample_rate'] * 100 + if num_frames < min_length: + continue + if num_frames > max_length: + continue + if len(sample['text_token']) < token_min_length: + continue + if len(sample['text_token']) > token_max_length: + continue + if len(sample['speech_token']) == 0: + continue + if num_frames != 0: + if len(sample['text_token']) / num_frames < min_output_input_ratio: + continue + if len(sample['text_token']) / num_frames > max_output_input_ratio: + continue + yield sample + + +def resample(data, resample_rate=22050, mode='train'): + """ Resample data. + Inplace operation. + + Args: + data: Iterable[{key, wav, label, sample_rate}] + resample_rate: target resample rate + + Returns: + Iterable[{key, wav, label, sample_rate}] + """ + for sample in data: + assert 'sample_rate' in sample + assert 'speech' in sample + sample_rate = sample['sample_rate'] + waveform = sample['speech'] + if sample_rate != resample_rate: + if sample_rate < resample_rate: + continue + sample['sample_rate'] = resample_rate + sample['speech'] = torchaudio.transforms.Resample( + orig_freq=sample_rate, new_freq=resample_rate)(waveform) + max_val = sample['speech'].abs().max() + if max_val > 1: + sample['speech'] /= max_val + yield sample + + +def compute_fbank(data, + feat_extractor, + mode='train'): + """ Extract fbank + + Args: + data: Iterable[{key, wav, label, sample_rate}] + + Returns: + Iterable[{key, feat, label}] + """ + for sample in data: + assert 'sample_rate' in sample + assert 'speech' in sample + assert 'utt' in sample + assert 'text_token' in sample + waveform = sample['speech'] + mat = feat_extractor(waveform).squeeze(dim=0).transpose(0, 1) + sample['speech_feat'] = mat + del sample['speech'] + yield sample + + +def parse_embedding(data, normalize, mode='train'): + """ Parse utt_embedding/spk_embedding + + Args: + data: Iterable[{key, wav, label, sample_rate}] + + Returns: + Iterable[{key, feat, label}] + """ + for sample in data: + sample['utt_embedding'] = torch.tensor(sample['utt_embedding'], dtype=torch.float32) + sample['spk_embedding'] = torch.stack([torch.tensor(i, dtype=torch.float32) for i in sample['spk_embedding']], dim=0).mean(dim=0) + if normalize: + sample['utt_embedding'] = F.normalize(sample['utt_embedding'], dim=0) + sample['spk_embedding'] = F.normalize(sample['spk_embedding'], dim=0) + yield sample + + +def tokenize(data, get_tokenizer, allowed_special, mode='train'): + """ Decode text to chars or BPE + Inplace operation + + Args: + data: Iterable[{key, wav, txt, sample_rate}] + + Returns: + Iterable[{key, wav, txt, tokens, label, sample_rate}] + """ + tokenizer = get_tokenizer() + for sample in data: + assert 'text' in sample + 
sample['text_token'] = tokenizer.encode(sample['text'], allowed_special=allowed_special) + if mode == 'inference': + sample['tts_text_token'] = tokenizer.encode(sample['tts_text'], allowed_special=allowed_special) + yield sample + + +def shuffle(data, shuffle_size=10000, mode='train'): + """ Local shuffle the data + + Args: + data: Iterable[{key, feat, label}] + shuffle_size: buffer size for shuffle + + Returns: + Iterable[{key, feat, label}] + """ + buf = [] + for sample in data: + buf.append(sample) + if len(buf) >= shuffle_size: + random.shuffle(buf) + for x in buf: + yield x + buf = [] + # The sample left over + random.shuffle(buf) + for x in buf: + yield x + + +def sort(data, sort_size=500, mode='train'): + """ Sort the data by feature length. + Sort is used after shuffle and before batch, so we can group + utts with similar lengths into a batch, and `sort_size` should + be less than `shuffle_size` + + Args: + data: Iterable[{key, feat, label}] + sort_size: buffer size for sort + + Returns: + Iterable[{key, feat, label}] + """ + + buf = [] + for sample in data: + buf.append(sample) + if len(buf) >= sort_size: + buf.sort(key=lambda x: x['speech_feat'].size(0)) + for x in buf: + yield x + buf = [] + # The sample left over + buf.sort(key=lambda x: x['speech_feat'].size(0)) + for x in buf: + yield x + + +def static_batch(data, batch_size=16): + """ Static batch the data by `batch_size` + + Args: + data: Iterable[{key, feat, label}] + batch_size: batch size + + Returns: + Iterable[List[{key, feat, label}]] + """ + buf = [] + for sample in data: + buf.append(sample) + if len(buf) >= batch_size: + yield buf + buf = [] + if len(buf) > 0: + yield buf + + +def dynamic_batch(data, max_frames_in_batch=12000, mode='train'): + """ Dynamic batch the data until the total frames in batch + reach `max_frames_in_batch` + + Args: + data: Iterable[{key, feat, label}] + max_frames_in_batch: max_frames in one batch + + Returns: + Iterable[List[{key, feat, label}]] + """ + buf = [] + longest_frames = 0 + for sample in data: + assert 'speech_feat' in sample + assert isinstance(sample['speech_feat'], torch.Tensor) + new_sample_frames = sample['speech_feat'].size(0) + longest_frames = max(longest_frames, new_sample_frames) + frames_after_padding = longest_frames * (len(buf) + 1) + if frames_after_padding > max_frames_in_batch: + yield buf + buf = [sample] + longest_frames = new_sample_frames + else: + buf.append(sample) + if len(buf) > 0: + yield buf + + +def batch(data, batch_type='static', batch_size=16, max_frames_in_batch=12000, mode='train'): + """ Wrapper for static/dynamic batch + """ + if mode == 'inference': + return static_batch(data, 1) + else: + if batch_type == 'static': + return static_batch(data, batch_size) + elif batch_type == 'dynamic': + return dynamic_batch(data, max_frames_in_batch) + else: + logging.fatal('Unsupported batch type {}'.format(batch_type)) + + +def padding(data, mode='train'): + """ Padding the data into training data + + Args: + data: Iterable[List[{key, feat, label}]] + + Returns: + Iterable[Tuple(keys, feats, labels, feats lengths, label lengths)] + """ + for sample in data: + assert isinstance(sample, list) + speech_feat_len = torch.tensor([x['speech_feat'].size(1) for x in sample], + dtype=torch.int32) + order = torch.argsort(speech_feat_len, descending=True) + + utts = [sample[i]['utt'] for i in order] + speech_token = [torch.tensor(sample[i]['speech_token']) for i in order] + speech_token_len = torch.tensor([i.size(0) for i in speech_token], dtype=torch.int32) + 
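+        # Samples were ordered by decreasing speech_feat length above; each ragged
+        # field (speech tokens, speech features, text tokens) is now padded to the
+        # longest item in the batch with pad_sequence.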
speech_token = pad_sequence(speech_token, + batch_first=True, + padding_value=0) + speech_feat = [sample[i]['speech_feat'] for i in order] + speech_feat_len = torch.tensor([i.size(0) for i in speech_feat], dtype=torch.int32) + speech_feat = pad_sequence(speech_feat, + batch_first=True, + padding_value=0) + text = [sample[i]['text'] for i in order] + text_token = [torch.tensor(sample[i]['text_token']) for i in order] + text_token_len = torch.tensor([i.size(0) for i in text_token], dtype=torch.int32) + text_token = pad_sequence(text_token, batch_first=True, padding_value=0) + utt_embedding = torch.stack([sample[i]['utt_embedding'] for i in order], dim=0) + spk_embedding = torch.stack([sample[i]['spk_embedding'] for i in order], dim=0) + batch = { + "utts": utts, + "speech_token": speech_token, + "speech_token_len": speech_token_len, + "speech_feat": speech_feat, + "speech_feat_len": speech_feat_len, + "text": text, + "text_token": text_token, + "text_token_len": text_token_len, + "utt_embedding": utt_embedding, + "spk_embedding": spk_embedding, + } + if mode == 'inference': + tts_text = [sample[i]['tts_text'] for i in order] + tts_index = [sample[i]['tts_index'] for i in order] + tts_text_token = [torch.tensor(sample[i]['tts_text_token']) for i in order] + tts_text_token_len = torch.tensor([i.size(0) for i in tts_text_token], dtype=torch.int32) + tts_text_token = pad_sequence(tts_text_token, batch_first=True, padding_value=-1) + batch.update({'tts_text': tts_text, + 'tts_index': tts_index, + 'tts_text_token': tts_text_token, + 'tts_text_token_len': tts_text_token_len}) + yield batch diff --git a/cosyvoice/flow/decoder.py b/cosyvoice/flow/decoder.py new file mode 100644 index 0000000..4349279 --- /dev/null +++ b/cosyvoice/flow/decoder.py @@ -0,0 +1,222 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +import torch.nn as nn +from einops import pack, rearrange, repeat +from matcha.models.components.decoder import SinusoidalPosEmb, Block1D, ResnetBlock1D, Downsample1D, TimestepEmbedding, Upsample1D +from matcha.models.components.transformer import BasicTransformerBlock + + +class ConditionalDecoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + channels=(256, 256), + dropout=0.05, + attention_head_dim=64, + n_blocks=1, + num_mid_blocks=2, + num_heads=4, + act_fn="snake", + ): + """ + This decoder requires an input with the same shape of the target. So, if your text content + is shorter or longer than the outputs, please re-sampling it before feeding to the decoder. 
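+        In this repository that resampling is handled by the length regulator
+        (cosyvoice/flow/length_regulator.py), which interpolates the encoder
+        output to the target mel length before this decoder is called.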
+ """ + super().__init__() + channels = tuple(channels) + self.in_channels = in_channels + self.out_channels = out_channels + + self.time_embeddings = SinusoidalPosEmb(in_channels) + time_embed_dim = channels[0] * 4 + self.time_mlp = TimestepEmbedding( + in_channels=in_channels, + time_embed_dim=time_embed_dim, + act_fn="silu", + ) + self.down_blocks = nn.ModuleList([]) + self.mid_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + output_channel = in_channels + for i in range(len(channels)): # pylint: disable=consider-using-enumerate + input_channel = output_channel + output_channel = channels[i] + is_last = i == len(channels) - 1 + resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) + transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + dim=output_channel, + num_attention_heads=num_heads, + attention_head_dim=attention_head_dim, + dropout=dropout, + activation_fn=act_fn, + ) + for _ in range(n_blocks) + ] + ) + downsample = ( + Downsample1D(output_channel) if not is_last else nn.Conv1d(output_channel, output_channel, 3, padding=1) + ) + self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample])) + + for i in range(num_mid_blocks): + input_channel = channels[-1] + out_channels = channels[-1] + resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) + + transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + dim=output_channel, + num_attention_heads=num_heads, + attention_head_dim=attention_head_dim, + dropout=dropout, + activation_fn=act_fn, + ) + for _ in range(n_blocks) + ] + ) + + self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks])) + + channels = channels[::-1] + (channels[0],) + for i in range(len(channels) - 1): + input_channel = channels[i] * 2 + output_channel = channels[i + 1] + is_last = i == len(channels) - 2 + resnet = ResnetBlock1D( + dim=input_channel, + dim_out=output_channel, + time_emb_dim=time_embed_dim, + ) + transformer_blocks = nn.ModuleList( + [ + BasicTransformerBlock( + dim=output_channel, + num_attention_heads=num_heads, + attention_head_dim=attention_head_dim, + dropout=dropout, + activation_fn=act_fn, + ) + for _ in range(n_blocks) + ] + ) + upsample = ( + Upsample1D(output_channel, use_conv_transpose=True) + if not is_last + else nn.Conv1d(output_channel, output_channel, 3, padding=1) + ) + self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample])) + self.final_block = Block1D(channels[-1], channels[-1]) + self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1) + self.initialize_weights() + + + def initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv1d): + nn.init.kaiming_normal_(m.weight, nonlinearity="relu") + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.kaiming_normal_(m.weight, nonlinearity="relu") + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x, mask, mu, t, spks=None, cond=None): + """Forward pass of the UNet1DConditional model. + + Args: + x (torch.Tensor): shape (batch_size, in_channels, time) + mask (_type_): shape (batch_size, 1, time) + t (_type_): shape (batch_size) + spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None. + cond (_type_, optional): placeholder for future use. Defaults to None. 
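+            mu (torch.Tensor): conditioning features from the encoder / length
+                regulator; packed with `x` along the channel dimension, so it
+                follows the same (batch_size, channels, time) layout.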
+ + Raises: + ValueError: _description_ + ValueError: _description_ + + Returns: + _type_: _description_ + """ + + t = self.time_embeddings(t) + t = self.time_mlp(t) + + x = pack([x, mu], "b * t")[0] + + if spks is not None: + spks = repeat(spks, "b c -> b c t", t=x.shape[-1]) + x = pack([x, spks], "b * t")[0] + if cond is not None: + x = pack([x, cond], "b * t")[0] + + hiddens = [] + masks = [mask] + for resnet, transformer_blocks, downsample in self.down_blocks: + mask_down = masks[-1] + x = resnet(x, mask_down, t) + x = rearrange(x, "b c t -> b t c").contiguous() + attn_mask = torch.matmul(mask_down.transpose(1, 2).contiguous(), mask_down) + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=attn_mask, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t").contiguous() + hiddens.append(x) # Save hidden states for skip connections + x = downsample(x * mask_down) + masks.append(mask_down[:, :, ::2]) + masks = masks[:-1] + mask_mid = masks[-1] + + for resnet, transformer_blocks in self.mid_blocks: + x = resnet(x, mask_mid, t) + x = rearrange(x, "b c t -> b t c").contiguous() + attn_mask = torch.matmul(mask_mid.transpose(1, 2).contiguous(), mask_mid) + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=attn_mask, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t").contiguous() + + for resnet, transformer_blocks, upsample in self.up_blocks: + mask_up = masks.pop() + skip = hiddens.pop() + x = pack([x[:, :, :skip.shape[-1]], skip], "b * t")[0] + x = resnet(x, mask_up, t) + x = rearrange(x, "b c t -> b t c").contiguous() + attn_mask = torch.matmul(mask_up.transpose(1, 2).contiguous(), mask_up) + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=attn_mask, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t").contiguous() + x = upsample(x * mask_up) + x = self.final_block(x, mask_up) + output = self.final_proj(x * mask_up) + return output * mask diff --git a/cosyvoice/flow/flow.py b/cosyvoice/flow/flow.py new file mode 100644 index 0000000..d0dbcd0 --- /dev/null +++ b/cosyvoice/flow/flow.py @@ -0,0 +1,135 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
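+
+# Illustrative input sketch (comment only, not executed; sizes follow the defaults
+# in this file): MaskedDiffWithXvec.forward consumes the padded batch produced by
+# the dataset pipeline's `padding` stage, roughly:
+#   batch = {
+#       'speech_token':     (B, T_token)   speech codec tokens,
+#       'speech_token_len': (B,)           int32 lengths,
+#       'speech_feat':      (B, T_mel, 80) mel features,
+#       'speech_feat_len':  (B,)           int32 lengths,
+#       'utt_embedding':    (B, 192)       utterance-level x-vector,
+#   }
+#   loss = model(batch, device)['loss']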
+import logging +from typing import Dict, Optional +import torch +import torch.nn as nn +from torch.nn import functional as F +from omegaconf import DictConfig +from cosyvoice.utils.mask import make_pad_mask + + +class MaskedDiffWithXvec(torch.nn.Module): + def __init__(self, + input_size: int = 512, + output_size: int = 80, + spk_embed_dim: int = 192, + output_type: str = "mel", + vocab_size: int = 4096, + input_frame_rate: int = 50, + only_mask_loss: bool = True, + encoder: torch.nn.Module = None, + length_regulator: torch.nn.Module = None, + decoder: torch.nn.Module = None, + decoder_conf: Dict = {'in_channels': 240, 'out_channel': 80, 'spk_emb_dim': 80, 'n_spks': 1, 'cfm_params': DictConfig({'sigma_min': 1e-06, 'solver': 'euler', 't_scheduler': 'cosine', 'training_cfg_rate': 0.2, 'inference_cfg_rate': 0.7, 'reg_loss_type': 'l1'}), 'decoder_params': {'channels': [256, 256], 'dropout': 0.0, 'attention_head_dim': 64, 'n_blocks': 4, 'num_mid_blocks': 12, 'num_heads': 8, 'act_fn': 'gelu'}}, + mel_feat_conf: Dict = {'n_fft': 1024, 'num_mels': 80, 'sampling_rate': 22050, 'hop_size': 256, 'win_size': 1024, 'fmin': 0, 'fmax': 8000}): + super().__init__() + self.input_size = input_size + self.output_size = output_size + self.decoder_conf = decoder_conf + self.mel_feat_conf = mel_feat_conf + self.vocab_size = vocab_size + self.output_type = output_type + self.input_frame_rate = input_frame_rate + logging.info(f"input frame rate={self.input_frame_rate}") + self.input_embedding = nn.Embedding(vocab_size, input_size) + self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, output_size) + self.encoder = encoder + self.encoder_proj = torch.nn.Linear(self.encoder.output_size(), output_size) + self.decoder = decoder + self.length_regulator = length_regulator + self.only_mask_loss = only_mask_loss + + def forward( + self, + batch: dict, + device: torch.device, + ) -> Dict[str, Optional[torch.Tensor]]: + token = batch['speech_token'].to(device) + token_len = batch['speech_token_len'].to(device) + feat = batch['speech_feat'].to(device) + feat_len = batch['speech_feat_len'].to(device) + embedding = batch['utt_embedding'].to(device) + + # xvec projection + embedding = F.normalize(embedding, dim=1) + embedding = self.spk_embed_affine_layer(embedding) + + # concat text and prompt_text + mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(device) + token = self.input_embedding(torch.clamp(token, min=0)) * mask + + # text encode + h, h_lengths = self.encoder(token, token_len) + h = self.encoder_proj(h) + h, h_lengths = self.length_regulator(h, feat_len) + + # get conditions + conds = torch.zeros(feat.shape, device=token.device) + conds = conds.transpose(1, 2) + + mask = (~make_pad_mask(feat_len)).to(h) + feat = F.interpolate(feat.unsqueeze(dim=1), size=h.shape[1:], mode="nearest").squeeze(dim=1) + loss, _ = self.decoder.compute_loss( + feat.transpose(1, 2).contiguous(), + mask.unsqueeze(1), + h.transpose(1, 2).contiguous(), + embedding, + cond=conds + ) + return {'loss': loss} + + @torch.inference_mode() + def inference(self, + token, + token_len, + prompt_token, + prompt_token_len, + prompt_feat, + prompt_feat_len, + embedding): + assert token.shape[0] == 1 + # xvec projection + embedding = F.normalize(embedding, dim=1) + embedding = self.spk_embed_affine_layer(embedding) + + # concat text and prompt_text + token, token_len = torch.concat([prompt_token, token], dim=1), prompt_token_len + token_len + mask = (~make_pad_mask(token_len)).float().unsqueeze(-1).to(embedding) + token = 
self.input_embedding(torch.clamp(token, min=0)) * mask + + # text encode + h, h_lengths = self.encoder(token, token_len) + h = self.encoder_proj(h) + feat_len = (token_len / 50 * 22050 / 256).int() + h, h_lengths = self.length_regulator(h, feat_len) + + # get conditions + conds = torch.zeros([1, feat_len.max().item(), self.output_size], device=token.device) + if prompt_feat.shape[1] != 0: + for i, j in enumerate(prompt_feat_len): + conds[i, :j] = prompt_feat[i] + conds = conds.transpose(1, 2) + + mask = (~make_pad_mask(feat_len)).to(h) + feat = self.decoder( + mu=h.transpose(1, 2).contiguous(), + mask=mask.unsqueeze(1), + spks=embedding, + cond=conds, + n_timesteps=10 + ) + if prompt_feat.shape[1] != 0: + feat = feat[:, :, prompt_feat.shape[1]:] + return feat diff --git a/cosyvoice/flow/flow_matching.py b/cosyvoice/flow/flow_matching.py new file mode 100644 index 0000000..6a8985f --- /dev/null +++ b/cosyvoice/flow/flow_matching.py @@ -0,0 +1,131 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +import torch.nn.functional as F +from matcha.models.components.flow_matching import BASECFM + +class ConditionalCFM(BASECFM): + def __init__(self, in_channels, cfm_params, n_spks=1, spk_emb_dim=64, estimator: torch.nn.Module = None): + super().__init__( + n_feats=in_channels, + cfm_params=cfm_params, + n_spks=n_spks, + spk_emb_dim=spk_emb_dim, + ) + self.t_scheduler = cfm_params.t_scheduler + self.training_cfg_rate = cfm_params.training_cfg_rate + self.inference_cfg_rate = cfm_params.inference_cfg_rate + in_channels = in_channels + (spk_emb_dim if n_spks > 0 else 0) + # Just change the architecture of the estimator here + self.estimator = estimator + + @torch.inference_mode() + def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None): + """Forward diffusion + + Args: + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): output_mask + shape: (batch_size, 1, mel_timesteps) + n_timesteps (int): number of diffusion steps + temperature (float, optional): temperature for scaling noise. Defaults to 1.0. + spks (torch.Tensor, optional): speaker ids. Defaults to None. + shape: (batch_size, spk_emb_dim) + cond: Not used but kept for future purposes + + Returns: + sample: generated mel-spectrogram + shape: (batch_size, n_feats, mel_timesteps) + """ + z = torch.randn_like(mu) * temperature + t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device) + if self.t_scheduler == 'cosine': + t_span = 1 - torch.cos(t_span * 0.5 * torch.pi) + return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond) + + def solve_euler(self, x, t_span, mu, mask, spks, cond): + """ + Fixed euler solver for ODEs. 
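+        Each step advances x by dt * dphi_dt; when inference_cfg_rate > 0 the
+        derivative mixes a conditional and an unconditional estimator call
+        (classifier-free guidance), and dt is recomputed from t_span so that
+        non-uniform schedules (e.g. the cosine schedule) are respected.
+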
+ Args: + x (torch.Tensor): random noise + t_span (torch.Tensor): n_timesteps interpolated + shape: (n_timesteps + 1,) + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): output_mask + shape: (batch_size, 1, mel_timesteps) + spks (torch.Tensor, optional): speaker ids. Defaults to None. + shape: (batch_size, spk_emb_dim) + cond: Not used but kept for future purposes + """ + t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0] + + # I am storing this because I can later plot it by putting a debugger here and saving it to a file + # Or in future might add like a return_all_steps flag + sol = [] + + for step in range(1, len(t_span)): + dphi_dt = self.estimator(x, mask, mu, t, spks, cond) + # Classifier-Free Guidance inference introduced in VoiceBox + if self.inference_cfg_rate > 0: + cfg_dphi_dt = self.estimator( + x, mask, + torch.zeros_like(mu), t, + torch.zeros_like(spks) if spks is not None else None, + torch.zeros_like(cond) + ) + dphi_dt = ((1.0 + self.inference_cfg_rate) * dphi_dt - + self.inference_cfg_rate * cfg_dphi_dt) + x = x + dt * dphi_dt + t = t + dt + sol.append(x) + if step < len(t_span) - 1: + dt = t_span[step + 1] - t + + return sol[-1] + + def compute_loss(self, x1, mask, mu, spks=None, cond=None): + """Computes diffusion loss + + Args: + x1 (torch.Tensor): Target + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): target mask + shape: (batch_size, 1, mel_timesteps) + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + spks (torch.Tensor, optional): speaker embedding. Defaults to None. + shape: (batch_size, spk_emb_dim) + + Returns: + loss: conditional flow matching loss + y: conditional flow + shape: (batch_size, n_feats, mel_timesteps) + """ + b, _, t = mu.shape + + # random timestep + t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype) + if self.t_scheduler == 'cosine': + t = 1 - torch.cos(t * 0.5 * torch.pi) + # sample noise p(x_0) + z = torch.randn_like(x1) + + y = (1 - (1 - self.sigma_min) * t) * z + t * x1 + u = x1 - (1 - self.sigma_min) * z + + pred = self.estimator(y, mask, mu, t.squeeze(), spks, cond) + loss = F.mse_loss(pred * mask, u * mask, reduction="sum") / (torch.sum(mask) * u.shape[1]) + return loss, y diff --git a/cosyvoice/flow/length_regulator.py b/cosyvoice/flow/length_regulator.py new file mode 100644 index 0000000..622f29a --- /dev/null +++ b/cosyvoice/flow/length_regulator.py @@ -0,0 +1,49 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
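+
+# Usage sketch (illustrative values only; shapes inferred from forward() below):
+#   regulator = InterpolateRegulator(channels=80, sampling_ratios=[1, 1, 1, 1])
+#   x = torch.randn(2, 120, 80)           # (B, T_token, D) encoder output
+#   ylens = torch.tensor([300, 280])      # target mel lengths per utterance
+#   out, olens = regulator(x, ylens)      # out: (B, max(ylens), 80), olens == ylens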
+from typing import Tuple +import torch.nn as nn +from torch.nn import functional as F +from cosyvoice.utils.mask import make_pad_mask + + +class InterpolateRegulator(nn.Module): + def __init__( + self, + channels: int, + sampling_ratios: Tuple, + out_channels: int = None, + groups: int = 1, + ): + super().__init__() + self.sampling_ratios = sampling_ratios + out_channels = out_channels or channels + model = nn.ModuleList([]) + if len(sampling_ratios) > 0: + for _ in sampling_ratios: + module = nn.Conv1d(channels, channels, 3, 1, 1) + norm = nn.GroupNorm(groups, channels) + act = nn.Mish() + model.extend([module, norm, act]) + model.append( + nn.Conv1d(channels, out_channels, 1, 1) + ) + self.model = nn.Sequential(*model) + + def forward(self, x, ylens=None): + # x in (B, T, D) + mask = (~make_pad_mask(ylens)).to(x).unsqueeze(-1) + x = F.interpolate(x.transpose(1, 2).contiguous(), size=ylens.max(), mode='nearest') + out = self.model(x).transpose(1, 2).contiguous() + olens = ylens + return out * mask, olens diff --git a/cosyvoice/hifigan/f0_predictor.py b/cosyvoice/hifigan/f0_predictor.py new file mode 100644 index 0000000..36b85f4 --- /dev/null +++ b/cosyvoice/hifigan/f0_predictor.py @@ -0,0 +1,55 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import torch +import torch.nn as nn +from torch.nn.utils import weight_norm + + +class ConvRNNF0Predictor(nn.Module): + def __init__(self, + num_class: int = 1, + in_channels: int = 80, + cond_channels: int = 512 + ): + super().__init__() + + self.num_class = num_class + self.condnet = nn.Sequential( + weight_norm( + nn.Conv1d(in_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + weight_norm( + nn.Conv1d(cond_channels, cond_channels, kernel_size=3, padding=1) + ), + nn.ELU(), + ) + self.classifier = nn.Linear(in_features=cond_channels, out_features=self.num_class) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.condnet(x) + x = x.transpose(1, 2) + return torch.abs(self.classifier(x).squeeze(-1)) diff --git a/cosyvoice/hifigan/generator.py b/cosyvoice/hifigan/generator.py new file mode 100644 index 0000000..aa8c7ee --- /dev/null +++ b/cosyvoice/hifigan/generator.py @@ -0,0 +1,391 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Kai Hu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""HIFI-GAN""" + +import typing as tp +import numpy as np +from scipy.signal import get_window +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import Conv1d +from torch.nn import ConvTranspose1d +from torch.nn.utils import remove_weight_norm +from torch.nn.utils import weight_norm +from torch.distributions.uniform import Uniform + +from cosyvoice.transformer.activation import Snake +from academicodec.utils import get_padding +from academicodec.utils import init_weights + + +"""hifigan based generator implementation. + +This code is modified from https://github.com/jik876/hifi-gan + ,https://github.com/kan-bayashi/ParallelWaveGAN and + https://github.com/NVIDIA/BigVGAN + +""" +class ResBlock(torch.nn.Module): + """Residual block module in HiFiGAN/BigVGAN.""" + def __init__( + self, + channels: int = 512, + kernel_size: int = 3, + dilations: tp.List[int] = [1, 3, 5], + ): + super(ResBlock, self).__init__() + self.convs1 = nn.ModuleList() + self.convs2 = nn.ModuleList() + + for dilation in dilations: + self.convs1.append( + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation, + padding=get_padding(kernel_size, dilation) + ) + ) + ) + self.convs2.append( + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1) + ) + ) + ) + self.convs1.apply(init_weights) + self.convs2.apply(init_weights) + self.activations1 = nn.ModuleList([ + Snake(channels, alpha_logscale=False) + for _ in range(len(self.convs1)) + ]) + self.activations2 = nn.ModuleList([ + Snake(channels, alpha_logscale=False) + for _ in range(len(self.convs2)) + ]) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + for idx in range(len(self.convs1)): + xt = self.activations1[idx](x) + xt = self.convs1[idx](xt) + xt = self.activations2[idx](xt) + xt = self.convs2[idx](xt) + x = xt + x + return x + + def remove_weight_norm(self): + for idx in range(len(self.convs1)): + remove_weight_norm(self.convs1[idx]) + remove_weight_norm(self.convs2[idx]) + +class SineGen(torch.nn.Module): + """ Definition of sine generator + SineGen(samp_rate, harmonic_num = 0, + sine_amp = 0.1, noise_std = 0.003, + voiced_threshold = 0, + flag_for_pulse=False) + samp_rate: sampling rate in Hz + harmonic_num: number of harmonic overtones (default 0) + sine_amp: amplitude of sine-wavefrom (default 0.1) + noise_std: std of Gaussian noise (default 0.003) + voiced_thoreshold: F0 threshold for U/V classification (default 0) + flag_for_pulse: this SinGen is used inside PulseGen (default False) + Note: when flag_for_pulse is True, the first time step of a voiced + segment is always sin(np.pi) or cos(0) + """ + + def __init__(self, samp_rate, harmonic_num=0, + sine_amp=0.1, noise_std=0.003, + voiced_threshold=0): + super(SineGen, self).__init__() + self.sine_amp = sine_amp + self.noise_std = noise_std + self.harmonic_num = harmonic_num + self.sampling_rate = samp_rate + self.voiced_threshold = voiced_threshold + + def _f02uv(self, f0): + # generate uv signal + uv = (f0 > self.voiced_threshold).type(torch.float32) + return uv 
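+
+    # Shape note (inferred from forward() below): for f0 of shape [B, 1, T],
+    # forward() returns sine_waves and noise of shape [B, harmonic_num + 1, T]
+    # (one row per harmonic, later merged by SourceModuleHnNSF.l_linear) and
+    # uv of shape [B, 1, T].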
+ + @torch.no_grad() + def forward(self, f0): + """ + :param f0: [B, 1, sample_len], Hz + :return: [B, 1, sample_len] + """ + + F_mat = torch.zeros((f0.size(0), self.harmonic_num + 1, f0.size(-1))).to(f0.device) + for i in range(self.harmonic_num + 1): + F_mat[:, i: i + 1, :] = f0 * (i + 1) / self.sampling_rate + + theta_mat = 2 * np.pi * (torch.cumsum(F_mat, dim=-1) % 1) + u_dist = Uniform(low=-np.pi, high=np.pi) + phase_vec = u_dist.sample(sample_shape=(f0.size(0), self.harmonic_num + 1, 1)).to(F_mat.device) + phase_vec[:, 0, :] = 0 + + # generate sine waveforms + sine_waves = self.sine_amp * torch.sin(theta_mat + phase_vec) + + # generate uv signal + uv = self._f02uv(f0) + + # noise: for unvoiced should be similar to sine_amp + # std = self.sine_amp/3 -> max value ~ self.sine_amp + # . for voiced regions is self.noise_std + noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 3 + noise = noise_amp * torch.randn_like(sine_waves) + + # first: set the unvoiced part to 0 by uv + # then: additive noise + sine_waves = sine_waves * uv + noise + return sine_waves, uv, noise + + +class SourceModuleHnNSF(torch.nn.Module): + """ SourceModule for hn-nsf + SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0) + sampling_rate: sampling_rate in Hz + harmonic_num: number of harmonic above F0 (default: 0) + sine_amp: amplitude of sine source signal (default: 0.1) + add_noise_std: std of additive Gaussian noise (default: 0.003) + note that amplitude of noise in unvoiced is decided + by sine_amp + voiced_threshold: threhold to set U/V given F0 (default: 0) + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + uv (batchsize, length, 1) + """ + + def __init__(self, sampling_rate, upsample_scale, harmonic_num=0, sine_amp=0.1, + add_noise_std=0.003, voiced_threshod=0): + super(SourceModuleHnNSF, self).__init__() + + self.sine_amp = sine_amp + self.noise_std = add_noise_std + + # to produce sine waveforms + self.l_sin_gen = SineGen(sampling_rate, harmonic_num, + sine_amp, add_noise_std, voiced_threshod) + + # to merge source harmonics into a single excitation + self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) + self.l_tanh = torch.nn.Tanh() + + def forward(self, x): + """ + Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) + F0_sampled (batchsize, length, 1) + Sine_source (batchsize, length, 1) + noise_source (batchsize, length 1) + """ + # source for harmonic branch + with torch.no_grad(): + sine_wavs, uv, _ = self.l_sin_gen(x.transpose(1, 2)) + sine_wavs = sine_wavs.transpose(1, 2) + uv = uv.transpose(1, 2) + sine_merge = self.l_tanh(self.l_linear(sine_wavs)) + + # source for noise branch, in the same shape as uv + noise = torch.randn_like(uv) * self.sine_amp / 3 + return sine_merge, noise, uv + + +class HiFTGenerator(nn.Module): + """ + HiFTNet Generator: Neural Source Filter + ISTFTNet + https://arxiv.org/abs/2309.09493 + """ + def __init__( + self, + in_channels: int = 80, + base_channels: int = 512, + nb_harmonics: int = 8, + sampling_rate: int = 22050, + nsf_alpha: float = 0.1, + nsf_sigma: float = 0.003, + nsf_voiced_threshold: float = 10, + upsample_rates: tp.List[int] = [8, 8], + upsample_kernel_sizes: tp.List[int] = [16, 16], + istft_params: tp.Dict[str, int] = {"n_fft": 16, "hop_len": 4}, + resblock_kernel_sizes: tp.List[int] = [3, 7, 11], + resblock_dilation_sizes: tp.List[tp.List[int]] = [[1, 3, 5], [1, 3, 5], [1, 
3, 5]], + source_resblock_kernel_sizes: tp.List[int] = [7, 11], + source_resblock_dilation_sizes: tp.List[tp.List[int]] = [[1, 3, 5], [1, 3, 5]], + lrelu_slope: float = 0.1, + audio_limit: float = 0.99, + f0_predictor: torch.nn.Module = None, + ): + super(HiFTGenerator, self).__init__() + + self.out_channels = 1 + self.nb_harmonics = nb_harmonics + self.sampling_rate = sampling_rate + self.istft_params = istft_params + self.lrelu_slope = lrelu_slope + self.audio_limit = audio_limit + + self.num_kernels = len(resblock_kernel_sizes) + self.num_upsamples = len(upsample_rates) + self.m_source = SourceModuleHnNSF( + sampling_rate=sampling_rate, + upsample_scale=np.prod(upsample_rates) * istft_params["hop_len"], + harmonic_num=nb_harmonics, + sine_amp=nsf_alpha, + add_noise_std=nsf_sigma, + voiced_threshod=nsf_voiced_threshold) + self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates) * istft_params["hop_len"]) + + self.conv_pre = weight_norm( + Conv1d(in_channels, base_channels, 7, 1, padding=3) + ) + + # Up + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + base_channels // (2**i), + base_channels // (2**(i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + # Down + self.source_downs = nn.ModuleList() + self.source_resblocks = nn.ModuleList() + downsample_rates = [1] + upsample_rates[::-1][:-1] + downsample_cum_rates = np.cumprod(downsample_rates) + for i, (u, k, d) in enumerate(zip(downsample_cum_rates[::-1], source_resblock_kernel_sizes, + source_resblock_dilation_sizes)): + if u == 1: + self.source_downs.append( + Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), 1, 1) + ) + else: + self.source_downs.append( + Conv1d(istft_params["n_fft"] + 2, base_channels // (2 ** (i + 1)), u * 2, u, padding=(u // 2)) + ) + + self.source_resblocks.append( + ResBlock(base_channels // (2 ** (i + 1)), k, d) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = base_channels // (2**(i + 1)) + for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): + self.resblocks.append(ResBlock(ch, k, d)) + + self.conv_post = weight_norm(Conv1d(ch, istft_params["n_fft"] + 2, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + self.reflection_pad = nn.ReflectionPad1d((1, 0)) + self.stft_window = torch.from_numpy(get_window("hann", istft_params["n_fft"], fftbins=True).astype(np.float32)) + self.f0_predictor = f0_predictor + + def _f02source(self, f0: torch.Tensor) -> torch.Tensor: + f0 = self.f0_upsamp(f0[:, None]).transpose(1, 2) # bs,n,t + + har_source, _, _ = self.m_source(f0) + return har_source.transpose(1, 2) + + def _stft(self, x): + spec = torch.stft( + x, + self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(x.device), + return_complex=True) + spec = torch.view_as_real(spec) # [B, F, TT, 2] + return spec[..., 0], spec[..., 1] + + def _istft(self, magnitude, phase): + magnitude = torch.clip(magnitude, max=1e2) + real = magnitude * torch.cos(phase) + img = magnitude * torch.sin(phase) + inverse_transform = torch.istft(torch.complex(real, img), self.istft_params["n_fft"], self.istft_params["hop_len"], self.istft_params["n_fft"], window=self.stft_window.to(magnitude.device)) + return inverse_transform + + def forward(self, x: torch.Tensor) -> torch.Tensor: + f0 = self.f0_predictor(x) + s = self._f02source(f0) + + s_stft_real, 
s_stft_imag = self._stft(s.squeeze(1)) + s_stft = torch.cat([s_stft_real, s_stft_imag], dim=1) + + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, self.lrelu_slope) + x = self.ups[i](x) + + if i == self.num_upsamples - 1: + x = self.reflection_pad(x) + + # fusion + si = self.source_downs[i](s_stft) + si = self.source_resblocks[i](si) + x = x + si + + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + + x = F.leaky_relu(x) + x = self.conv_post(x) + magnitude = torch.exp(x[:, :self.istft_params["n_fft"] // 2 + 1, :]) + phase = torch.sin(x[:, self.istft_params["n_fft"] // 2 + 1:, :]) # actually, sin is redundancy + + x = self._istft(magnitude, phase) + x = torch.clamp(x, -self.audio_limit, self.audio_limit) + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + self.source_module.remove_weight_norm() + for l in self.source_downs: + remove_weight_norm(l) + for l in self.source_resblocks: + l.remove_weight_norm() + + @torch.inference_mode() + def inference(self, mel: torch.Tensor) -> torch.Tensor: + return self.forward(x=mel) diff --git a/cosyvoice/llm/llm.py b/cosyvoice/llm/llm.py new file mode 100644 index 0000000..05c22ef --- /dev/null +++ b/cosyvoice/llm/llm.py @@ -0,0 +1,206 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from typing import Dict, Optional, Union +import torch +from torch import nn +import torch.nn.functional as F +from torch.nn.utils.rnn import pad_sequence, unpad_sequence +from cosyvoice.utils.common import IGNORE_ID +from cosyvoice.transformer.label_smoothing_loss import LabelSmoothingLoss +from cosyvoice.utils.common import th_accuracy + + +class TransformerLM(torch.nn.Module): + def __init__( + self, + text_encoder_input_size: int, + llm_input_size: int, + llm_output_size: int, + text_token_size: int, + speech_token_size: int, + text_encoder: torch.nn.Module, + llm: torch.nn.Module, + length_normalized_loss: bool = True, + lsm_weight: float = 0.0, + spk_embed_dim: int = 192, + ): + super().__init__() + self.llm_input_size = llm_input_size + self.speech_token_size = speech_token_size + # 1. build text token inputs related modules + self.text_embedding = torch.nn.Embedding(text_token_size, text_encoder_input_size) + self.text_encoder = text_encoder + self.text_encoder_affine_layer = nn.Linear( + self.text_encoder.output_size(), + llm_input_size + ) + + # 2. 
build speech token language model related modules + self.sos_eos = 0 + self.task_id = 1 + self.llm_embedding = torch.nn.Embedding(2, llm_input_size) + self.llm = llm + self.llm_decoder = nn.Linear(llm_output_size, speech_token_size + 1) + self.criterion_ce = LabelSmoothingLoss( + size=speech_token_size + 1, + padding_idx=IGNORE_ID, + smoothing=lsm_weight, + normalize_length=length_normalized_loss, + ) + + # 3. [Optional] build speech token related modules + self.speech_embedding = torch.nn.Embedding(speech_token_size, llm_input_size) + self.spk_embed_affine_layer = torch.nn.Linear(spk_embed_dim, llm_input_size) + + def encode( + self, + text: torch.Tensor, + text_lengths: torch.Tensor, + ): + encoder_out, encoder_mask = self.text_encoder(text, text_lengths, decoding_chunk_size=1, num_decoding_left_chunks=-1) + encoder_out_lens = encoder_mask.squeeze(1).sum(1) + encoder_out = self.text_encoder_affine_layer(encoder_out) + return encoder_out, encoder_out_lens + + def pad_unpad_sequence(self, sos_eos_emb, embedding, text_token, text_token_len, task_id_emb, speech_token, speech_token_len): + text_token = unpad_sequence(text_token, text_token_len.cpu(), batch_first=True) + speech_token = unpad_sequence(speech_token, speech_token_len.cpu(), batch_first=True) + lm_input = [torch.concat([sos_eos_emb.squeeze(dim=0), embedding[i], text_token[i], task_id_emb.squeeze(dim=0), speech_token[i]], dim=0) for i in range(len(text_token))] + lm_input_len = torch.tensor([i.size(0) for i in lm_input], dtype=torch.int32) + lm_input = pad_sequence(lm_input, batch_first=True, padding_value=IGNORE_ID) + return lm_input, lm_input_len + + def forward( + self, + batch: dict, + device: torch.device, + ) -> Dict[str, Optional[torch.Tensor]]: + """ + Args: + text: (B, L, D) + text_lengths: (B,) + audio: (B, T, N) or (B, T) + audio_lengths: (B,) + """ + text_token = batch['text_token'].to(device) + text_token_len = batch['text_token_len'].to(device) + speech_token = batch['speech_token'].to(device) + speech_token_len = batch['speech_token_len'].to(device) + embedding = batch['utt_embedding'].to(device) + + # 1. prepare llm_target + lm_target = [torch.tensor([IGNORE_ID] * (2 + text_token_len[i]) + speech_token[i, :speech_token_len[i]].tolist() + [self.speech_token_size]) for i in range(text_token.size(0))] + lm_target = pad_sequence(lm_target, batch_first=True, padding_value=IGNORE_ID).to(device) + + # 1. encode text_token + text_token = self.text_embedding(text_token) + text_token, text_token_len = self.encode(text_token, text_token_len) + + # 2. embedding projection + embedding = F.normalize(embedding, dim=1) + embedding = self.spk_embed_affine_layer(embedding) + embedding = embedding.unsqueeze(1) + + # 3. eos and task_id + sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1) + task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1) + + # 4. encode speech_token + speech_token = self.speech_embedding(speech_token) + + # 5. unpad and pad + lm_input, lm_input_len = self.pad_unpad_sequence(sos_eos_emb, embedding, text_token, text_token_len, task_id_emb, speech_token, speech_token_len) + + # 6. 
run lm forward + lm_output, lm_output_mask = self.llm(lm_input, lm_input_len.to(device)) + logits = self.llm_decoder(lm_output) + loss = self.criterion_ce(logits, lm_target) + acc = th_accuracy(logits.view(-1, self.speech_token_size + 1), lm_target, ignore_label=IGNORE_ID) + return {'loss': loss, 'acc': acc} + + def sampling_ids( + self, + weighted_scores: torch.Tensor, + sampling: Union[bool, int, float] = True, + beam_size: int = 1, + ignore_eos: bool = True, + ): + while True: + prob, indices = weighted_scores.softmax(dim=-1).topk(sampling) + top_ids = prob.multinomial(beam_size, replacement=True) + top_ids = indices[top_ids] + if (not ignore_eos) or (self.speech_token_size not in top_ids): + break + return top_ids + + @torch.inference_mode() + def inference( + self, + text: torch.Tensor, + text_len: torch.Tensor, + prompt_text: torch.Tensor, + prompt_text_len: torch.Tensor, + prompt_speech_token: torch.Tensor, + prompt_speech_token_len: torch.Tensor, + embedding: torch.Tensor, + beam_size: int = 1, + sampling: int = 25, + max_token_text_ratio: float = 20, + min_token_text_ratio: float = 2, + ) -> torch.Tensor: + device = text.device + text = torch.concat([prompt_text, text], dim=1) + text_len += prompt_text_len + text = self.text_embedding(text) + + # 1. encode text + text, text_len = self.encode(text, text_len) + + # 2. encode embedding + if embedding.shape[0] != 0: + embedding = F.normalize(embedding, dim=1) + embedding = self.spk_embed_affine_layer(embedding) + embedding = embedding.unsqueeze(dim=1) + else: + embedding = torch.zeros(1, 0, self.llm_input_size).to(device) + + # 3. concat llm_input + sos_eos_emb = self.llm_embedding.weight[self.sos_eos].reshape(1, 1, -1) + task_id_emb = self.llm_embedding.weight[self.task_id].reshape(1, 1, -1) + if prompt_speech_token_len != 0: + prompt_speech_token_emb = self.speech_embedding(prompt_speech_token) + else: + prompt_speech_token_emb = torch.zeros(1, 0, self.llm_input_size).to(device) + lm_input = torch.concat([sos_eos_emb, embedding, text, task_id_emb, prompt_speech_token_emb], dim=1) + + # 4. cal min/max_length + min_len = int((text_len - prompt_text_len) * min_token_text_ratio) + max_len = int((text_len - prompt_text_len) * max_token_text_ratio) + + # 5. 
step by step decode + out_tokens = [] + offset = 0 + att_cache, cnn_cache = torch.zeros((0, 0, 0, 0), device=lm_input.device), torch.zeros((0, 0, 0, 0), device=lm_input.device) + for i in range(max_len): + y_pred, att_cache, cnn_cache = self.llm.forward_chunk(lm_input, offset=0, required_cache_size=-1, att_cache=att_cache, cnn_cache=cnn_cache, + att_mask=torch.tril(torch.ones((1, lm_input.shape[1], lm_input.shape[1]), device=lm_input.device)).to(torch.bool)) + logp = self.llm_decoder(y_pred[:, -1]).log_softmax(dim=-1) + top_ids = self.sampling_ids(logp.squeeze(dim=0), sampling, beam_size, ignore_eos=True if i < min_len else False).item() + if top_ids == self.speech_token_size: + break + out_tokens.append(top_ids) + offset += lm_input.size(1) + lm_input = self.speech_embedding.weight[top_ids].reshape(1, 1, -1) + + return torch.tensor([out_tokens], dtype=torch.int64, device=device) diff --git a/cosyvoice/transformer/__init__.py b/cosyvoice/transformer/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cosyvoice/transformer/activation.py b/cosyvoice/transformer/activation.py new file mode 100644 index 0000000..8cea548 --- /dev/null +++ b/cosyvoice/transformer/activation.py @@ -0,0 +1,84 @@ +# Copyright (c) 2020 Johns Hopkins University (Shinji Watanabe) +# 2020 Northwestern Polytechnical University (Pengcheng Guo) +# 2020 Mobvoi Inc (Binbin Zhang) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Swish() activation function for Conformer.""" + +import torch +from torch import nn, sin, pow +from torch.nn import Parameter + + +class Swish(torch.nn.Module): + """Construct an Swish object.""" + + def forward(self, x: torch.Tensor) -> torch.Tensor: + """Return Swish activation function.""" + return x * torch.sigmoid(x) + + +# Implementation adapted from https://github.com/EdwardDixon/snake under the MIT license. +# LICENSE is in incl_licenses directory. +class Snake(nn.Module): + ''' + Implementation of a sine-based periodic activation function + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter + References: + - This activation function is from this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snake(256) + >>> x = torch.randn(256) + >>> x = a1(x) + ''' + def __init__(self, in_features, alpha=1.0, alpha_trainable=True, alpha_logscale=False): + ''' + Initialization. + INPUT: + - in_features: shape of the input + - alpha: trainable parameter + alpha is initialized to 1 by default, higher values = higher-frequency. + alpha will be trained along with the rest of your model. 
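+            - alpha_trainable: whether alpha is learned (default True)
+            - alpha_logscale: if True, alpha is stored in log scale (initialized
+              to zero) and exponentiated in forward (default False)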
+ ''' + super(Snake, self).__init__() + self.in_features = in_features + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = Parameter(torch.zeros(in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = Parameter(torch.ones(in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + ''' + Forward pass of the function. + Applies the function to the input elementwise. + Snake ∶= x + 1/a * sin^2 (xa) + ''' + alpha = self.alpha.unsqueeze(0).unsqueeze(-1) # line up with x to [B, C, T] + if self.alpha_logscale: + alpha = torch.exp(alpha) + x = x + (1.0 / (alpha + self.no_div_by_zero)) * pow(sin(x * alpha), 2) + + return x diff --git a/cosyvoice/transformer/attention.py b/cosyvoice/transformer/attention.py new file mode 100644 index 0000000..cb6723a --- /dev/null +++ b/cosyvoice/transformer/attention.py @@ -0,0 +1,326 @@ +# Copyright (c) 2019 Shigeki Karita +# 2020 Mobvoi Inc (Binbin Zhang) +# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Multi-Head Attention layer definition.""" + +import math +from typing import Tuple + +import torch +from torch import nn + + +class MultiHeadedAttention(nn.Module): + """Multi-Head Attention layer. + + Args: + n_head (int): The number of heads. + n_feat (int): The number of features. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, + n_head: int, + n_feat: int, + dropout_rate: float, + key_bias: bool = True): + """Construct an MultiHeadedAttention object.""" + super().__init__() + assert n_feat % n_head == 0 + # We assume d_v always equals d_k + self.d_k = n_feat // n_head + self.h = n_head + self.linear_q = nn.Linear(n_feat, n_feat) + self.linear_k = nn.Linear(n_feat, n_feat, bias=key_bias) + self.linear_v = nn.Linear(n_feat, n_feat) + self.linear_out = nn.Linear(n_feat, n_feat) + self.dropout = nn.Dropout(p=dropout_rate) + + def forward_qkv( + self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Transform query, key and value. + + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + + Returns: + torch.Tensor: Transformed query tensor, size + (#batch, n_head, time1, d_k). + torch.Tensor: Transformed key tensor, size + (#batch, n_head, time2, d_k). + torch.Tensor: Transformed value tensor, size + (#batch, n_head, time2, d_k). 
+ + """ + n_batch = query.size(0) + q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k) + k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k) + v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k) + q = q.transpose(1, 2) # (batch, head, time1, d_k) + k = k.transpose(1, 2) # (batch, head, time2, d_k) + v = v.transpose(1, 2) # (batch, head, time2, d_k) + + return q, k, v + + def forward_attention( + self, + value: torch.Tensor, + scores: torch.Tensor, + mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool) + ) -> torch.Tensor: + """Compute attention context vector. + + Args: + value (torch.Tensor): Transformed value, size + (#batch, n_head, time2, d_k). + scores (torch.Tensor): Attention score, size + (#batch, n_head, time1, time2). + mask (torch.Tensor): Mask, size (#batch, 1, time2) or + (#batch, time1, time2), (0, 0, 0) means fake mask. + + Returns: + torch.Tensor: Transformed value (#batch, time1, d_model) + weighted by the attention score (#batch, time1, time2). + + """ + n_batch = value.size(0) + # NOTE(xcsong): When will `if mask.size(2) > 0` be True? + # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the + # 1st chunk to ease the onnx export.] + # 2. pytorch training + if mask.size(2) > 0: # time2 > 0 + mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2) + # For last chunk, time2 might be larger than scores.size(-1) + mask = mask[:, :, :, :scores.size(-1)] # (batch, 1, *, time2) + scores = scores.masked_fill(mask, -float('inf')) + attn = torch.softmax(scores, dim=-1).masked_fill( + mask, 0.0) # (batch, head, time1, time2) + # NOTE(xcsong): When will `if mask.size(2) > 0` be False? + # 1. onnx(16/-1, -1/-1, 16/0) + # 2. jit (16/-1, -1/-1, 16/0, 16/4) + else: + attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2) + + p_attn = self.dropout(attn) + x = torch.matmul(p_attn, value) # (batch, head, time1, d_k) + x = (x.transpose(1, 2).contiguous().view(n_batch, -1, + self.h * self.d_k) + ) # (batch, time1, d_model) + + return self.linear_out(x) # (batch, time1, d_model) + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + pos_emb: torch.Tensor = torch.empty(0), + cache: torch.Tensor = torch.zeros((0, 0, 0, 0)) + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Compute scaled dot product attention. + + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). + mask (torch.Tensor): Mask tensor (#batch, 1, time2) or + (#batch, time1, time2). + 1.When applying cross attention between decoder and encoder, + the batch padding mask for input is in (#batch, 1, T) shape. + 2.When applying self attention of encoder, + the mask is in (#batch, T, T) shape. + 3.When applying self attention of decoder, + the mask is in (#batch, L, L) shape. + 4.If the different position in decoder see different block + of the encoder, such as Mocha, the passed in mask could be + in (#batch, L, T) shape. But there is no such case in current + CosyVoice. + cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2), + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + + + Returns: + torch.Tensor: Output tensor (#batch, time1, d_model). 
+ torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2) + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + + """ + q, k, v = self.forward_qkv(query, key, value) + + # NOTE(xcsong): + # when export onnx model, for 1st chunk, we feed + # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode) + # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode). + # In all modes, `if cache.size(0) > 0` will alwayse be `True` + # and we will always do splitting and + # concatnation(this will simplify onnx export). Note that + # it's OK to concat & split zero-shaped tensors(see code below). + # when export jit model, for 1st chunk, we always feed + # cache(0, 0, 0, 0) since jit supports dynamic if-branch. + # >>> a = torch.ones((1, 2, 0, 4)) + # >>> b = torch.ones((1, 2, 3, 4)) + # >>> c = torch.cat((a, b), dim=2) + # >>> torch.equal(b, c) # True + # >>> d = torch.split(a, 2, dim=-1) + # >>> torch.equal(d[0], d[1]) # True + if cache.size(0) > 0: + key_cache, value_cache = torch.split(cache, + cache.size(-1) // 2, + dim=-1) + k = torch.cat([key_cache, k], dim=2) + v = torch.cat([value_cache, v], dim=2) + # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's + # non-trivial to calculate `next_cache_start` here. + new_cache = torch.cat((k, v), dim=-1) + + scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k) + return self.forward_attention(v, scores, mask), new_cache + + +class RelPositionMultiHeadedAttention(MultiHeadedAttention): + """Multi-Head Attention layer with relative position encoding. + Paper: https://arxiv.org/abs/1901.02860 + Args: + n_head (int): The number of heads. + n_feat (int): The number of features. + dropout_rate (float): Dropout rate. + """ + + def __init__(self, + n_head: int, + n_feat: int, + dropout_rate: float, + key_bias: bool = True): + """Construct an RelPositionMultiHeadedAttention object.""" + super().__init__(n_head, n_feat, dropout_rate, key_bias) + # linear transformation for positional encoding + self.linear_pos = nn.Linear(n_feat, n_feat, bias=False) + # these two learnable bias are used in matrix c and matrix d + # as described in https://arxiv.org/abs/1901.02860 Section 3.3 + self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k)) + self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k)) + torch.nn.init.xavier_uniform_(self.pos_bias_u) + torch.nn.init.xavier_uniform_(self.pos_bias_v) + + def rel_shift(self, x): + """Compute relative positional encoding. + + Args: + x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1). + time1 means the length of query vector. + + Returns: + torch.Tensor: Output tensor. + + """ + zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype) + x_padded = torch.cat([zero_pad, x], dim=-1) + + x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2)) + x = x_padded[:, :, 1:].view_as(x)[ + :, :, :, : x.size(-1) // 2 + 1 + ] # only keep the positions from 0 to time2 + return x + + def forward( + self, + query: torch.Tensor, + key: torch.Tensor, + value: torch.Tensor, + mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + pos_emb: torch.Tensor = torch.empty(0), + cache: torch.Tensor = torch.zeros((0, 0, 0, 0)) + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Compute 'Scaled Dot Product Attention' with rel. positional encoding. + Args: + query (torch.Tensor): Query tensor (#batch, time1, size). + key (torch.Tensor): Key tensor (#batch, time2, size). + value (torch.Tensor): Value tensor (#batch, time2, size). 
+ mask (torch.Tensor): Mask tensor (#batch, 1, time2) or + (#batch, time1, time2), (0, 0, 0) means fake mask. + pos_emb (torch.Tensor): Positional embedding tensor + (#batch, time2, size). + cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2), + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + Returns: + torch.Tensor: Output tensor (#batch, time1, d_model). + torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2) + where `cache_t == chunk_size * num_decoding_left_chunks` + and `head * d_k == size` + """ + q, k, v = self.forward_qkv(query, key, value) + q = q.transpose(1, 2) # (batch, time1, head, d_k) + + # NOTE(xcsong): + # when export onnx model, for 1st chunk, we feed + # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode) + # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode). + # In all modes, `if cache.size(0) > 0` will alwayse be `True` + # and we will always do splitting and + # concatnation(this will simplify onnx export). Note that + # it's OK to concat & split zero-shaped tensors(see code below). + # when export jit model, for 1st chunk, we always feed + # cache(0, 0, 0, 0) since jit supports dynamic if-branch. + # >>> a = torch.ones((1, 2, 0, 4)) + # >>> b = torch.ones((1, 2, 3, 4)) + # >>> c = torch.cat((a, b), dim=2) + # >>> torch.equal(b, c) # True + # >>> d = torch.split(a, 2, dim=-1) + # >>> torch.equal(d[0], d[1]) # True + if cache.size(0) > 0: + key_cache, value_cache = torch.split(cache, + cache.size(-1) // 2, + dim=-1) + k = torch.cat([key_cache, k], dim=2) + v = torch.cat([value_cache, v], dim=2) + # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's + # non-trivial to calculate `next_cache_start` here. + new_cache = torch.cat((k, v), dim=-1) + + n_batch_pos = pos_emb.size(0) + p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k) + p = p.transpose(1, 2) # (batch, head, time1, d_k) + + # (batch, head, time1, d_k) + q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2) + # (batch, head, time1, d_k) + q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2) + + # compute attention score + # first compute matrix a and matrix c + # as described in https://arxiv.org/abs/1901.02860 Section 3.3 + # (batch, head, time1, time2) + matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1)) + + # compute matrix b and matrix d + # (batch, head, time1, time2) + matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1)) + # NOTE(Xiang Lyu): Keep rel_shift since espnet rel_pos_emb is used + if matrix_ac.shape != matrix_bd.shape: + matrix_bd = self.rel_shift(matrix_bd) + + scores = (matrix_ac + matrix_bd) / math.sqrt( + self.d_k) # (batch, head, time1, time2) + + return self.forward_attention(v, scores, mask), new_cache diff --git a/cosyvoice/transformer/convolution.py b/cosyvoice/transformer/convolution.py new file mode 100644 index 0000000..4d5d961 --- /dev/null +++ b/cosyvoice/transformer/convolution.py @@ -0,0 +1,145 @@ +# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""ConvolutionModule definition.""" + +from typing import Tuple + +import torch +from torch import nn + + +class ConvolutionModule(nn.Module): + """ConvolutionModule in Conformer model.""" + + def __init__(self, + channels: int, + kernel_size: int = 15, + activation: nn.Module = nn.ReLU(), + norm: str = "batch_norm", + causal: bool = False, + bias: bool = True): + """Construct an ConvolutionModule object. + Args: + channels (int): The number of channels of conv layers. + kernel_size (int): Kernel size of conv layers. + causal (int): Whether use causal convolution or not + """ + super().__init__() + + self.pointwise_conv1 = nn.Conv1d( + channels, + 2 * channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + # self.lorder is used to distinguish if it's a causal convolution, + # if self.lorder > 0: it's a causal convolution, the input will be + # padded with self.lorder frames on the left in forward. + # else: it's a symmetrical convolution + if causal: + padding = 0 + self.lorder = kernel_size - 1 + else: + # kernel_size should be an odd number for none causal convolution + assert (kernel_size - 1) % 2 == 0 + padding = (kernel_size - 1) // 2 + self.lorder = 0 + self.depthwise_conv = nn.Conv1d( + channels, + channels, + kernel_size, + stride=1, + padding=padding, + groups=channels, + bias=bias, + ) + + assert norm in ['batch_norm', 'layer_norm'] + if norm == "batch_norm": + self.use_layer_norm = False + self.norm = nn.BatchNorm1d(channels) + else: + self.use_layer_norm = True + self.norm = nn.LayerNorm(channels) + + self.pointwise_conv2 = nn.Conv1d( + channels, + channels, + kernel_size=1, + stride=1, + padding=0, + bias=bias, + ) + self.activation = activation + + def forward( + self, + x: torch.Tensor, + mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + cache: torch.Tensor = torch.zeros((0, 0, 0)), + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Compute convolution module. + Args: + x (torch.Tensor): Input tensor (#batch, time, channels). + mask_pad (torch.Tensor): used for batch padding (#batch, 1, time), + (0, 0, 0) means fake mask. + cache (torch.Tensor): left context cache, it is only + used in causal convolution (#batch, channels, cache_t), + (0, 0, 0) meas fake cache. + Returns: + torch.Tensor: Output tensor (#batch, time, channels). + """ + # exchange the temporal dimension and the feature dimension + x = x.transpose(1, 2) # (#batch, channels, time) + + # mask batch padding + if mask_pad.size(2) > 0: # time > 0 + x.masked_fill_(~mask_pad, 0.0) + + if self.lorder > 0: + if cache.size(2) == 0: # cache_t == 0 + x = nn.functional.pad(x, (self.lorder, 0), 'constant', 0.0) + else: + assert cache.size(0) == x.size(0) # equal batch + assert cache.size(1) == x.size(1) # equal channel + x = torch.cat((cache, x), dim=2) + assert (x.size(2) > self.lorder) + new_cache = x[:, :, -self.lorder:] + else: + # It's better we just return None if no cache is required, + # However, for JIT export, here we just fake one tensor instead of + # None. 
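+            # Illustrative shapes (assumed, not from any config): for a causal
+            # module with kernel_size=15 (lorder=14), a (1, channels, 16) chunk
+            # concatenated with a (1, channels, 14) cache keeps the last 14
+            # frames of the concatenation as `new_cache` in the branch above.
+            # In this non-causal branch an empty (0, 0, 0) tensor stands in for
+            # "no cache" so the return type stays stable for JIT export.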
+ new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device) + + # GLU mechanism + x = self.pointwise_conv1(x) # (batch, 2*channel, dim) + x = nn.functional.glu(x, dim=1) # (batch, channel, dim) + + # 1D Depthwise Conv + x = self.depthwise_conv(x) + if self.use_layer_norm: + x = x.transpose(1, 2) + x = self.activation(self.norm(x)) + if self.use_layer_norm: + x = x.transpose(1, 2) + x = self.pointwise_conv2(x) + # mask batch padding + if mask_pad.size(2) > 0: # time > 0 + x.masked_fill_(~mask_pad, 0.0) + + return x.transpose(1, 2), new_cache diff --git a/cosyvoice/transformer/decoder.py b/cosyvoice/transformer/decoder.py new file mode 100644 index 0000000..961c875 --- /dev/null +++ b/cosyvoice/transformer/decoder.py @@ -0,0 +1,396 @@ +# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang, Di Wu) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Decoder definition.""" +from typing import Tuple, List, Optional + +import torch +import torch.utils.checkpoint as ckpt +import logging + +from cosyvoice.transformer.decoder_layer import DecoderLayer +from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward +from cosyvoice.utils.class_utils import ( + COSYVOICE_EMB_CLASSES, + COSYVOICE_ATTENTION_CLASSES, + COSYVOICE_ACTIVATION_CLASSES, +) +from cosyvoice.utils.mask import (subsequent_mask, make_pad_mask) + + +class TransformerDecoder(torch.nn.Module): + """Base class of Transfomer decoder module. + Args: + vocab_size: output dim + encoder_output_size: dimension of attention + attention_heads: the number of heads of multi head attention + linear_units: the hidden units number of position-wise feedforward + num_blocks: the number of decoder blocks + dropout_rate: dropout rate + self_attention_dropout_rate: dropout rate for attention + input_layer: input layer type + use_output_layer: whether to use output layer + pos_enc_class: PositionalEncoding or ScaledPositionalEncoding + normalize_before: + True: use layer_norm before each sub-block of a layer. + False: use layer_norm after each sub-block of a layer. + src_attention: if false, encoder-decoder cross attention is not + applied, such as CIF model + key_bias: whether use bias in attention.linear_k, False for whisper models. + gradient_checkpointing: rerunning a forward-pass segment for each + checkpointed segment during backward. 
+ tie_word_embedding: Tie or clone module weights depending of whether we are + using TorchScript or not + """ + + def __init__( + self, + vocab_size: int, + encoder_output_size: int, + attention_heads: int = 4, + linear_units: int = 2048, + num_blocks: int = 6, + dropout_rate: float = 0.1, + positional_dropout_rate: float = 0.1, + self_attention_dropout_rate: float = 0.0, + src_attention_dropout_rate: float = 0.0, + input_layer: str = "embed", + use_output_layer: bool = True, + normalize_before: bool = True, + src_attention: bool = True, + key_bias: bool = True, + activation_type: str = "relu", + gradient_checkpointing: bool = False, + tie_word_embedding: bool = False, + ): + super().__init__() + attention_dim = encoder_output_size + activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]() + + self.embed = torch.nn.Sequential( + torch.nn.Identity() if input_layer == "no_pos" else + torch.nn.Embedding(vocab_size, attention_dim), + COSYVOICE_EMB_CLASSES[input_layer](attention_dim, + positional_dropout_rate), + ) + + self.normalize_before = normalize_before + self.after_norm = torch.nn.LayerNorm(attention_dim, eps=1e-5) + self.use_output_layer = use_output_layer + if use_output_layer: + self.output_layer = torch.nn.Linear(attention_dim, vocab_size) + else: + self.output_layer = torch.nn.Identity() + self.num_blocks = num_blocks + self.decoders = torch.nn.ModuleList([ + DecoderLayer( + attention_dim, + COSYVOICE_ATTENTION_CLASSES["selfattn"]( + attention_heads, attention_dim, + self_attention_dropout_rate, key_bias), + COSYVOICE_ATTENTION_CLASSES["selfattn"]( + attention_heads, attention_dim, src_attention_dropout_rate, + key_bias) if src_attention else None, + PositionwiseFeedForward(attention_dim, linear_units, + dropout_rate, activation), + dropout_rate, + normalize_before, + ) for _ in range(self.num_blocks) + ]) + + self.gradient_checkpointing = gradient_checkpointing + self.tie_word_embedding = tie_word_embedding + + def forward( + self, + memory: torch.Tensor, + memory_mask: torch.Tensor, + ys_in_pad: torch.Tensor, + ys_in_lens: torch.Tensor, + r_ys_in_pad: torch.Tensor = torch.empty(0), + reverse_weight: float = 0.0, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Forward decoder. + Args: + memory: encoded memory, float32 (batch, maxlen_in, feat) + memory_mask: encoder memory mask, (batch, 1, maxlen_in) + ys_in_pad: padded input token ids, int64 (batch, maxlen_out) + ys_in_lens: input lengths of this batch (batch) + r_ys_in_pad: not used in transformer decoder, in order to unify api + with bidirectional decoder + reverse_weight: not used in transformer decoder, in order to unify + api with bidirectional decode + Returns: + (tuple): tuple containing: + x: decoded token score before softmax (batch, maxlen_out, + vocab_size) if use_output_layer is True, + torch.tensor(0.0), in order to unify api with bidirectional decoder + olens: (batch, ) + NOTE(xcsong): + We pass the `__call__` method of the modules instead of `forward` to the + checkpointing API because `__call__` attaches all the hooks of the module. 
+ https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 + """ + tgt = ys_in_pad + maxlen = tgt.size(1) + # tgt_mask: (B, 1, L) + tgt_mask = ~make_pad_mask(ys_in_lens, maxlen).unsqueeze(1) + tgt_mask = tgt_mask.to(tgt.device) + # m: (1, L, L) + m = subsequent_mask(tgt_mask.size(-1), + device=tgt_mask.device).unsqueeze(0) + # tgt_mask: (B, L, L) + tgt_mask = tgt_mask & m + x, _ = self.embed(tgt) + if self.gradient_checkpointing and self.training: + x = self.forward_layers_checkpointed(x, tgt_mask, memory, + memory_mask) + else: + x = self.forward_layers(x, tgt_mask, memory, memory_mask) + if self.normalize_before: + x = self.after_norm(x) + if self.use_output_layer: + x = self.output_layer(x) + olens = tgt_mask.sum(1) + return x, torch.tensor(0.0), olens + + def forward_layers(self, x: torch.Tensor, tgt_mask: torch.Tensor, + memory: torch.Tensor, + memory_mask: torch.Tensor) -> torch.Tensor: + for layer in self.decoders: + x, tgt_mask, memory, memory_mask = layer(x, tgt_mask, memory, + memory_mask) + return x + + @torch.jit.ignore(drop=True) + def forward_layers_checkpointed(self, x: torch.Tensor, + tgt_mask: torch.Tensor, + memory: torch.Tensor, + memory_mask: torch.Tensor) -> torch.Tensor: + for layer in self.decoders: + x, tgt_mask, memory, memory_mask = ckpt.checkpoint( + layer.__call__, x, tgt_mask, memory, memory_mask) + return x + + def forward_one_step( + self, + memory: torch.Tensor, + memory_mask: torch.Tensor, + tgt: torch.Tensor, + tgt_mask: torch.Tensor, + cache: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, List[torch.Tensor]]: + """Forward one step. + This is only used for decoding. + Args: + memory: encoded memory, float32 (batch, maxlen_in, feat) + memory_mask: encoded memory mask, (batch, 1, maxlen_in) + tgt: input token ids, int64 (batch, maxlen_out) + tgt_mask: input token mask, (batch, maxlen_out) + dtype=torch.uint8 in PyTorch 1.2- + dtype=torch.bool in PyTorch 1.2+ (include 1.2) + cache: cached output list of (batch, max_time_out-1, size) + Returns: + y, cache: NN output value and cache per `self.decoders`. + y.shape` is (batch, maxlen_out, token) + """ + x, _ = self.embed(tgt) + new_cache = [] + for i, decoder in enumerate(self.decoders): + if cache is None: + c = None + else: + c = cache[i] + x, tgt_mask, memory, memory_mask = decoder(x, + tgt_mask, + memory, + memory_mask, + cache=c) + new_cache.append(x) + if self.normalize_before: + y = self.after_norm(x[:, -1]) + else: + y = x[:, -1] + if self.use_output_layer: + y = torch.log_softmax(self.output_layer(y), dim=-1) + return y, new_cache + + def tie_or_clone_weights(self, jit_mode: bool = True): + """Tie or clone module weights (between word_emb and output_layer) + depending of whether we are using TorchScript or not""" + if not self.use_output_layer: + return + if jit_mode: + logging.info("clone emb.weight to output.weight") + self.output_layer.weight = torch.nn.Parameter( + self.embed[0].weight.clone()) + else: + logging.info("tie emb.weight with output.weight") + self.output_layer.weight = self.embed[0].weight + + if getattr(self.output_layer, "bias", None) is not None: + self.output_layer.bias.data = torch.nn.functional.pad( + self.output_layer.bias.data, + ( + 0, + self.output_layer.weight.shape[0] - + self.output_layer.bias.shape[0], + ), + "constant", + 0, + ) + + +class BiTransformerDecoder(torch.nn.Module): + """Base class of Transfomer decoder module. 
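+    Combines a left-to-right `TransformerDecoder` with an optional
+    right-to-left decoder of `r_num_blocks` blocks; the right-to-left branch
+    is only evaluated in `forward` when `reverse_weight > 0.0`.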
+ Args: + vocab_size: output dim + encoder_output_size: dimension of attention + attention_heads: the number of heads of multi head attention + linear_units: the hidden units number of position-wise feedforward + num_blocks: the number of decoder blocks + r_num_blocks: the number of right to left decoder blocks + dropout_rate: dropout rate + self_attention_dropout_rate: dropout rate for attention + input_layer: input layer type + use_output_layer: whether to use output layer + pos_enc_class: PositionalEncoding or ScaledPositionalEncoding + normalize_before: + True: use layer_norm before each sub-block of a layer. + False: use layer_norm after each sub-block of a layer. + key_bias: whether use bias in attention.linear_k, False for whisper models. + """ + + def __init__( + self, + vocab_size: int, + encoder_output_size: int, + attention_heads: int = 4, + linear_units: int = 2048, + num_blocks: int = 6, + r_num_blocks: int = 0, + dropout_rate: float = 0.1, + positional_dropout_rate: float = 0.1, + self_attention_dropout_rate: float = 0.0, + src_attention_dropout_rate: float = 0.0, + input_layer: str = "embed", + use_output_layer: bool = True, + normalize_before: bool = True, + key_bias: bool = True, + gradient_checkpointing: bool = False, + tie_word_embedding: bool = False, + ): + + super().__init__() + self.tie_word_embedding = tie_word_embedding + self.left_decoder = TransformerDecoder( + vocab_size, + encoder_output_size, + attention_heads, + linear_units, + num_blocks, + dropout_rate, + positional_dropout_rate, + self_attention_dropout_rate, + src_attention_dropout_rate, + input_layer, + use_output_layer, + normalize_before, + key_bias=key_bias, + gradient_checkpointing=gradient_checkpointing, + tie_word_embedding=tie_word_embedding) + + self.right_decoder = TransformerDecoder( + vocab_size, + encoder_output_size, + attention_heads, + linear_units, + r_num_blocks, + dropout_rate, + positional_dropout_rate, + self_attention_dropout_rate, + src_attention_dropout_rate, + input_layer, + use_output_layer, + normalize_before, + key_bias=key_bias, + gradient_checkpointing=gradient_checkpointing, + tie_word_embedding=tie_word_embedding) + + def forward( + self, + memory: torch.Tensor, + memory_mask: torch.Tensor, + ys_in_pad: torch.Tensor, + ys_in_lens: torch.Tensor, + r_ys_in_pad: torch.Tensor, + reverse_weight: float = 0.0, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Forward decoder. 
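+        Runs the left-to-right decoder on `ys_in_pad`; the right-to-left
+        decoder is run on `r_ys_in_pad` only when `reverse_weight > 0.0`,
+        otherwise `r_x` is returned as `torch.tensor(0.0)`.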
+ Args: + memory: encoded memory, float32 (batch, maxlen_in, feat) + memory_mask: encoder memory mask, (batch, 1, maxlen_in) + ys_in_pad: padded input token ids, int64 (batch, maxlen_out) + ys_in_lens: input lengths of this batch (batch) + r_ys_in_pad: padded input token ids, int64 (batch, maxlen_out), + used for right to left decoder + reverse_weight: used for right to left decoder + Returns: + (tuple): tuple containing: + x: decoded token score before softmax (batch, maxlen_out, + vocab_size) if use_output_layer is True, + r_x: x: decoded token score (right to left decoder) + before softmax (batch, maxlen_out, vocab_size) + if use_output_layer is True, + olens: (batch, ) + """ + l_x, _, olens = self.left_decoder(memory, memory_mask, ys_in_pad, + ys_in_lens) + r_x = torch.tensor(0.0) + if reverse_weight > 0.0: + r_x, _, olens = self.right_decoder(memory, memory_mask, + r_ys_in_pad, ys_in_lens) + return l_x, r_x, olens + + def forward_one_step( + self, + memory: torch.Tensor, + memory_mask: torch.Tensor, + tgt: torch.Tensor, + tgt_mask: torch.Tensor, + cache: Optional[List[torch.Tensor]] = None, + ) -> Tuple[torch.Tensor, List[torch.Tensor]]: + """Forward one step. + This is only used for decoding. + Args: + memory: encoded memory, float32 (batch, maxlen_in, feat) + memory_mask: encoded memory mask, (batch, 1, maxlen_in) + tgt: input token ids, int64 (batch, maxlen_out) + tgt_mask: input token mask, (batch, maxlen_out) + dtype=torch.uint8 in PyTorch 1.2- + dtype=torch.bool in PyTorch 1.2+ (include 1.2) + cache: cached output list of (batch, max_time_out-1, size) + Returns: + y, cache: NN output value and cache per `self.decoders`. + y.shape` is (batch, maxlen_out, token) + """ + return self.left_decoder.forward_one_step(memory, memory_mask, tgt, + tgt_mask, cache) + + def tie_or_clone_weights(self, jit_mode: bool = True): + """Tie or clone module weights (between word_emb and output_layer) + depending of whether we are using TorchScript or not""" + self.left_decoder.tie_or_clone_weights(jit_mode) + self.right_decoder.tie_or_clone_weights(jit_mode) diff --git a/cosyvoice/transformer/decoder_layer.py b/cosyvoice/transformer/decoder_layer.py new file mode 100644 index 0000000..91c7c5d --- /dev/null +++ b/cosyvoice/transformer/decoder_layer.py @@ -0,0 +1,132 @@ +# Copyright (c) 2019 Shigeki Karita +# 2020 Mobvoi Inc (Binbin Zhang) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Decoder self-attention layer definition.""" +from typing import Optional, Tuple + +import torch +from torch import nn + + +class DecoderLayer(nn.Module): + """Single decoder layer module. + + Args: + size (int): Input dimension. + self_attn (torch.nn.Module): Self-attention module instance. + `MultiHeadedAttention` instance can be used as the argument. + src_attn (torch.nn.Module): Inter-attention module instance. + `MultiHeadedAttention` instance can be used as the argument. + If `None` is passed, Inter-attention is not used, such as + CIF, GPT, and other decoder only model. 
+ feed_forward (torch.nn.Module): Feed-forward module instance. + `PositionwiseFeedForward` instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): + True: use layer_norm before each sub-block. + False: to use layer_norm after each sub-block. + """ + + def __init__( + self, + size: int, + self_attn: nn.Module, + src_attn: Optional[nn.Module], + feed_forward: nn.Module, + dropout_rate: float, + normalize_before: bool = True, + ): + """Construct an DecoderLayer object.""" + super().__init__() + self.size = size + self.self_attn = self_attn + self.src_attn = src_attn + self.feed_forward = feed_forward + self.norm1 = nn.LayerNorm(size, eps=1e-5) + self.norm2 = nn.LayerNorm(size, eps=1e-5) + self.norm3 = nn.LayerNorm(size, eps=1e-5) + self.dropout = nn.Dropout(dropout_rate) + self.normalize_before = normalize_before + + def forward( + self, + tgt: torch.Tensor, + tgt_mask: torch.Tensor, + memory: torch.Tensor, + memory_mask: torch.Tensor, + cache: Optional[torch.Tensor] = None + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute decoded features. + + Args: + tgt (torch.Tensor): Input tensor (#batch, maxlen_out, size). + tgt_mask (torch.Tensor): Mask for input tensor + (#batch, maxlen_out). + memory (torch.Tensor): Encoded memory + (#batch, maxlen_in, size). + memory_mask (torch.Tensor): Encoded memory mask + (#batch, maxlen_in). + cache (torch.Tensor): cached tensors. + (#batch, maxlen_out - 1, size). + + Returns: + torch.Tensor: Output tensor (#batch, maxlen_out, size). + torch.Tensor: Mask for output tensor (#batch, maxlen_out). + torch.Tensor: Encoded memory (#batch, maxlen_in, size). + torch.Tensor: Encoded memory mask (#batch, maxlen_in). + + """ + residual = tgt + if self.normalize_before: + tgt = self.norm1(tgt) + + if cache is None: + tgt_q = tgt + tgt_q_mask = tgt_mask + else: + # compute only the last frame query keeping dim: max_time_out -> 1 + assert cache.shape == ( + tgt.shape[0], + tgt.shape[1] - 1, + self.size, + ), "{cache.shape} == {(tgt.shape[0], tgt.shape[1] - 1, self.size)}" + tgt_q = tgt[:, -1:, :] + residual = residual[:, -1:, :] + tgt_q_mask = tgt_mask[:, -1:, :] + + x = residual + self.dropout( + self.self_attn(tgt_q, tgt, tgt, tgt_q_mask)[0]) + if not self.normalize_before: + x = self.norm1(x) + + if self.src_attn is not None: + residual = x + if self.normalize_before: + x = self.norm2(x) + x = residual + self.dropout( + self.src_attn(x, memory, memory, memory_mask)[0]) + if not self.normalize_before: + x = self.norm2(x) + + residual = x + if self.normalize_before: + x = self.norm3(x) + x = residual + self.dropout(self.feed_forward(x)) + if not self.normalize_before: + x = self.norm3(x) + + if cache is not None: + x = torch.cat([cache, x], dim=1) + + return x, tgt_mask, memory, memory_mask diff --git a/cosyvoice/transformer/embedding.py b/cosyvoice/transformer/embedding.py new file mode 100644 index 0000000..46130a5 --- /dev/null +++ b/cosyvoice/transformer/embedding.py @@ -0,0 +1,293 @@ +# Copyright (c) 2020 Mobvoi Inc. (authors: Binbin Zhang, Di Wu) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Positonal Encoding Module.""" + +import math +from typing import Tuple, Union + +import torch +import torch.nn.functional as F +import numpy as np + + +class PositionalEncoding(torch.nn.Module): + """Positional encoding. + + :param int d_model: embedding dim + :param float dropout_rate: dropout rate + :param int max_len: maximum input length + + PE(pos, 2i) = sin(pos/(10000^(2i/dmodel))) + PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel))) + """ + + def __init__(self, + d_model: int, + dropout_rate: float, + max_len: int = 5000, + reverse: bool = False): + """Construct an PositionalEncoding object.""" + super().__init__() + self.d_model = d_model + self.xscale = math.sqrt(self.d_model) + self.dropout = torch.nn.Dropout(p=dropout_rate) + self.max_len = max_len + + self.pe = torch.zeros(self.max_len, self.d_model) + position = torch.arange(0, self.max_len, + dtype=torch.float32).unsqueeze(1) + div_term = torch.exp( + torch.arange(0, self.d_model, 2, dtype=torch.float32) * + -(math.log(10000.0) / self.d_model)) + self.pe[:, 0::2] = torch.sin(position * div_term) + self.pe[:, 1::2] = torch.cos(position * div_term) + self.pe = self.pe.unsqueeze(0) + + def forward(self, + x: torch.Tensor, + offset: Union[int, torch.Tensor] = 0) \ + -> Tuple[torch.Tensor, torch.Tensor]: + """Add positional encoding. + + Args: + x (torch.Tensor): Input. Its shape is (batch, time, ...) + offset (int, torch.tensor): position offset + + Returns: + torch.Tensor: Encoded tensor. Its shape is (batch, time, ...) + torch.Tensor: for compatibility to RelPositionalEncoding + """ + + self.pe = self.pe.to(x.device) + pos_emb = self.position_encoding(offset, x.size(1), False) + x = x * self.xscale + pos_emb + return self.dropout(x), self.dropout(pos_emb) + + def position_encoding(self, + offset: Union[int, torch.Tensor], + size: int, + apply_dropout: bool = True) -> torch.Tensor: + """ For getting encoding in a streaming fashion + + Attention!!!!! + we apply dropout only once at the whole utterance level in a none + streaming way, but will call this function several times with + increasing input size in a streaming scenario, so the dropout will + be applied several times. 
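+
+        Example (illustrative, values assumed)::
+
+            >>> pos_enc = PositionalEncoding(d_model=4, dropout_rate=0.0)
+            >>> pos_enc.position_encoding(offset=10, size=3).shape
+            torch.Size([1, 3, 4])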
+ + Args: + offset (int or torch.tensor): start offset + size (int): required size of position encoding + + Returns: + torch.Tensor: Corresponding encoding + """ + # How to subscript a Union type: + # https://github.com/pytorch/pytorch/issues/69434 + if isinstance(offset, int): + assert offset + size <= self.max_len + pos_emb = self.pe[:, offset:offset + size] + elif isinstance(offset, torch.Tensor) and offset.dim() == 0: # scalar + assert offset + size <= self.max_len + pos_emb = self.pe[:, offset:offset + size] + else: # for batched streaming decoding on GPU + assert torch.max(offset) + size <= self.max_len + index = offset.unsqueeze(1) + \ + torch.arange(0, size).to(offset.device) # B X T + flag = index > 0 + # remove negative offset + index = index * flag + pos_emb = F.embedding(index, self.pe[0]) # B X T X d_model + + if apply_dropout: + pos_emb = self.dropout(pos_emb) + return pos_emb + + +class RelPositionalEncoding(PositionalEncoding): + """Relative positional encoding module. + See : Appendix B in https://arxiv.org/abs/1901.02860 + Args: + d_model (int): Embedding dimension. + dropout_rate (float): Dropout rate. + max_len (int): Maximum input length. + """ + + def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000): + """Initialize class.""" + super().__init__(d_model, dropout_rate, max_len, reverse=True) + + def forward(self, + x: torch.Tensor, + offset: Union[int, torch.Tensor] = 0) \ + -> Tuple[torch.Tensor, torch.Tensor]: + """Compute positional encoding. + Args: + x (torch.Tensor): Input tensor (batch, time, `*`). + Returns: + torch.Tensor: Encoded tensor (batch, time, `*`). + torch.Tensor: Positional embedding tensor (1, time, `*`). + """ + self.pe = self.pe.to(x.device) + x = x * self.xscale + pos_emb = self.position_encoding(offset, x.size(1), False) + return self.dropout(x), self.dropout(pos_emb) + + +class WhisperPositionalEncoding(PositionalEncoding): + """ Sinusoids position encoding used in openai-whisper.encoder + """ + + def __init__(self, d_model: int, dropout_rate: float, max_len: int = 1500): + super().__init__(d_model, dropout_rate, max_len) + self.xscale = 1.0 + log_timescale_increment = np.log(10000) / (d_model // 2 - 1) + inv_timescales = torch.exp(-log_timescale_increment * + torch.arange(d_model // 2)) + scaled_time = torch.arange(max_len)[:, np.newaxis] * \ + inv_timescales[np.newaxis, :] + pe = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1) + delattr(self, "pe") + self.register_buffer("pe", pe.unsqueeze(0)) + + +class LearnablePositionalEncoding(PositionalEncoding): + """ Learnable position encoding used in openai-whisper.decoder + """ + + def __init__(self, d_model: int, dropout_rate: float, max_len: int = 448): + super().__init__(d_model, dropout_rate, max_len) + # NOTE(xcsong): overwrite self.pe & self.xscale + self.pe = torch.nn.Parameter(torch.empty(1, max_len, d_model)) + self.xscale = 1.0 + + +class NoPositionalEncoding(torch.nn.Module): + """ No position encoding + """ + + def __init__(self, d_model: int, dropout_rate: float): + super().__init__() + self.d_model = d_model + self.dropout = torch.nn.Dropout(p=dropout_rate) + + def forward(self, + x: torch.Tensor, + offset: Union[int, torch.Tensor] = 0) \ + -> Tuple[torch.Tensor, torch.Tensor]: + """ Just return zero vector for interface compatibility + """ + pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device) + return self.dropout(x), pos_emb + + def position_encoding(self, offset: Union[int, torch.Tensor], + size: int) -> torch.Tensor: + return 
torch.zeros(1, size, self.d_model)
+
+
+class EspnetRelPositionalEncoding(torch.nn.Module):
+    """Relative positional encoding module (new implementation).
+
+    Details can be found in https://github.com/espnet/espnet/pull/2816.
+
+    See : Appendix B in https://arxiv.org/abs/1901.02860
+
+    Args:
+        d_model (int): Embedding dimension.
+        dropout_rate (float): Dropout rate.
+        max_len (int): Maximum input length.
+
+    """
+
+    def __init__(self, d_model, dropout_rate, max_len=5000):
+        """Construct a PositionalEncoding object."""
+        super(EspnetRelPositionalEncoding, self).__init__()
+        self.d_model = d_model
+        self.xscale = math.sqrt(self.d_model)
+        self.dropout = torch.nn.Dropout(p=dropout_rate)
+        self.pe = None
+        self.extend_pe(torch.tensor(0.0).expand(1, max_len))
+
+    def extend_pe(self, x):
+        """Reset the positional encodings."""
+        if self.pe is not None:
+            # self.pe contains both positive and negative parts
+            # the length of self.pe is 2 * input_len - 1
+            if self.pe.size(1) >= x.size(1) * 2 - 1:
+                if self.pe.dtype != x.dtype or self.pe.device != x.device:
+                    self.pe = self.pe.to(dtype=x.dtype, device=x.device)
+                return
+        # Suppose `i` means the position of the query vector and `j` means the
+        # position of the key vector. We use positive relative positions when
+        # keys are to the left (i>j) and negative relative positions
+        # otherwise (i<j).
+        pe_positive = torch.zeros(x.size(1), self.d_model)
+        pe_negative = torch.zeros(x.size(1), self.d_model)
+        position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)
+        div_term = torch.exp(
+            torch.arange(0, self.d_model, 2, dtype=torch.float32) *
+            -(math.log(10000.0) / self.d_model))
+        pe_positive[:, 0::2] = torch.sin(position * div_term)
+        pe_positive[:, 1::2] = torch.cos(position * div_term)
+        pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)
+        pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)
+
+        # Reverse the order of positive indices and concat both positive and
+        # negative indices. This is used to support the shifting trick
+        # as in https://arxiv.org/abs/1901.02860
+        pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)
+        pe_negative = pe_negative[1:].unsqueeze(0)
+        pe = torch.cat([pe_positive, pe_negative], dim=1)
+        self.pe = pe.to(device=x.device, dtype=x.dtype)
+
+    def forward(self,
+                x: torch.Tensor,
+                offset: Union[int, torch.Tensor] = 0) \
+            -> Tuple[torch.Tensor, torch.Tensor]:
+        """Add positional encoding.
+
+        Args:
+            x (torch.Tensor): Input tensor (batch, time, `*`).
+
+        Returns:
+            torch.Tensor: Encoded tensor (batch, time, `*`).
+            torch.Tensor: Positional embedding tensor (1, 2*time-1, `*`).
+        """
+        self.extend_pe(x)
+        x = x * self.xscale
+        pos_emb = self.position_encoding(size=x.size(1), offset=offset)
+        return self.dropout(x), self.dropout(pos_emb)
+
+    def position_encoding(self,
+                          offset: Union[int, torch.Tensor],
+                          size: int) -> torch.Tensor:
+        """ For getting encoding in a streaming fashion
+
+        Attention!!!!!
+        we apply dropout only once at the whole utterance level in a
+        non-streaming way, but will call this function several times with
+        increasing input size in a streaming scenario, so the dropout will
+        be applied several times.
+
+        Args:
+            offset (int or torch.tensor): start offset
+            size (int): required size of position encoding
+
+        Returns:
+            torch.Tensor: Corresponding encoding
+        """
+        pos_emb = self.pe[
+            :,
+            self.pe.size(1) // 2 - size + 1 : self.pe.size(1) // 2 + size,
+        ]
+        return pos_emb
diff --git a/cosyvoice/transformer/encoder.py b/cosyvoice/transformer/encoder.py
new file mode 100644
index 0000000..7e8bd23
--- /dev/null
+++ b/cosyvoice/transformer/encoder.py
@@ -0,0 +1,472 @@
+# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu)
+#               2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn)
+#               2024 Alibaba Inc (Xiang Lyu)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Modified from ESPnet(https://github.com/espnet/espnet) +"""Encoder definition.""" +from typing import Tuple + +import torch +import torch.utils.checkpoint as ckpt + +from cosyvoice.transformer.convolution import ConvolutionModule +from cosyvoice.transformer.encoder_layer import TransformerEncoderLayer +from cosyvoice.transformer.encoder_layer import ConformerEncoderLayer +from cosyvoice.transformer.positionwise_feed_forward import PositionwiseFeedForward +from cosyvoice.utils.class_utils import ( + COSYVOICE_EMB_CLASSES, + COSYVOICE_SUBSAMPLE_CLASSES, + COSYVOICE_ATTENTION_CLASSES, + COSYVOICE_ACTIVATION_CLASSES, +) +from cosyvoice.utils.mask import make_pad_mask +from cosyvoice.utils.mask import add_optional_chunk_mask + + +class BaseEncoder(torch.nn.Module): + + def __init__( + self, + input_size: int, + output_size: int = 256, + attention_heads: int = 4, + linear_units: int = 2048, + num_blocks: int = 6, + dropout_rate: float = 0.1, + positional_dropout_rate: float = 0.1, + attention_dropout_rate: float = 0.0, + input_layer: str = "conv2d", + pos_enc_layer_type: str = "abs_pos", + normalize_before: bool = True, + static_chunk_size: int = 0, + use_dynamic_chunk: bool = False, + global_cmvn: torch.nn.Module = None, + use_dynamic_left_chunk: bool = False, + gradient_checkpointing: bool = False, + ): + """ + Args: + input_size (int): input dim + output_size (int): dimension of attention + attention_heads (int): the number of heads of multi head attention + linear_units (int): the hidden units number of position-wise feed + forward + num_blocks (int): the number of decoder blocks + dropout_rate (float): dropout rate + attention_dropout_rate (float): dropout rate in attention + positional_dropout_rate (float): dropout rate after adding + positional encoding + input_layer (str): input layer type. + optional [linear, conv2d, conv2d6, conv2d8] + pos_enc_layer_type (str): Encoder positional encoding layer type. + opitonal [abs_pos, scaled_abs_pos, rel_pos, no_pos] + normalize_before (bool): + True: use layer_norm before each sub-block of a layer. + False: use layer_norm after each sub-block of a layer. + static_chunk_size (int): chunk size for static chunk training and + decoding + use_dynamic_chunk (bool): whether use dynamic chunk size for + training or not, You can only use fixed chunk(chunk_size > 0) + or dyanmic chunk size(use_dynamic_chunk = True) + global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module + use_dynamic_left_chunk (bool): whether use dynamic left chunk in + dynamic chunk training + key_bias: whether use bias in attention.linear_k, False for whisper models. + gradient_checkpointing: rerunning a forward-pass segment for each + checkpointed segment during backward. 
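+
+        Illustrative chunk settings (values assumed): `static_chunk_size=16`
+        with `use_dynamic_chunk=False` trains and decodes with a fixed
+        16-frame chunk mask, while `static_chunk_size=0` with
+        `use_dynamic_chunk=True` samples a random chunk size per batch; both
+        cases are resolved by `add_optional_chunk_mask` in `forward`.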
+ """ + super().__init__() + self._output_size = output_size + + self.global_cmvn = global_cmvn + self.embed = COSYVOICE_SUBSAMPLE_CLASSES[input_layer]( + input_size, + output_size, + dropout_rate, + COSYVOICE_EMB_CLASSES[pos_enc_layer_type](output_size, + positional_dropout_rate), + ) + + self.normalize_before = normalize_before + self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5) + self.static_chunk_size = static_chunk_size + self.use_dynamic_chunk = use_dynamic_chunk + self.use_dynamic_left_chunk = use_dynamic_left_chunk + self.gradient_checkpointing = gradient_checkpointing + + def output_size(self) -> int: + return self._output_size + + def forward( + self, + xs: torch.Tensor, + xs_lens: torch.Tensor, + decoding_chunk_size: int = 0, + num_decoding_left_chunks: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """Embed positions in tensor. + + Args: + xs: padded input tensor (B, T, D) + xs_lens: input length (B) + decoding_chunk_size: decoding chunk size for dynamic chunk + 0: default for training, use random dynamic chunk. + <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. + num_decoding_left_chunks: number of left chunks, this is for decoding, + the chunk size is decoding_chunk_size. + >=0: use num_decoding_left_chunks + <0: use all left chunks + Returns: + encoder output tensor xs, and subsampled masks + xs: padded output tensor (B, T' ~= T/subsample_rate, D) + masks: torch.Tensor batch padding mask after subsample + (B, 1, T' ~= T/subsample_rate) + NOTE(xcsong): + We pass the `__call__` method of the modules instead of `forward` to the + checkpointing API because `__call__` attaches all the hooks of the module. + https://discuss.pytorch.org/t/any-different-between-model-input-and-model-forward-input/3690/2 + """ + T = xs.size(1) + masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T) + if self.global_cmvn is not None: + xs = self.global_cmvn(xs) + xs, pos_emb, masks = self.embed(xs, masks) + mask_pad = masks # (B, 1, T/subsample_rate) + chunk_masks = add_optional_chunk_mask(xs, masks, + self.use_dynamic_chunk, + self.use_dynamic_left_chunk, + decoding_chunk_size, + self.static_chunk_size, + num_decoding_left_chunks) + if self.gradient_checkpointing and self.training: + xs = self.forward_layers_checkpointed(xs, chunk_masks, pos_emb, + mask_pad) + else: + xs = self.forward_layers(xs, chunk_masks, pos_emb, mask_pad) + if self.normalize_before: + xs = self.after_norm(xs) + # Here we assume the mask is not changed in encoder layers, so just + # return the masks before encoder layers, and the masks will be used + # for cross attention with decoder later + return xs, masks + + def forward_layers(self, xs: torch.Tensor, chunk_masks: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor) -> torch.Tensor: + for layer in self.encoders: + xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) + return xs + + @torch.jit.ignore(drop=True) + def forward_layers_checkpointed(self, xs: torch.Tensor, + chunk_masks: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor) -> torch.Tensor: + for layer in self.encoders: + xs, chunk_masks, _, _ = ckpt.checkpoint(layer.__call__, xs, + chunk_masks, pos_emb, + mask_pad) + return xs + + def forward_chunk( + self, + xs: torch.Tensor, + offset: int, + required_cache_size: int, + att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0), + cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0), + att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + ) -> Tuple[torch.Tensor, 
torch.Tensor, torch.Tensor]: + """ Forward just one chunk + + Args: + xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim), + where `time == (chunk_size - 1) * subsample_rate + \ + subsample.right_context + 1` + offset (int): current offset in encoder output time stamp + required_cache_size (int): cache size required for next chunk + compuation + >=0: actual cache size + <0: means all history cache is required + att_cache (torch.Tensor): cache tensor for KEY & VALUE in + transformer/conformer attention, with shape + (elayers, head, cache_t1, d_k * 2), where + `head * d_k == hidden-dim` and + `cache_t1 == chunk_size * num_decoding_left_chunks`. + cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer, + (elayers, b=1, hidden-dim, cache_t2), where + `cache_t2 == cnn.lorder - 1` + + Returns: + torch.Tensor: output of current input xs, + with shape (b=1, chunk_size, hidden-dim). + torch.Tensor: new attention cache required for next chunk, with + dynamic shape (elayers, head, ?, d_k * 2) + depending on required_cache_size. + torch.Tensor: new conformer cnn cache required for next chunk, with + same shape as the original cnn_cache. + + """ + assert xs.size(0) == 1 + # tmp_masks is just for interface compatibility + tmp_masks = torch.ones(1, + xs.size(1), + device=xs.device, + dtype=torch.bool) + tmp_masks = tmp_masks.unsqueeze(1) + if self.global_cmvn is not None: + xs = self.global_cmvn(xs) + # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim) + xs, pos_emb, _ = self.embed(xs, tmp_masks, offset) + # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim) + elayers, cache_t1 = att_cache.size(0), att_cache.size(2) + chunk_size = xs.size(1) + attention_key_size = cache_t1 + chunk_size + pos_emb = self.embed.position_encoding(offset=offset - cache_t1, + size=attention_key_size) + if required_cache_size < 0: + next_cache_start = 0 + elif required_cache_size == 0: + next_cache_start = attention_key_size + else: + next_cache_start = max(attention_key_size - required_cache_size, 0) + r_att_cache = [] + r_cnn_cache = [] + for i, layer in enumerate(self.encoders): + # NOTE(xcsong): Before layer.forward + # shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2), + # shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2) + xs, _, new_att_cache, new_cnn_cache = layer( + xs, + att_mask, + pos_emb, + att_cache=att_cache[i:i + 1] if elayers > 0 else att_cache, + cnn_cache=cnn_cache[i] if cnn_cache.size(0) > 0 else cnn_cache) + # NOTE(xcsong): After layer.forward + # shape(new_att_cache) is (1, head, attention_key_size, d_k * 2), + # shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2) + r_att_cache.append(new_att_cache[:, :, next_cache_start:, :]) + r_cnn_cache.append(new_cnn_cache.unsqueeze(0)) + if self.normalize_before: + xs = self.after_norm(xs) + + # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2), + # ? may be larger than cache_t1, it depends on required_cache_size + r_att_cache = torch.cat(r_att_cache, dim=0) + # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2) + r_cnn_cache = torch.cat(r_cnn_cache, dim=0) + + return (xs, r_att_cache, r_cnn_cache) + + def forward_chunk_by_chunk( + self, + xs: torch.Tensor, + decoding_chunk_size: int, + num_decoding_left_chunks: int = -1, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ Forward input chunk by chunk with chunk_size like a streaming + fashion + + Here we should pay special attention to computation cache in the + streaming style forward chunk by chunk. 
Three things should be taken + into account for computation in the current network: + 1. transformer/conformer encoder layers output cache + 2. convolution in conformer + 3. convolution in subsampling + + However, we don't implement subsampling cache for: + 1. We can control subsampling module to output the right result by + overlapping input instead of cache left context, even though it + wastes some computation, but subsampling only takes a very + small fraction of computation in the whole model. + 2. Typically, there are several covolution layers with subsampling + in subsampling module, it is tricky and complicated to do cache + with different convolution layers with different subsampling + rate. + 3. Currently, nn.Sequential is used to stack all the convolution + layers in subsampling, we need to rewrite it to make it work + with cache, which is not prefered. + Args: + xs (torch.Tensor): (1, max_len, dim) + chunk_size (int): decoding chunk size + """ + assert decoding_chunk_size > 0 + # The model is trained by static or dynamic chunk + assert self.static_chunk_size > 0 or self.use_dynamic_chunk + subsampling = self.embed.subsampling_rate + context = self.embed.right_context + 1 # Add current frame + stride = subsampling * decoding_chunk_size + decoding_window = (decoding_chunk_size - 1) * subsampling + context + num_frames = xs.size(1) + att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) + cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) + outputs = [] + offset = 0 + required_cache_size = decoding_chunk_size * num_decoding_left_chunks + + # Feed forward overlap input step by step + for cur in range(0, num_frames - context + 1, stride): + end = min(cur + decoding_window, num_frames) + chunk_xs = xs[:, cur:end, :] + (y, att_cache, + cnn_cache) = self.forward_chunk(chunk_xs, offset, + required_cache_size, att_cache, + cnn_cache) + outputs.append(y) + offset += y.size(1) + ys = torch.cat(outputs, 1) + masks = torch.ones((1, 1, ys.size(1)), + device=ys.device, + dtype=torch.bool) + return ys, masks + + +class TransformerEncoder(BaseEncoder): + """Transformer encoder module.""" + + def __init__( + self, + input_size: int, + output_size: int = 256, + attention_heads: int = 4, + linear_units: int = 2048, + num_blocks: int = 6, + dropout_rate: float = 0.1, + positional_dropout_rate: float = 0.1, + attention_dropout_rate: float = 0.0, + input_layer: str = "conv2d", + pos_enc_layer_type: str = "abs_pos", + normalize_before: bool = True, + static_chunk_size: int = 0, + use_dynamic_chunk: bool = False, + global_cmvn: torch.nn.Module = None, + use_dynamic_left_chunk: bool = False, + key_bias: bool = True, + selfattention_layer_type: str = "selfattn", + activation_type: str = "relu", + gradient_checkpointing: bool = False, + ): + """ Construct TransformerEncoder + + See Encoder for the meaning of each parameter. 
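+
+        Example (illustrative; values and shapes are assumed)::
+
+            >>> import torch
+            >>> encoder = TransformerEncoder(input_size=80, num_blocks=2)
+            >>> xs = torch.randn(2, 100, 80)          # (B, T, mel-dim)
+            >>> ys, masks = encoder(xs, torch.tensor([100, 60]))
+            >>> ys.size(-1)                           # equals output_size
+            256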
+ """ + super().__init__(input_size, output_size, attention_heads, + linear_units, num_blocks, dropout_rate, + positional_dropout_rate, attention_dropout_rate, + input_layer, pos_enc_layer_type, normalize_before, + static_chunk_size, use_dynamic_chunk, global_cmvn, + use_dynamic_left_chunk, gradient_checkpointing) + activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]() + self.encoders = torch.nn.ModuleList([ + TransformerEncoderLayer( + output_size, + COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type](attention_heads, + output_size, + attention_dropout_rate, + key_bias), + PositionwiseFeedForward(output_size, linear_units, + dropout_rate, activation), + dropout_rate, normalize_before) for _ in range(num_blocks) + ]) + + +class ConformerEncoder(BaseEncoder): + """Conformer encoder module.""" + + def __init__( + self, + input_size: int, + output_size: int = 256, + attention_heads: int = 4, + linear_units: int = 2048, + num_blocks: int = 6, + dropout_rate: float = 0.1, + positional_dropout_rate: float = 0.1, + attention_dropout_rate: float = 0.0, + input_layer: str = "conv2d", + pos_enc_layer_type: str = "rel_pos", + normalize_before: bool = True, + static_chunk_size: int = 0, + use_dynamic_chunk: bool = False, + global_cmvn: torch.nn.Module = None, + use_dynamic_left_chunk: bool = False, + positionwise_conv_kernel_size: int = 1, + macaron_style: bool = True, + selfattention_layer_type: str = "rel_selfattn", + activation_type: str = "swish", + use_cnn_module: bool = True, + cnn_module_kernel: int = 15, + causal: bool = False, + cnn_module_norm: str = "batch_norm", + key_bias: bool = True, + gradient_checkpointing: bool = False, + ): + """Construct ConformerEncoder + + Args: + input_size to use_dynamic_chunk, see in BaseEncoder + positionwise_conv_kernel_size (int): Kernel size of positionwise + conv1d layer. + macaron_style (bool): Whether to use macaron style for + positionwise layer. + selfattention_layer_type (str): Encoder attention layer type, + the parameter has no effect now, it's just for configure + compatibility. + activation_type (str): Encoder activation function type. + use_cnn_module (bool): Whether to use convolution module. + cnn_module_kernel (int): Kernel size of convolution module. + causal (bool): whether to use causal convolution or not. + key_bias: whether use bias in attention.linear_k, False for whisper models. 
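+
+        Example (illustrative; a chunk-wise streaming style configuration is
+        assumed, not a recommended setting)::
+
+            >>> import torch
+            >>> encoder = ConformerEncoder(input_size=80,
+            ...                            num_blocks=2,
+            ...                            static_chunk_size=16,
+            ...                            causal=True)
+            >>> ys, masks = encoder(torch.randn(1, 200, 80),
+            ...                     torch.tensor([200]))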
+ """ + super().__init__(input_size, output_size, attention_heads, + linear_units, num_blocks, dropout_rate, + positional_dropout_rate, attention_dropout_rate, + input_layer, pos_enc_layer_type, normalize_before, + static_chunk_size, use_dynamic_chunk, global_cmvn, + use_dynamic_left_chunk, gradient_checkpointing) + activation = COSYVOICE_ACTIVATION_CLASSES[activation_type]() + + # self-attention module definition + encoder_selfattn_layer_args = ( + attention_heads, + output_size, + attention_dropout_rate, + key_bias, + ) + # feed-forward module definition + positionwise_layer_args = ( + output_size, + linear_units, + dropout_rate, + activation, + ) + # convolution module definition + convolution_layer_args = (output_size, cnn_module_kernel, activation, + cnn_module_norm, causal) + + self.encoders = torch.nn.ModuleList([ + ConformerEncoderLayer( + output_size, + COSYVOICE_ATTENTION_CLASSES[selfattention_layer_type]( + *encoder_selfattn_layer_args), + PositionwiseFeedForward(*positionwise_layer_args), + PositionwiseFeedForward( + *positionwise_layer_args) if macaron_style else None, + ConvolutionModule( + *convolution_layer_args) if use_cnn_module else None, + dropout_rate, + normalize_before, + ) for _ in range(num_blocks) + ]) diff --git a/cosyvoice/transformer/encoder_layer.py b/cosyvoice/transformer/encoder_layer.py new file mode 100644 index 0000000..dfd758b --- /dev/null +++ b/cosyvoice/transformer/encoder_layer.py @@ -0,0 +1,236 @@ +# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu) +# 2022 Xingchen Song (sxc19@mails.tsinghua.edu.cn) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Encoder self-attention layer definition.""" + +from typing import Optional, Tuple + +import torch +from torch import nn + + +class TransformerEncoderLayer(nn.Module): + """Encoder layer module. + + Args: + size (int): Input dimension. + self_attn (torch.nn.Module): Self-attention module instance. + `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` + instance can be used as the argument. + feed_forward (torch.nn.Module): Feed-forward module instance. + `PositionwiseFeedForward`, instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): + True: use layer_norm before each sub-block. + False: to use layer_norm after each sub-block. 
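+
+    With `normalize_before=True` the layer computes, in sketch form::
+
+        x = x + dropout(self_attn(norm1(x), ...))
+        x = x + dropout(feed_forward(norm2(x)))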
+ """ + + def __init__( + self, + size: int, + self_attn: torch.nn.Module, + feed_forward: torch.nn.Module, + dropout_rate: float, + normalize_before: bool = True, + ): + """Construct an EncoderLayer object.""" + super().__init__() + self.self_attn = self_attn + self.feed_forward = feed_forward + self.norm1 = nn.LayerNorm(size, eps=1e-5) + self.norm2 = nn.LayerNorm(size, eps=1e-5) + self.dropout = nn.Dropout(dropout_rate) + self.size = size + self.normalize_before = normalize_before + + def forward( + self, + x: torch.Tensor, + mask: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute encoded features. + + Args: + x (torch.Tensor): (#batch, time, size) + mask (torch.Tensor): Mask tensor for the input (#batch, time,time), + (0, 0, 0) means fake mask. + pos_emb (torch.Tensor): just for interface compatibility + to ConformerEncoderLayer + mask_pad (torch.Tensor): does not used in transformer layer, + just for unified api with conformer. + att_cache (torch.Tensor): Cache tensor of the KEY & VALUE + (#batch=1, head, cache_t1, d_k * 2), head * d_k == size. + cnn_cache (torch.Tensor): Convolution cache in conformer layer + (#batch=1, size, cache_t2), not used here, it's for interface + compatibility to ConformerEncoderLayer. + Returns: + torch.Tensor: Output tensor (#batch, time, size). + torch.Tensor: Mask tensor (#batch, time, time). + torch.Tensor: att_cache tensor, + (#batch=1, head, cache_t1 + time, d_k * 2). + torch.Tensor: cnn_cahce tensor (#batch=1, size, cache_t2). + + """ + residual = x + if self.normalize_before: + x = self.norm1(x) + x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb=pos_emb, cache=att_cache) + x = residual + self.dropout(x_att) + if not self.normalize_before: + x = self.norm1(x) + + residual = x + if self.normalize_before: + x = self.norm2(x) + x = residual + self.dropout(self.feed_forward(x)) + if not self.normalize_before: + x = self.norm2(x) + + fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device) + return x, mask, new_att_cache, fake_cnn_cache + + +class ConformerEncoderLayer(nn.Module): + """Encoder layer module. + Args: + size (int): Input dimension. + self_attn (torch.nn.Module): Self-attention module instance. + `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` + instance can be used as the argument. + feed_forward (torch.nn.Module): Feed-forward module instance. + `PositionwiseFeedForward` instance can be used as the argument. + feed_forward_macaron (torch.nn.Module): Additional feed-forward module + instance. + `PositionwiseFeedForward` instance can be used as the argument. + conv_module (torch.nn.Module): Convolution module instance. + `ConvlutionModule` instance can be used as the argument. + dropout_rate (float): Dropout rate. + normalize_before (bool): + True: use layer_norm before each sub-block. + False: use layer_norm after each sub-block. 
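+
+    With macaron style and `normalize_before=True` the sub-blocks run in this
+    order (sketch)::
+
+        x = x + 0.5 * dropout(feed_forward_macaron(norm_ff_macaron(x)))
+        x = x + dropout(self_attn(norm_mha(x), ...))
+        x = x + dropout(conv_module(norm_conv(x), ...))
+        x = x + 0.5 * dropout(feed_forward(norm_ff(x)))
+        x = norm_final(x)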
+ """ + + def __init__( + self, + size: int, + self_attn: torch.nn.Module, + feed_forward: Optional[nn.Module] = None, + feed_forward_macaron: Optional[nn.Module] = None, + conv_module: Optional[nn.Module] = None, + dropout_rate: float = 0.1, + normalize_before: bool = True, + ): + """Construct an EncoderLayer object.""" + super().__init__() + self.self_attn = self_attn + self.feed_forward = feed_forward + self.feed_forward_macaron = feed_forward_macaron + self.conv_module = conv_module + self.norm_ff = nn.LayerNorm(size, eps=1e-5) # for the FNN module + self.norm_mha = nn.LayerNorm(size, eps=1e-5) # for the MHA module + if feed_forward_macaron is not None: + self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-5) + self.ff_scale = 0.5 + else: + self.ff_scale = 1.0 + if self.conv_module is not None: + self.norm_conv = nn.LayerNorm(size, eps=1e-5) # for the CNN module + self.norm_final = nn.LayerNorm( + size, eps=1e-5) # for the final output of the block + self.dropout = nn.Dropout(dropout_rate) + self.size = size + self.normalize_before = normalize_before + + def forward( + self, + x: torch.Tensor, + mask: torch.Tensor, + pos_emb: torch.Tensor, + mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), + att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)), + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Compute encoded features. + + Args: + x (torch.Tensor): (#batch, time, size) + mask (torch.Tensor): Mask tensor for the input (#batch, time,time), + (0, 0, 0) means fake mask. + pos_emb (torch.Tensor): positional encoding, must not be None + for ConformerEncoderLayer. + mask_pad (torch.Tensor): batch padding mask used for conv module. + (#batch, 1,time), (0, 0, 0) means fake mask. + att_cache (torch.Tensor): Cache tensor of the KEY & VALUE + (#batch=1, head, cache_t1, d_k * 2), head * d_k == size. + cnn_cache (torch.Tensor): Convolution cache in conformer layer + (#batch=1, size, cache_t2) + Returns: + torch.Tensor: Output tensor (#batch, time, size). + torch.Tensor: Mask tensor (#batch, time, time). + torch.Tensor: att_cache tensor, + (#batch=1, head, cache_t1 + time, d_k * 2). + torch.Tensor: cnn_cahce tensor (#batch, size, cache_t2). 
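+
+        Note: in chunk-wise streaming the caller keeps the returned
+        `att_cache`/`cnn_cache` and passes them back with the next chunk;
+        `BaseEncoder.forward_chunk` also trims the attention cache to
+        `required_cache_size` before reuse.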
+ """ + + # whether to use macaron style + if self.feed_forward_macaron is not None: + residual = x + if self.normalize_before: + x = self.norm_ff_macaron(x) + x = residual + self.ff_scale * self.dropout( + self.feed_forward_macaron(x)) + if not self.normalize_before: + x = self.norm_ff_macaron(x) + + # multi-headed self-attention module + residual = x + if self.normalize_before: + x = self.norm_mha(x) + x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb, + att_cache) + x = residual + self.dropout(x_att) + if not self.normalize_before: + x = self.norm_mha(x) + + # convolution module + # Fake new cnn cache here, and then change it in conv_module + new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device) + if self.conv_module is not None: + residual = x + if self.normalize_before: + x = self.norm_conv(x) + x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache) + x = residual + self.dropout(x) + + if not self.normalize_before: + x = self.norm_conv(x) + + # feed forward module + residual = x + if self.normalize_before: + x = self.norm_ff(x) + + x = residual + self.ff_scale * self.dropout(self.feed_forward(x)) + if not self.normalize_before: + x = self.norm_ff(x) + + if self.conv_module is not None: + x = self.norm_final(x) + + return x, mask, new_att_cache, new_cnn_cache diff --git a/cosyvoice/transformer/label_smoothing_loss.py b/cosyvoice/transformer/label_smoothing_loss.py new file mode 100644 index 0000000..feacabf --- /dev/null +++ b/cosyvoice/transformer/label_smoothing_loss.py @@ -0,0 +1,96 @@ +# Copyright (c) 2019 Shigeki Karita +# 2020 Mobvoi Inc (Binbin Zhang) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Label smoothing module.""" + +import torch +from torch import nn + + +class LabelSmoothingLoss(nn.Module): + """Label-smoothing loss. + + In a standard CE loss, the label's data distribution is: + [0,1,2] -> + [ + [1.0, 0.0, 0.0], + [0.0, 1.0, 0.0], + [0.0, 0.0, 1.0], + ] + + In the smoothing version CE Loss,some probabilities + are taken from the true label prob (1.0) and are divided + among other labels. + + e.g. + smoothing=0.1 + [0,1,2] -> + [ + [0.9, 0.05, 0.05], + [0.05, 0.9, 0.05], + [0.05, 0.05, 0.9], + ] + + Args: + size (int): the number of class + padding_idx (int): padding class id which will be ignored for loss + smoothing (float): smoothing rate (0.0 means the conventional CE) + normalize_length (bool): + normalize loss by sequence length if True + normalize loss by batch size if False + """ + + def __init__(self, + size: int, + padding_idx: int, + smoothing: float, + normalize_length: bool = False): + """Construct an LabelSmoothingLoss object.""" + super(LabelSmoothingLoss, self).__init__() + self.criterion = nn.KLDivLoss(reduction="none") + self.padding_idx = padding_idx + self.confidence = 1.0 - smoothing + self.smoothing = smoothing + self.size = size + self.normalize_length = normalize_length + + def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: + """Compute loss between x and target. 
+ + The model outputs and data labels tensors are flatten to + (batch*seqlen, class) shape and a mask is applied to the + padding part which should not be calculated for loss. + + Args: + x (torch.Tensor): prediction (batch, seqlen, class) + target (torch.Tensor): + target signal masked with self.padding_id (batch, seqlen) + Returns: + loss (torch.Tensor) : The KL loss, scalar float value + """ + assert x.size(2) == self.size + batch_size = x.size(0) + x = x.view(-1, self.size) + target = target.view(-1) + # use zeros_like instead of torch.no_grad() for true_dist, + # since no_grad() can not be exported by JIT + true_dist = torch.zeros_like(x) + true_dist.fill_(self.smoothing / (self.size - 1)) + ignore = target == self.padding_idx # (B,) + total = len(target) - ignore.sum().item() + target = target.masked_fill(ignore, 0) # avoid -1 index + true_dist.scatter_(1, target.unsqueeze(1), self.confidence) + kl = self.criterion(torch.log_softmax(x, dim=1), true_dist) + denom = total if self.normalize_length else batch_size + return kl.masked_fill(ignore.unsqueeze(1), 0).sum() / denom diff --git a/cosyvoice/transformer/positionwise_feed_forward.py b/cosyvoice/transformer/positionwise_feed_forward.py new file mode 100644 index 0000000..b7a2cf6 --- /dev/null +++ b/cosyvoice/transformer/positionwise_feed_forward.py @@ -0,0 +1,115 @@ +# Copyright (c) 2019 Shigeki Karita +# 2020 Mobvoi Inc (Binbin Zhang) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Positionwise feed forward layer definition.""" + +import torch + + +class PositionwiseFeedForward(torch.nn.Module): + """Positionwise feed forward layer. + + FeedForward are appied on each position of the sequence. + The output dim is same with the input dim. + + Args: + idim (int): Input dimenstion. + hidden_units (int): The number of hidden units. + dropout_rate (float): Dropout rate. + activation (torch.nn.Module): Activation function + """ + + def __init__( + self, + idim: int, + hidden_units: int, + dropout_rate: float, + activation: torch.nn.Module = torch.nn.ReLU(), + ): + """Construct a PositionwiseFeedForward object.""" + super(PositionwiseFeedForward, self).__init__() + self.w_1 = torch.nn.Linear(idim, hidden_units) + self.activation = activation + self.dropout = torch.nn.Dropout(dropout_rate) + self.w_2 = torch.nn.Linear(hidden_units, idim) + + def forward(self, xs: torch.Tensor) -> torch.Tensor: + """Forward function. + + Args: + xs: input tensor (B, L, D) + Returns: + output tensor, (B, L, D) + """ + return self.w_2(self.dropout(self.activation(self.w_1(xs)))) + + +class MoEFFNLayer(torch.nn.Module): + """ + Mixture of expert with Positionwise feed forward layer + See also figure 1 in https://arxiv.org/pdf/2305.15663.pdf + The output dim is same with the input dim. + + Modified from https://github.com/Lightning-AI/lit-gpt/pull/823 + https://github.com/mistralai/mistral-src/blob/b46d6/moe_one_file_ref.py#L203-L219 + Args: + n_expert: number of expert. 
+ n_expert_per_token: The actual number of experts used for each frame + idim (int): Input dimenstion. + hidden_units (int): The number of hidden units. + dropout_rate (float): Dropout rate. + activation (torch.nn.Module): Activation function + """ + + def __init__( + self, + n_expert: int, + n_expert_per_token: int, + idim: int, + hidden_units: int, + dropout_rate: float, + activation: torch.nn.Module = torch.nn.ReLU(), + ): + super(MoEFFNLayer, self).__init__() + self.gate = torch.nn.Linear(idim, n_expert, bias=False) + self.experts = torch.nn.ModuleList( + PositionwiseFeedForward(idim, hidden_units, dropout_rate, + activation) for _ in range(n_expert)) + self.n_expert_per_token = n_expert_per_token + + def forward(self, xs: torch.Tensor) -> torch.Tensor: + """Foward function. + Args: + xs: input tensor (B, L, D) + Returns: + output tensor, (B, L, D) + + """ + B, L, D = xs.size( + ) # batch size, sequence length, embedding dimension (idim) + xs = xs.view(-1, D) # (B*L, D) + router = self.gate(xs) # (B*L, n_expert) + logits, indices = torch.topk( + router, self.n_expert_per_token + ) # probs:(B*L, n_expert), indices: (B*L, n_expert) + weights = torch.nn.functional.softmax( + logits, dim=1, + dtype=torch.float).to(dtype=xs.dtype) # (B*L, n_expert_per_token) + output = torch.zeros_like(xs) # (B*L, D) + for i, expert in enumerate(self.experts): + mask = indices == i + batch_idx, ith_expert = torch.where(mask) + output[batch_idx] += weights[batch_idx, ith_expert, None] * expert( + xs[batch_idx]) + return output.view(B, L, D) diff --git a/cosyvoice/transformer/subsampling.py b/cosyvoice/transformer/subsampling.py new file mode 100644 index 0000000..e17c2e3 --- /dev/null +++ b/cosyvoice/transformer/subsampling.py @@ -0,0 +1,383 @@ +# Copyright (c) 2021 Mobvoi Inc (Binbin Zhang, Di Wu) +# 2024 Alibaba Inc (Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Subsampling layer definition.""" + +from typing import Tuple, Union + +import torch + + +class BaseSubsampling(torch.nn.Module): + + def __init__(self): + super().__init__() + self.right_context = 0 + self.subsampling_rate = 1 + + def position_encoding(self, offset: Union[int, torch.Tensor], + size: int) -> torch.Tensor: + return self.pos_enc.position_encoding(offset, size) + + +class EmbedinigNoSubsampling(BaseSubsampling): + """Embedding input without subsampling + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + super().__init__() + self.embed = torch.nn.Embedding(idim, odim) + self.pos_enc = pos_enc_class + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Input x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: linear input tensor (#batch, time', odim), + where time' = time . 
+ torch.Tensor: linear input mask (#batch, 1, time'), + where time' = time . + + """ + x = self.embed(x) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask + + +class LinearNoSubsampling(BaseSubsampling): + """Linear transform the input without subsampling + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an linear object.""" + super().__init__() + self.out = torch.nn.Sequential( + torch.nn.Linear(idim, odim), + torch.nn.LayerNorm(odim, eps=1e-5), + torch.nn.Dropout(dropout_rate), + ) + self.pos_enc = pos_enc_class + self.right_context = 0 + self.subsampling_rate = 1 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Input x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: linear input tensor (#batch, time', odim), + where time' = time . + torch.Tensor: linear input mask (#batch, 1, time'), + where time' = time . + + """ + x = self.out(x) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask + + +class Conv1dSubsampling2(BaseSubsampling): + """Convolutional 1D subsampling (to 1/2 length). + It is designed for Whisper, ref: + https://github.com/openai/whisper/blob/main/whisper/model.py + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv1dSubsampling2 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv1d(idim, odim, kernel_size=3, padding=1), + torch.nn.GELU(), + torch.nn.Conv1d(odim, odim, kernel_size=3, stride=2, padding=1), + torch.nn.GELU(), + ) + self.pos_enc = pos_enc_class + # The right context for every conv layer is computed by: + # (kernel_size - 1) * frame_rate_of_this_layer + self.subsampling_rate = 2 + # 4 = (3 - 1) * 1 + (3 - 1) * 1 + self.right_context = 4 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 2. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 2. + torch.Tensor: positional encoding + + """ + time = x.size(1) + x = x.transpose(1, 2) # (b, f, t) + x = self.conv(x) + x = x.transpose(1, 2) # (b, t, f) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, (time + 1) % 2::2] + + +class Conv2dSubsampling4(BaseSubsampling): + """Convolutional 2D subsampling (to 1/4 length). + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. 
+ + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv2dSubsampling4 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv2d(1, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 3, 2), + torch.nn.ReLU(), + ) + self.out = torch.nn.Sequential( + torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim)) + self.pos_enc = pos_enc_class + # The right context for every conv layer is computed by: + # (kernel_size - 1) * frame_rate_of_this_layer + self.subsampling_rate = 4 + # 6 = (3 - 1) * 1 + (3 - 1) * 2 + self.right_context = 6 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 4. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 4. + torch.Tensor: positional encoding + + """ + x = x.unsqueeze(1) # (b, c=1, t, f) + x = self.conv(x) + b, c, t, f = x.size() + x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f)) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2] + + +class Conv2dSubsampling6(BaseSubsampling): + """Convolutional 2D subsampling (to 1/6 length). + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + pos_enc (torch.nn.Module): Custom position encoding layer. + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv2dSubsampling6 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv2d(1, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 5, 3), + torch.nn.ReLU(), + ) + self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), + odim) + self.pos_enc = pos_enc_class + # 10 = (3 - 1) * 1 + (5 - 1) * 2 + self.subsampling_rate = 6 + self.right_context = 10 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 6. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 6. + torch.Tensor: positional encoding + """ + x = x.unsqueeze(1) # (b, c, t, f) + x = self.conv(x) + b, c, t, f = x.size() + x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f)) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3] + + +class Conv2dSubsampling8(BaseSubsampling): + """Convolutional 2D subsampling (to 1/8 length). + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. 
+ + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an Conv2dSubsampling8 object.""" + super().__init__() + self.conv = torch.nn.Sequential( + torch.nn.Conv2d(1, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 3, 2), + torch.nn.ReLU(), + torch.nn.Conv2d(odim, odim, 3, 2), + torch.nn.ReLU(), + ) + self.linear = torch.nn.Linear( + odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim) + self.pos_enc = pos_enc_class + self.subsampling_rate = 8 + # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4 + self.right_context = 14 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Subsample x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: Subsampled tensor (#batch, time', odim), + where time' = time // 8. + torch.Tensor: Subsampled mask (#batch, 1, time'), + where time' = time // 8. + torch.Tensor: positional encoding + """ + x = x.unsqueeze(1) # (b, c, t, f) + x = self.conv(x) + b, c, t, f = x.size() + x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f)) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2] + + +class LegacyLinearNoSubsampling(BaseSubsampling): + """Linear transform the input without subsampling + + Args: + idim (int): Input dimension. + odim (int): Output dimension. + dropout_rate (float): Dropout rate. + + """ + + def __init__(self, idim: int, odim: int, dropout_rate: float, + pos_enc_class: torch.nn.Module): + """Construct an linear object.""" + super().__init__() + self.out = torch.nn.Sequential( + torch.nn.Linear(idim, odim), + torch.nn.LayerNorm(odim, eps=1e-5), + torch.nn.Dropout(dropout_rate), + torch.nn.ReLU(), + ) + self.pos_enc = pos_enc_class + self.right_context = 0 + self.subsampling_rate = 1 + + def forward( + self, + x: torch.Tensor, + x_mask: torch.Tensor, + offset: Union[int, torch.Tensor] = 0 + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """Input x. + + Args: + x (torch.Tensor): Input tensor (#batch, time, idim). + x_mask (torch.Tensor): Input mask (#batch, 1, time). + + Returns: + torch.Tensor: linear input tensor (#batch, time', odim), + where time' = time . + torch.Tensor: linear input mask (#batch, 1, time'), + where time' = time . + + """ + x = self.out(x) + x, pos_emb = self.pos_enc(x, offset) + return x, pos_emb, x_mask diff --git a/cosyvoice/utils/__init__.py b/cosyvoice/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cosyvoice/utils/class_utils.py b/cosyvoice/utils/class_utils.py new file mode 100644 index 0000000..b8cc471 --- /dev/null +++ b/cosyvoice/utils/class_utils.py @@ -0,0 +1,70 @@ +# Copyright [2023-11-28] +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
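Editor's aside on the lookup tables that follow: config strings are mapped to classes and instantiated when the encoder is assembled. The snippet below is a minimal sketch of that registry pattern; the `_EXAMPLE_ACTIVATIONS` table and `build_activation` helper are made-up names for illustration, not part of class_utils.py.

import torch

# Hypothetical mini-registry mirroring COSYVOICE_ACTIVATION_CLASSES defined below.
_EXAMPLE_ACTIVATIONS = {
    "relu": torch.nn.ReLU,
    "gelu": torch.nn.GELU,
    "swish": torch.nn.SiLU,
}


def build_activation(name: str) -> torch.nn.Module:
    """Look up an activation class by its config string and instantiate it."""
    try:
        return _EXAMPLE_ACTIVATIONS[name]()
    except KeyError as e:
        raise KeyError(f"unknown activation: {name!r}") from e


# Usage: build_activation("swish") returns a torch.nn.SiLU() instance, which is
# analogous to COSYVOICE_ACTIVATION_CLASSES[conf["activation"]]() in this repo.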
+import torch + +from cosyvoice.transformer.activation import Swish +from cosyvoice.transformer.subsampling import ( + LinearNoSubsampling, + EmbedinigNoSubsampling, + Conv1dSubsampling2, + Conv2dSubsampling4, + Conv2dSubsampling6, + Conv2dSubsampling8, +) +from cosyvoice.transformer.embedding import (PositionalEncoding, + RelPositionalEncoding, + WhisperPositionalEncoding, + LearnablePositionalEncoding, + NoPositionalEncoding) +from cosyvoice.transformer.attention import (MultiHeadedAttention, + RelPositionMultiHeadedAttention) +from cosyvoice.transformer.embedding import EspnetRelPositionalEncoding +from cosyvoice.transformer.subsampling import LegacyLinearNoSubsampling + + +COSYVOICE_ACTIVATION_CLASSES = { + "hardtanh": torch.nn.Hardtanh, + "tanh": torch.nn.Tanh, + "relu": torch.nn.ReLU, + "selu": torch.nn.SELU, + "swish": getattr(torch.nn, "SiLU", Swish), + "gelu": torch.nn.GELU, +} + +COSYVOICE_SUBSAMPLE_CLASSES = { + "linear": LinearNoSubsampling, + "linear_legacy": LegacyLinearNoSubsampling, + "embed": EmbedinigNoSubsampling, + "conv1d2": Conv1dSubsampling2, + "conv2d": Conv2dSubsampling4, + "conv2d6": Conv2dSubsampling6, + "conv2d8": Conv2dSubsampling8, + 'paraformer_dummy': torch.nn.Identity +} + +COSYVOICE_EMB_CLASSES = { + "embed": PositionalEncoding, + "abs_pos": PositionalEncoding, + "rel_pos": RelPositionalEncoding, + "rel_pos_espnet": EspnetRelPositionalEncoding, + "no_pos": NoPositionalEncoding, + "abs_pos_whisper": WhisperPositionalEncoding, + "embed_learnable_pe": LearnablePositionalEncoding, +} + +COSYVOICE_ATTENTION_CLASSES = { + "selfattn": MultiHeadedAttention, + "rel_selfattn": RelPositionMultiHeadedAttention, +} diff --git a/cosyvoice/utils/common.py b/cosyvoice/utils/common.py new file mode 100644 index 0000000..73b438e --- /dev/null +++ b/cosyvoice/utils/common.py @@ -0,0 +1,93 @@ +# Copyright (c) 2020 Mobvoi Inc (Binbin Zhang) +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +"""Unility functions for Transformer.""" + +from typing import List + +import torch + +IGNORE_ID = -1 + + +def pad_list(xs: List[torch.Tensor], pad_value: int): + """Perform padding for the list of tensors. + + Args: + xs (List): List of Tensors [(T_1, `*`), (T_2, `*`), ..., (T_B, `*`)]. + pad_value (float): Value for padding. + + Returns: + Tensor: Padded tensor (B, Tmax, `*`). 
+ + Examples: + >>> x = [torch.ones(4), torch.ones(2), torch.ones(1)] + >>> x + [tensor([1., 1., 1., 1.]), tensor([1., 1.]), tensor([1.])] + >>> pad_list(x, 0) + tensor([[1., 1., 1., 1.], + [1., 1., 0., 0.], + [1., 0., 0., 0.]]) + + """ + max_len = max([len(item) for item in xs]) + batchs = len(xs) + ndim = xs[0].ndim + if ndim == 1: + pad_res = torch.zeros(batchs, + max_len, + dtype=xs[0].dtype, + device=xs[0].device) + elif ndim == 2: + pad_res = torch.zeros(batchs, + max_len, + xs[0].shape[1], + dtype=xs[0].dtype, + device=xs[0].device) + elif ndim == 3: + pad_res = torch.zeros(batchs, + max_len, + xs[0].shape[1], + xs[0].shape[2], + dtype=xs[0].dtype, + device=xs[0].device) + else: + raise ValueError(f"Unsupported ndim: {ndim}") + pad_res.fill_(pad_value) + for i in range(batchs): + pad_res[i, :len(xs[i])] = xs[i] + return pad_res + + +def th_accuracy(pad_outputs: torch.Tensor, pad_targets: torch.Tensor, + ignore_label: int) -> torch.Tensor: + """Calculate accuracy. + + Args: + pad_outputs (Tensor): Prediction tensors (B * Lmax, D). + pad_targets (LongTensor): Target label tensors (B, Lmax). + ignore_label (int): Ignore label id. + + Returns: + torch.Tensor: Accuracy value (0.0 - 1.0). + + """ + pad_pred = pad_outputs.view(pad_targets.size(0), pad_targets.size(1), + pad_outputs.size(1)).argmax(2) + mask = pad_targets != ignore_label + numerator = torch.sum( + pad_pred.masked_select(mask) == pad_targets.masked_select(mask)) + denominator = torch.sum(mask) + return (numerator / denominator).detach() diff --git a/cosyvoice/utils/executor.py b/cosyvoice/utils/executor.py new file mode 100644 index 0000000..c12e52d --- /dev/null +++ b/cosyvoice/utils/executor.py @@ -0,0 +1,110 @@ +# Copyright (c) 2020 Mobvoi Inc (Binbin Zhang) +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from contextlib import nullcontext +import os + +import torch +import torch.distributed as dist + +from cosyvoice.utils.train_utils import update_parameter_and_lr, log_per_step, log_per_save, batch_forward, batch_backward, save_model, cosyvoice_join + + +class Executor: + + def __init__(self): + self.step = 0 + self.epoch = 0 + self.rank = int(os.environ.get('RANK', 0)) + self.device = torch.device('cuda:{}'.format(self.rank)) + + def train_one_epoc(self, model, optimizer, scheduler, train_data_loader, cv_data_loader, writer, info_dict, group_join): + ''' Train one epoch + ''' + + lr = optimizer.param_groups[0]['lr'] + logging.info('Epoch {} TRAIN info lr {} rank {}'.format(self.epoch, lr, self.rank)) + logging.info('using accumulate grad, new batch size is {} times' + ' larger than before'.format(info_dict['accum_grad'])) + # A context manager to be used in conjunction with an instance of + # torch.nn.parallel.DistributedDataParallel to be able to train + # with uneven inputs across participating processes. 
+ model.train() + model_context = model.join if info_dict['train_engine'] == 'torch_ddp' else nullcontext + with model_context(): + for batch_idx, batch_dict in enumerate(train_data_loader): + info_dict["tag"] = "TRAIN" + info_dict["step"] = self.step + info_dict["epoch"] = self.epoch + info_dict["batch_idx"] = batch_idx + if cosyvoice_join(group_join, info_dict): + break + + # Disable gradient synchronizations across DDP processes. + # Within this context, gradients will be accumulated on module + # variables, which will later be synchronized. + if info_dict['train_engine'] == 'torch_ddp' and (batch_idx + 1) % info_dict["accum_grad"] != 0: + context = model.no_sync + # Used for single gpu training and DDP gradient synchronization + # processes. + else: + context = nullcontext + + with context(): + info_dict = batch_forward(model, batch_dict, info_dict) + info_dict = batch_backward(model, info_dict) + + info_dict = update_parameter_and_lr(model, optimizer, scheduler, info_dict) + log_per_step(writer, info_dict) + # NOTE specify save_per_step in cosyvoice.yaml if you want to enable step save + if info_dict['save_per_step'] > 0 and (self.step + 1) % info_dict['save_per_step'] == 0 and (batch_idx + 1) % info_dict["accum_grad"] == 0: + dist.barrier() + self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=False) + model.train() + if (batch_idx + 1) % info_dict["accum_grad"] == 0: + self.step += 1 + dist.barrier() + self.cv(model, cv_data_loader, writer, info_dict, on_batch_end=True) + + @torch.inference_mode() + def cv(self, model, cv_data_loader, writer, info_dict, on_batch_end=True): + ''' Cross validation on + ''' + logging.info('Epoch {} Step {} on_batch_end {} CV rank {}'.format(self.epoch, self.step + 1, on_batch_end, self.rank)) + model.eval() + total_num_utts, total_loss_dict = 0, {} # avoid division by 0 + for batch_idx, batch_dict in enumerate(cv_data_loader): + info_dict["tag"] = "CV" + info_dict["step"] = self.step + info_dict["epoch"] = self.epoch + info_dict["batch_idx"] = batch_idx + + num_utts = len(batch_dict["utts"]) + total_num_utts += num_utts + + info_dict = batch_forward(model, batch_dict, info_dict) + + for k, v in info_dict['loss_dict'].items(): + if k not in total_loss_dict: + total_loss_dict[k] = [] + total_loss_dict[k].append(v.item() * num_utts) + log_per_step(None, info_dict) + for k, v in total_loss_dict.items(): + total_loss_dict[k] = sum(v) / total_num_utts + info_dict['loss_dict'] = total_loss_dict + log_per_save(writer, info_dict) + model_name = 'epoch_{}_whole'.format(self.epoch) if on_batch_end else 'epoch_{}_step_{}'.format(self.epoch, self.step + 1) + save_model(model, model_name, info_dict) diff --git a/cosyvoice/utils/file_utils.py b/cosyvoice/utils/file_utils.py new file mode 100644 index 0000000..92c448b --- /dev/null +++ b/cosyvoice/utils/file_utils.py @@ -0,0 +1,41 @@ +# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang) +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
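Editor's aside: the training loop in Executor.train_one_epoc above synchronizes DDP gradients only on accumulation boundaries by switching between model.no_sync and nullcontext. Below is a minimal single-process sketch of that gradient-accumulation pattern; the toy model, synthetic batches, and accum_grad value are illustrative assumptions, not code from this repository.

from contextlib import nullcontext

import torch


def toy_accumulation_loop(model: torch.nn.Module,
                          optimizer: torch.optim.Optimizer,
                          batches,
                          accum_grad: int = 4):
    """Accumulate gradients over accum_grad micro-batches before each step."""
    model.train()
    for batch_idx, (x, y) in enumerate(batches):
        # With torch DDP, non-boundary micro-steps would enter model.no_sync()
        # so gradients accumulate locally; nullcontext stands in for single GPU.
        context = nullcontext
        with context():
            loss = torch.nn.functional.mse_loss(model(x), y) / accum_grad
            loss.backward()
        if (batch_idx + 1) % accum_grad == 0:
            optimizer.step()
            optimizer.zero_grad()


# Usage sketch:
#   model = torch.nn.Linear(8, 1)
#   opt = torch.optim.Adam(model.parameters(), lr=1e-3)
#   toy_accumulation_loop(model, opt, [(torch.randn(2, 8), torch.randn(2, 1)) for _ in range(8)])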
+ +import json +import torchaudio + + +def read_lists(list_file): + lists = [] + with open(list_file, 'r', encoding='utf8') as fin: + for line in fin: + lists.append(line.strip()) + return lists + +def read_json_lists(list_file): + lists = read_lists(list_file) + results = {} + for fn in lists: + with open(fn, 'r', encoding='utf8') as fin: + results.update(json.load(fin)) + return results + +def load_wav(wav, target_sr): + speech, sample_rate = torchaudio.load(wav) + speech = speech.mean(dim=0, keepdim=True) + if sample_rate != target_sr: + assert sample_rate > target_sr, 'wav sample rate {} must be greater than {}'.format(sample_rate, target_sr) + speech = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=target_sr)(speech) + return speech diff --git a/cosyvoice/utils/frontend_utils.py b/cosyvoice/utils/frontend_utils.py new file mode 100644 index 0000000..dee829f --- /dev/null +++ b/cosyvoice/utils/frontend_utils.py @@ -0,0 +1,120 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu, Zhihao Du) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import re +chinese_char_pattern = re.compile(r'[\u4e00-\u9fff]+') + +# whether contain chinese character +def contains_chinese(text): + return bool(chinese_char_pattern.search(text)) + + +# replace special symbol +def replace_corner_mark(text): + text = text.replace('²', '平方') + text = text.replace('³', '立方') + return text + + +# remove meaningless symbol +def remove_bracket(text): + text = text.replace('(', '').replace(')', '') + text = text.replace('【', '').replace('】', '') + text = text.replace('`', '').replace('`', '') + text = text.replace("——", " ") + return text + + +# spell Arabic numerals +def spell_out_number(text: str, inflect_parser): + new_text = [] + st = None + for i, c in enumerate(text): + if not c.isdigit(): + if st is not None: + num_str = inflect_parser.number_to_words(text[st: i]) + new_text.append(num_str) + st = None + new_text.append(c) + else: + if st is None: + st = i + if st is not None and st < len(text): + num_str = inflect_parser.number_to_words(text[st:]) + new_text.append(num_str) + return ''.join(new_text) + + +# split paragrah logic: +# 1. per sentence max len token_max_n, min len token_min_n, merge if last sentence len less than merge_len +# 2. cal sentence len according to lang +# 3. 
split sentence according to puncatation +def split_paragraph(text: str, tokenize, lang="zh", token_max_n=80, token_min_n=60, merge_len=20, comma_split=False): + def calc_utt_length(_text: str): + if lang == "zh": + return len(_text) + else: + return len(tokenize(_text)) + + def should_merge(_text: str): + if lang == "zh": + return len(_text) < merge_len + else: + return len(tokenize(_text)) < merge_len + + if lang == "zh": + pounc = ['。', '?', '!', ';', ':', '.', '?', '!', ';'] + else: + pounc = ['.', '?', '!', ';', ':'] + if comma_split: + pounc.extend([',', ',']) + st = 0 + utts = [] + for i, c in enumerate(text): + if c in pounc: + if len(text[st: i]) > 0: + utts.append(text[st: i] + c) + if i + 1 < len(text) and text[i + 1] in ['"', '”']: + tmp = utts.pop(-1) + utts.append(tmp + text[i + 1]) + st = i + 2 + else: + st = i + 1 + final_utts = [] + cur_utt = "" + for utt in utts: + if calc_utt_length(cur_utt + utt) > token_max_n and calc_utt_length(cur_utt) > token_min_n: + final_utts.append(cur_utt) + cur_utt = "" + cur_utt = cur_utt + utt + if len(cur_utt) > 0: + if should_merge(cur_utt) and len(final_utts) != 0: + final_utts[-1] = final_utts[-1] + cur_utt + else: + final_utts.append(cur_utt) + + return final_utts + + +# remove blank between chinese character +def replace_blank(text: str): + out_str = [] + for i, c in enumerate(text): + if c == " ": + if ((text[i + 1].isascii() and text[i + 1] != " ") and + (text[i - 1].isascii() and text[i - 1] != " ")): + out_str.append(c) + else: + out_str.append(c) + return "".join(out_str) diff --git a/cosyvoice/utils/mask.py b/cosyvoice/utils/mask.py new file mode 100644 index 0000000..2b460bb --- /dev/null +++ b/cosyvoice/utils/mask.py @@ -0,0 +1,227 @@ +# Copyright (c) 2019 Shigeki Karita +# 2020 Mobvoi Inc (Binbin Zhang) +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import torch +''' +def subsequent_mask( + size: int, + device: torch.device = torch.device("cpu"), +) -> torch.Tensor: + """Create mask for subsequent steps (size, size). + + This mask is used only in decoder which works in an auto-regressive mode. + This means the current step could only do attention with its left steps. + + In encoder, fully attention is used when streaming is not necessary and + the sequence is not long. In this case, no attention mask is needed. + + When streaming is need, chunk-based attention is used in encoder. See + subsequent_chunk_mask for the chunk-based attention mask. + + Args: + size (int): size of mask + str device (str): "cpu" or "cuda" or torch.Tensor.device + dtype (torch.device): result dtype + + Returns: + torch.Tensor: mask + + Examples: + >>> subsequent_mask(3) + [[1, 0, 0], + [1, 1, 0], + [1, 1, 1]] + """ + ret = torch.ones(size, size, device=device, dtype=torch.bool) + return torch.tril(ret) +''' + + +def subsequent_mask( + size: int, + device: torch.device = torch.device("cpu"), +) -> torch.Tensor: + """Create mask for subsequent steps (size, size). 
+ + This mask is used only in decoder which works in an auto-regressive mode. + This means the current step could only do attention with its left steps. + + In encoder, fully attention is used when streaming is not necessary and + the sequence is not long. In this case, no attention mask is needed. + + When streaming is need, chunk-based attention is used in encoder. See + subsequent_chunk_mask for the chunk-based attention mask. + + Args: + size (int): size of mask + str device (str): "cpu" or "cuda" or torch.Tensor.device + dtype (torch.device): result dtype + + Returns: + torch.Tensor: mask + + Examples: + >>> subsequent_mask(3) + [[1, 0, 0], + [1, 1, 0], + [1, 1, 1]] + """ + arange = torch.arange(size, device=device) + mask = arange.expand(size, size) + arange = arange.unsqueeze(-1) + mask = mask <= arange + return mask + + +def subsequent_chunk_mask( + size: int, + chunk_size: int, + num_left_chunks: int = -1, + device: torch.device = torch.device("cpu"), +) -> torch.Tensor: + """Create mask for subsequent steps (size, size) with chunk size, + this is for streaming encoder + + Args: + size (int): size of mask + chunk_size (int): size of chunk + num_left_chunks (int): number of left chunks + <0: use full chunk + >=0: use num_left_chunks + device (torch.device): "cpu" or "cuda" or torch.Tensor.device + + Returns: + torch.Tensor: mask + + Examples: + >>> subsequent_chunk_mask(4, 2) + [[1, 1, 0, 0], + [1, 1, 0, 0], + [1, 1, 1, 1], + [1, 1, 1, 1]] + """ + ret = torch.zeros(size, size, device=device, dtype=torch.bool) + for i in range(size): + if num_left_chunks < 0: + start = 0 + else: + start = max((i // chunk_size - num_left_chunks) * chunk_size, 0) + ending = min((i // chunk_size + 1) * chunk_size, size) + ret[i, start:ending] = True + return ret + + +def add_optional_chunk_mask(xs: torch.Tensor, + masks: torch.Tensor, + use_dynamic_chunk: bool, + use_dynamic_left_chunk: bool, + decoding_chunk_size: int, + static_chunk_size: int, + num_decoding_left_chunks: int, + enable_full_context: bool = True): + """ Apply optional mask for encoder. + + Args: + xs (torch.Tensor): padded input, (B, L, D), L for max length + mask (torch.Tensor): mask for xs, (B, 1, L) + use_dynamic_chunk (bool): whether to use dynamic chunk or not + use_dynamic_left_chunk (bool): whether to use dynamic left chunk for + training. + decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's + 0: default for training, use random dynamic chunk. + <0: for decoding, use full chunk. + >0: for decoding, use fixed chunk size as set. + static_chunk_size (int): chunk size for static chunk training/decoding + if it's greater than 0, if use_dynamic_chunk is true, + this parameter will be ignored + num_decoding_left_chunks: number of left chunks, this is for decoding, + the chunk size is decoding_chunk_size. + >=0: use num_decoding_left_chunks + <0: use all left chunks + enable_full_context (bool): + True: chunk size is either [1, 25] or full context(max_len) + False: chunk size ~ U[1, 25] + + Returns: + torch.Tensor: chunk mask of the input xs. + """ + # Whether to use chunk mask or not + if use_dynamic_chunk: + max_len = xs.size(1) + if decoding_chunk_size < 0: + chunk_size = max_len + num_left_chunks = -1 + elif decoding_chunk_size > 0: + chunk_size = decoding_chunk_size + num_left_chunks = num_decoding_left_chunks + else: + # chunk size is either [1, 25] or full context(max_len). + # Since we use 4 times subsampling and allow up to 1s(100 frames) + # delay, the maximum frame is 100 / 4 = 25. 
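+            # A chunk size is first drawn uniformly from [1, max_len); draws
+            # above max_len // 2 fall back to full context (when enabled),
+            # while the rest are folded into the range [1, 25] via the modulo below.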
+ chunk_size = torch.randint(1, max_len, (1, )).item() + num_left_chunks = -1 + if chunk_size > max_len // 2 and enable_full_context: + chunk_size = max_len + else: + chunk_size = chunk_size % 25 + 1 + if use_dynamic_left_chunk: + max_left_chunks = (max_len - 1) // chunk_size + num_left_chunks = torch.randint(0, max_left_chunks, + (1, )).item() + chunk_masks = subsequent_chunk_mask(xs.size(1), chunk_size, + num_left_chunks, + xs.device) # (L, L) + chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L) + chunk_masks = masks & chunk_masks # (B, L, L) + elif static_chunk_size > 0: + num_left_chunks = num_decoding_left_chunks + chunk_masks = subsequent_chunk_mask(xs.size(1), static_chunk_size, + num_left_chunks, + xs.device) # (L, L) + chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L) + chunk_masks = masks & chunk_masks # (B, L, L) + else: + chunk_masks = masks + return chunk_masks + + +def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor: + """Make mask tensor containing indices of padded part. + + See description of make_non_pad_mask. + + Args: + lengths (torch.Tensor): Batch of lengths (B,). + Returns: + torch.Tensor: Mask tensor containing indices of padded part. + + Examples: + >>> lengths = [5, 3, 2] + >>> make_pad_mask(lengths) + masks = [[0, 0, 0, 0 ,0], + [0, 0, 0, 1, 1], + [0, 0, 1, 1, 1]] + """ + batch_size = lengths.size(0) + max_len = max_len if max_len > 0 else lengths.max().item() + seq_range = torch.arange(0, + max_len, + dtype=torch.int64, + device=lengths.device) + seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) + seq_length_expand = lengths.unsqueeze(-1) + mask = seq_range_expand >= seq_length_expand + return mask diff --git a/cosyvoice/utils/scheduler.py b/cosyvoice/utils/scheduler.py new file mode 100644 index 0000000..eed1ea0 --- /dev/null +++ b/cosyvoice/utils/scheduler.py @@ -0,0 +1,717 @@ +# Copyright (c) 2020 Mobvoi Inc (Binbin Zhang) +# 2022 Ximalaya Inc (Yuguang Yang) +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# Modified from ESPnet(https://github.com/espnet/espnet) +# NeMo(https://github.com/NVIDIA/NeMo) + +from typing import Union + +import math +import warnings +import torch +from torch.optim.lr_scheduler import _LRScheduler + + +class WarmupLR(_LRScheduler): + """The WarmupLR scheduler + + This scheduler is almost same as NoamLR Scheduler except for following + difference: + + NoamLR: + lr = optimizer.lr * model_size ** -0.5 + * min(step ** -0.5, step * warmup_step ** -1.5) + WarmupLR: + lr = optimizer.lr * warmup_step ** 0.5 + * min(step ** -0.5, step * warmup_step ** -1.5) + + Note that the maximum lr equals to optimizer.lr in this scheduler. 
+ + """ + + def __init__( + self, + optimizer: torch.optim.Optimizer, + warmup_steps: Union[int, float] = 25000, + last_epoch: int = -1, + ): + self.warmup_steps = warmup_steps + + # __init__() must be invoked before setting field + # because step() is also invoked in __init__() + super().__init__(optimizer, last_epoch) + + def __repr__(self): + return f"{self.__class__.__name__}(warmup_steps={self.warmup_steps})" + + def get_lr(self): + step_num = self.last_epoch + 1 + if self.warmup_steps == 0: + return [lr * step_num**-0.5 for lr in self.base_lrs] + else: + return [ + lr * self.warmup_steps**0.5 * + min(step_num**-0.5, step_num * self.warmup_steps**-1.5) + for lr in self.base_lrs + ] + + def set_step(self, step: int): + self.last_epoch = step + + +class WarmupPolicy(_LRScheduler): + """Adds warmup kwargs and warmup logic to lr policy. + All arguments should be passed as kwargs for clarity, + Args: + warmup_steps: Number of training steps in warmup stage + warmup_ratio: Ratio of warmup steps to total steps + max_steps: Total number of steps while training or `None` for + infinite training + """ + + def __init__(self, + optimizer, + *, + warmup_steps=None, + warmup_ratio=None, + max_steps=None, + min_lr=0.0, + last_epoch=-1): + assert not (warmup_steps is not None and warmup_ratio is not None),\ + "Either use particular number of step or ratio" + assert warmup_ratio is None or max_steps is not None, \ + "If there is a ratio, there should be a total steps" + + # It is necessary to assign all attributes *before* __init__, + # as class is wrapped by an inner class. + self.max_steps = max_steps + if warmup_steps is not None: + self.warmup_steps = warmup_steps + elif warmup_ratio is not None: + self.warmup_steps = int(warmup_ratio * max_steps) + else: + self.warmup_steps = 0 + + self.min_lr = min_lr + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed " + "by the scheduler, please use `get_last_lr()`.", + UserWarning, + stacklevel=2) + + step = self.last_epoch + + if step <= self.warmup_steps and self.warmup_steps > 0: + return self._get_warmup_lr(step) + + if step > self.max_steps: + return [self.min_lr for _ in self.base_lrs] + + return self._get_lr(step) + + def _get_warmup_lr(self, step): + lr_val = (step + 1) / (self.warmup_steps + 1) + return [initial_lr * lr_val for initial_lr in self.base_lrs] + + def _get_lr(self, step): + """Simple const lr policy""" + return self.base_lrs + + +class SquareRootConstantPolicy(_LRScheduler): + """Adds warmup kwargs and warmup logic to lr policy. + All arguments should be passed as kwargs for clarity, + Args: + warmup_steps: Number of training steps in warmup stage + warmup_ratio: Ratio of warmup steps to total steps + max_steps: Total number of steps while training or `None` for + infinite training + """ + + def __init__(self, + optimizer, + *, + constant_steps=None, + constant_ratio=None, + max_steps=None, + min_lr=0.0, + last_epoch=-1): + assert not (constant_steps is not None + and constant_ratio is not None), \ + "Either use particular number of step or ratio" + assert constant_ratio is None or max_steps is not None, \ + "If there is a ratio, there should be a total steps" + + # It is necessary to assign all attributes *before* __init__, + # as class is wrapped by an inner class. 
+ self.max_steps = max_steps + if constant_steps is not None: + self.constant_steps = constant_steps + elif constant_ratio is not None: + self.constant_steps = int(constant_ratio * max_steps) + else: + self.constant_steps = 0 + + self.constant_lr = 1 / (constant_steps**0.5) + self.min_lr = min_lr + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed " + "by the scheduler, please use `get_last_lr()`.", + UserWarning, + stacklevel=2) + + step = self.last_epoch + + if step <= self.constant_steps: + return [self.constant_lr for _ in self.base_lrs] + + if step > self.max_steps: + return [self.min_lr for _ in self.base_lrs] + + return self._get_lr(step) + + def _get_lr(self, step): + """Simple const lr policy""" + return self.base_lrs + + +class WarmupHoldPolicy(WarmupPolicy): + """Variant of WarmupPolicy which maintains high + learning rate for a defined number of steps. + All arguments should be passed as kwargs for clarity, + Args: + warmup_steps: Number of training steps in warmup stage + warmup_ratio: Ratio of warmup steps to total steps + hold_steps: Number of training steps to + hold the learning rate after warm up + hold_ratio: Ratio of hold steps to total steps + max_steps: Total number of steps while training or `None` for + infinite training + """ + + def __init__( + self, + optimizer, + *, + warmup_steps=None, + warmup_ratio=None, + hold_steps=None, + hold_ratio=None, + max_steps=None, + min_lr=0.0, + last_epoch=-1, + ): + assert not (hold_steps is not None and hold_ratio is not None), \ + "Either use particular number of step or ratio" + assert hold_ratio is None or max_steps is not None, \ + "If there is a ratio, there should be a total steps" + + self.min_lr = min_lr + self._last_warmup_lr = 0.0 + + # Necessary to duplicate as class attributes are hidden in inner class + self.max_steps = max_steps + if warmup_steps is not None: + self.warmup_steps = warmup_steps + elif warmup_ratio is not None: + self.warmup_steps = int(warmup_ratio * max_steps) + else: + self.warmup_steps = 0 + + if hold_steps is not None: + self.hold_steps = hold_steps + self.warmup_steps + elif hold_ratio is not None: + self.hold_steps = int(hold_ratio * max_steps) + self.warmup_steps + else: + self.hold_steps = 0 + + super().__init__( + optimizer, + warmup_steps=warmup_steps, + warmup_ratio=warmup_ratio, + max_steps=max_steps, + last_epoch=last_epoch, + min_lr=min_lr, + ) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed by the scheduler," + " " + "please use `get_last_lr()`.", + UserWarning, + stacklevel=2) + + step = self.last_epoch + + # Warmup phase + if step <= self.warmup_steps and self.warmup_steps > 0: + return self._get_warmup_lr(step) + + # Hold phase + if (step >= self.warmup_steps) and (step < self.hold_steps): + return self.base_lrs + + if step > self.max_steps: + return [self.min_lr for _ in self.base_lrs] + + return self._get_lr(step) + + +class WarmupAnnealHoldPolicy(_LRScheduler): + """Adds warmup kwargs and warmup logic to lr policy. + All arguments should be passed as kwargs for clarity, + Args: + warmup_steps: Number of training steps in warmup stage + warmup_ratio: Ratio of warmup steps to total steps + max_steps: Total number of steps while training or `None` for + infinite training + min_lr: Minimum lr to hold the learning rate after decay at. + constant_steps: Number of steps to keep lr constant at. 
+ constant_ratio: Ratio of steps to keep lr constant. + """ + + def __init__( + self, + optimizer, + *, + warmup_steps=None, + warmup_ratio=None, + constant_steps=None, + constant_ratio=None, + max_steps=None, + min_lr=0.0, + last_epoch=-1, + ): + assert not (warmup_steps is not None + and warmup_ratio is not None), \ + "Either use particular number of step or ratio" + assert not (constant_steps is not None + and constant_ratio is not None), \ + "Either use constant_steps or constant_ratio" + assert warmup_ratio is None or max_steps is not None, \ + "If there is a ratio, there should be a total steps" + + # It is necessary to assign all attributes *before* __init__, + # as class is wrapped by an inner class. + self.max_steps = max_steps + + if warmup_steps is not None: + self.warmup_steps = warmup_steps + elif warmup_ratio is not None: + self.warmup_steps = int(warmup_ratio * max_steps) + else: + self.warmup_steps = 0 + + if constant_steps is not None: + self.constant_steps = constant_steps + elif constant_ratio is not None: + self.constant_steps = int(constant_ratio * max_steps) + else: + self.constant_steps = 0 + + self.decay_steps = max_steps - (self.constant_steps + + self.warmup_steps) + + self.min_lr = min_lr + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed " + "by the scheduler, please use `get_last_lr()`.", + UserWarning, + stacklevel=2) + + step = self.last_epoch + + # Warmup steps + if self.warmup_steps > 0 and step <= self.warmup_steps: + return self._get_warmup_lr(step) + + # Constant steps after warmup and decay + if self.constant_steps > 0 and ( + self.warmup_steps + self.decay_steps) < step <= self.max_steps: + return self._get_constant_lr(step) + + # Min lr after max steps of updates + if step > self.max_steps: + return [self.min_lr for _ in self.base_lrs] + + return self._get_lr(step) + + def _get_warmup_lr(self, step): + lr_val = (step + 1) / (self.warmup_steps + 1) + return [initial_lr * lr_val for initial_lr in self.base_lrs] + + def _get_constant_lr(self, step): + return [self.min_lr for _ in self.base_lrs] + + def _get_lr(self, step): + """Simple const lr policy""" + return self.base_lrs + + +def _squareroot_annealing(initial_lr, step, max_steps, min_lr): + mult = ((max_steps - step) / max_steps)**0.5 + out_lr = initial_lr * mult + out_lr = max(out_lr, min_lr) + return out_lr + + +def _square_annealing(initial_lr, step, max_steps, min_lr): + mult = ((max_steps - step) / max_steps)**2 + out_lr = initial_lr * mult + out_lr = max(out_lr, min_lr) + return out_lr + + +def _cosine_annealing(initial_lr, step, max_steps, min_lr): + mult = 0.5 * (1 + math.cos(math.pi * step / max_steps)) + out_lr = (initial_lr - min_lr) * mult + min_lr + return out_lr + + +def _linear_warmup_with_cosine_annealing(max_lr, warmup_steps, step, + decay_steps, min_lr): + assert max_lr > min_lr + # Use linear warmup for the initial part. + if warmup_steps > 0 and step <= warmup_steps: + return max_lr * float(step) / float(warmup_steps) + + # For any steps larger than `decay_steps`, use `min_lr`. + if step > warmup_steps + decay_steps: + return min_lr + + # If we are done with the warmup period, use the decay style. 
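+    # decay_ratio runs from 0 to 1 over decay_steps; the cosine coefficient
+    # 0.5 * (cos(pi * decay_ratio) + 1) then anneals lr from max_lr down to min_lr.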
+ num_steps_ = step - warmup_steps + decay_steps_ = decay_steps + decay_ratio = float(num_steps_) / float(decay_steps_) + assert decay_ratio >= 0.0 + assert decay_ratio <= 1.0 + delta_lr = max_lr - min_lr + + coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0) + + return min_lr + coeff * delta_lr + + +def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle): + if cycle: + multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps) + decay_steps *= multiplier + else: + step = min(step, decay_steps) + p = step / decay_steps + lr = (initial_lr - min_lr) * math.pow(1.0 - p, power) + lr += min_lr + return lr + + +def _noam_hold_annealing(initial_lr, step, warmup_steps, hold_steps, + decay_rate, min_lr): + # hold_steps = total number of steps + # to hold the LR, not the warmup + hold steps. + T_warmup_decay = max(1, warmup_steps**decay_rate) + T_hold_decay = max(1, (step - hold_steps)**decay_rate) + lr = (initial_lr * T_warmup_decay) / T_hold_decay + lr = max(lr, min_lr) + return lr + + +class SquareAnnealing(WarmupPolicy): + + def __init__(self, + optimizer, + *, + max_steps, + min_lr=1e-5, + last_epoch=-1, + **kwargs): + super().__init__(optimizer=optimizer, + max_steps=max_steps, + last_epoch=last_epoch, + min_lr=min_lr, + **kwargs) + + def _get_lr(self, step): + new_lrs = [ + _square_annealing( + initial_lr=initial_lr, + step=step - self.warmup_steps, + max_steps=self.max_steps - self.warmup_steps, + min_lr=self.min_lr, + ) for initial_lr in self.base_lrs + ] + return new_lrs + + +class SquareRootAnnealing(WarmupPolicy): + + def __init__(self, + optimizer, + *, + max_steps, + min_lr=0, + last_epoch=-1, + **kwargs): + super().__init__(optimizer=optimizer, + max_steps=max_steps, + last_epoch=last_epoch, + min_lr=min_lr, + **kwargs) + + def _get_lr(self, step): + new_lrs = [ + _squareroot_annealing(initial_lr=initial_lr, + step=step, + max_steps=self.max_steps, + min_lr=self.min_lr) + for initial_lr in self.base_lrs + ] + return new_lrs + + +class CosineAnnealing(WarmupAnnealHoldPolicy): + + def __init__(self, + optimizer, + *, + max_steps, + min_lr=0, + last_epoch=-1, + **kwargs): + super().__init__(optimizer=optimizer, + max_steps=max_steps, + last_epoch=last_epoch, + min_lr=min_lr, + **kwargs) + + def _get_lr(self, step): + for initial_lr in self.base_lrs: + if initial_lr < self.min_lr: + raise ValueError( + f"{self} received an initial learning rate " + f"that was lower than the minimum learning rate.") + + if self.constant_steps is None or self.constant_steps == 0: + new_lrs = [ + _cosine_annealing( + initial_lr=initial_lr, + step=step - self.warmup_steps, + max_steps=self.max_steps - self.warmup_steps, + min_lr=self.min_lr, + ) for initial_lr in self.base_lrs + ] + else: + new_lrs = self._get_linear_warmup_with_cosine_annealing_lr(step) + return new_lrs + + def _get_warmup_lr(self, step): + if self.constant_steps is None or self.constant_steps == 0: + return super()._get_warmup_lr(step) + else: + # Use linear warmup for the initial part. + return self._get_linear_warmup_with_cosine_annealing_lr(step) + + def _get_constant_lr(self, step): + # Only called when `constant_steps` > 0. + return self._get_linear_warmup_with_cosine_annealing_lr(step) + + def _get_linear_warmup_with_cosine_annealing_lr(self, step): + # Cosine Schedule for Megatron LM, + # slightly different warmup schedule + constant LR at the end. 
+ new_lrs = [ + _linear_warmup_with_cosine_annealing( + max_lr=self.base_lrs[0], + warmup_steps=self.warmup_steps, + step=step, + decay_steps=self.decay_steps, + min_lr=self.min_lr, + ) for _ in self.base_lrs + ] + return new_lrs + + +class NoamAnnealing(_LRScheduler): + + def __init__(self, + optimizer, + *, + d_model, + warmup_steps=None, + warmup_ratio=None, + max_steps=None, + min_lr=0.0, + last_epoch=-1): + self._normalize = d_model**(-0.5) + assert not (warmup_steps is not None + and warmup_ratio is not None), \ + "Either use particular number of step or ratio" + assert warmup_ratio is None or max_steps is not None, \ + "If there is a ratio, there should be a total steps" + + # It is necessary to assign all attributes *before* __init__, + # as class is wrapped by an inner class. + self.max_steps = max_steps + if warmup_steps is not None: + self.warmup_steps = warmup_steps + elif warmup_ratio is not None: + self.warmup_steps = int(warmup_ratio * max_steps) + else: + self.warmup_steps = 0 + + self.min_lr = min_lr + super().__init__(optimizer, last_epoch) + + def get_lr(self): + if not self._get_lr_called_within_step: + warnings.warn( + "To get the last learning rate computed " + "by the scheduler, please use `get_last_lr()`.", + UserWarning, + stacklevel=2) + + step = max(1, self.last_epoch) + + for initial_lr in self.base_lrs: + if initial_lr < self.min_lr: + raise ValueError( + f"{self} received an initial learning rate " + f"that was lower than the minimum learning rate.") + + new_lrs = [ + self._noam_annealing(initial_lr=initial_lr, step=step) + for initial_lr in self.base_lrs + ] + return new_lrs + + def _noam_annealing(self, initial_lr, step): + if self.warmup_steps > 0: + mult = self._normalize * min(step**(-0.5), + step * (self.warmup_steps**(-1.5))) + else: + mult = self._normalize * step**(-0.5) + + out_lr = initial_lr * mult + if step > self.warmup_steps: + out_lr = max(out_lr, self.min_lr) + return out_lr + + +class NoamHoldAnnealing(WarmupHoldPolicy): + + def __init__(self, + optimizer, + *, + max_steps, + decay_rate=0.5, + min_lr=0.0, + last_epoch=-1, + **kwargs): + """ + From Nemo: + Implementation of the Noam Hold Annealing policy + from the SqueezeFormer paper. + + Unlike NoamAnnealing, the peak learning rate + can be explicitly set for this scheduler. + The schedule first performs linear warmup, + then holds the peak LR, then decays with some schedule for + the remainder of the steps. + Therefore the min-lr is still dependent + on the hyper parameters selected. + + It's schedule is determined by three factors- + + Warmup Steps: Initial stage, where linear warmup + occurs uptil the peak LR is reached. Unlike NoamAnnealing, + the peak LR is explicitly stated here instead of a scaling factor. + + Hold Steps: Intermediate stage, where the peak LR + is maintained for some number of steps. In this region, + the high peak LR allows the model to converge faster + if training is stable. However the high LR + may also cause instability during training. + Should usually be a significant fraction of training + steps (around 30-40% of the entire training steps). + + Decay Steps: Final stage, where the LR rapidly decays + with some scaling rate (set by decay rate). + To attain Noam decay, use 0.5, + for Squeezeformer recommended decay, use 1.0. + The fast decay after prolonged high LR during + hold phase allows for rapid convergence. 
+ + References: + - [Squeezeformer: + An Efficient Transformer for Automatic Speech Recognition] + (https://arxiv.org/abs/2206.00888) + + Args: + optimizer: Pytorch compatible Optimizer object. + warmup_steps: Number of training steps in warmup stage + warmup_ratio: Ratio of warmup steps to total steps + hold_steps: Number of training steps to + hold the learning rate after warm up + hold_ratio: Ratio of hold steps to total steps + max_steps: Total number of steps while training or `None` for + infinite training + decay_rate: Float value describing the polynomial decay + after the hold period. Default value + of 0.5 corresponds to Noam decay. + min_lr: Minimum learning rate. + """ + self.decay_rate = decay_rate + super().__init__(optimizer=optimizer, + max_steps=max_steps, + last_epoch=last_epoch, + min_lr=min_lr, + **kwargs) + + def _get_lr(self, step): + if self.warmup_steps is None or self.warmup_steps == 0: + raise ValueError( + "Noam scheduler cannot be used without warmup steps") + + if self.hold_steps > 0: + hold_steps = self.hold_steps - self.warmup_steps + else: + hold_steps = 0 + + new_lrs = [ + _noam_hold_annealing( + initial_lr, + step=step, + warmup_steps=self.warmup_steps, + hold_steps=hold_steps, + decay_rate=self.decay_rate, + min_lr=self.min_lr, + ) for initial_lr in self.base_lrs + ] + return new_lrs + + def set_step(self, step: int): + self.last_epoch = step diff --git a/cosyvoice/utils/train_utils.py b/cosyvoice/utils/train_utils.py new file mode 100644 index 0000000..df3a321 --- /dev/null +++ b/cosyvoice/utils/train_utils.py @@ -0,0 +1,286 @@ +# Copyright (c) 2021 Mobvoi Inc. (authors: Binbin Zhang) +# 2023 Horizon Inc. (authors: Xingchen Song) +# 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
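Editor's aside: to make the warmup/hold/decay description above concrete, here is a standalone sketch of the learning-rate shape that NoamHoldAnnealing produces (decay_rate=0.5 corresponds to Noam-style decay). The helper and its default step counts are illustrative assumptions, not the scheduler class defined in scheduler.py.

def example_noam_hold_lr(step: int,
                         peak_lr: float = 1e-3,
                         warmup_steps: int = 1000,
                         hold_steps: int = 3000,
                         decay_rate: float = 0.5,
                         min_lr: float = 1e-5) -> float:
    """Warm up linearly to peak_lr, hold it, then decay polynomially to min_lr."""
    if step <= warmup_steps:
        return peak_lr * (step + 1) / (warmup_steps + 1)
    if step <= warmup_steps + hold_steps:
        return peak_lr
    decay = (warmup_steps ** decay_rate) / max(1.0, (step - hold_steps) ** decay_rate)
    return max(peak_lr * decay, min_lr)


# Usage: [round(example_noam_hold_lr(s), 6) for s in (0, 500, 1000, 4000, 40000)]
# shows the linear ramp, the plateau, and the slow 1/sqrt(step)-style decay.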
+ +from contextlib import nullcontext +import logging +import os +import torch +import json +import re +import datetime +import yaml + +import deepspeed +import torch.optim as optim +import torch.distributed as dist + +from torch.utils.tensorboard import SummaryWriter +from torch.utils.data import DataLoader +from torch.nn.utils import clip_grad_norm_ + +from deepspeed.runtime.zero.stage_1_and_2 import estimate_zero2_model_states_mem_needs_all_live + +from cosyvoice.dataset.dataset import Dataset +from cosyvoice.utils.scheduler import WarmupLR, NoamHoldAnnealing + + +def init_distributed(args): + world_size = int(os.environ.get('WORLD_SIZE', 1)) + local_rank = int(os.environ.get('LOCAL_RANK', 0)) + rank = int(os.environ.get('RANK', 0)) + logging.info('training on multiple gpus, this gpu {}'.format(local_rank) + + ', rank {}, world_size {}'.format(rank, world_size)) + if args.train_engine == 'torch_ddp': + torch.cuda.set_device(local_rank) + dist.init_process_group(args.dist_backend) + else: + deepspeed.init_distributed(dist_backend=args.dist_backend) + return world_size, local_rank, rank + + +def init_dataset_and_dataloader(args, configs): + train_dataset = Dataset(args.train_data, data_pipeline=configs['data_pipeline'], mode='train', shuffle=True, partition=True) + cv_dataset = Dataset(args.cv_data, data_pipeline=configs['data_pipeline'], mode='train', shuffle=False, partition=False) + + # do not use persistent_workers=True, as whisper tokenizer opens tiktoken file each time when the for loop starts + train_data_loader = DataLoader(train_dataset, + batch_size=None, + pin_memory=args.pin_memory, + num_workers=args.num_workers, + prefetch_factor=args.prefetch) + cv_data_loader = DataLoader(cv_dataset, + batch_size=None, + pin_memory=args.pin_memory, + num_workers=args.num_workers, + prefetch_factor=args.prefetch) + return train_dataset, cv_dataset, train_data_loader, cv_data_loader + + + +def check_modify_and_save_config(args, configs): + if args.train_engine == "torch_ddp": + configs['train_conf']["dtype"] = 'fp32' + else: + with open(args.deepspeed_config, 'r') as fin: + ds_configs = json.load(fin) + if "fp16" in ds_configs and ds_configs["fp16"]["enabled"]: + configs['train_conf']["dtype"] = "fp16" + elif "bf16" in ds_configs and ds_configs["bf16"]["enabled"]: + configs['train_conf']["dtype"] = "bf16" + else: + configs['train_conf']["dtype"] = "fp32" + assert ds_configs["train_micro_batch_size_per_gpu"] == 1 + # if use deepspeed, override ddp config + configs['train_conf']['save_per_step'] = int(configs['train_conf']['save_per_step'] * configs['train_conf']['accum_grad'] / ds_configs["gradient_accumulation_steps"]) + configs['train_conf']['accum_grad'] = ds_configs["gradient_accumulation_steps"] + configs['train_conf']['grad_clip'] = ds_configs["gradient_clipping"] + configs['train_conf']['log_interval'] = ds_configs["steps_per_print"] + return configs + + +def wrap_cuda_model(args, model): + local_world_size = int(os.environ.get('LOCAL_WORLD_SIZE', 1)) + world_size = int(os.environ.get('WORLD_SIZE', 1)) + if args.train_engine == "torch_ddp": # native pytorch ddp + assert (torch.cuda.is_available()) + model.cuda() + model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True) + else: + if int(os.environ.get('RANK', 0)) == 0: + logging.info("Estimating model states memory needs (zero2)...") + estimate_zero2_model_states_mem_needs_all_live( + model, + num_gpus_per_node=local_world_size, + num_nodes=world_size // local_world_size) + return model + + +def 
init_optimizer_and_scheduler(args, configs, model): + if configs['train_conf']['optim'] == 'adam': + optimizer = optim.Adam(model.parameters(), **configs['train_conf']['optim_conf']) + elif configs['train_conf']['optim'] == 'adamw': + optimizer = optim.AdamW(model.parameters(), **configs['train_conf']['optim_conf']) + else: + raise ValueError("unknown optimizer: " + configs['train_conf']) + + if configs['train_conf']['scheduler'] == 'warmuplr': + scheduler_type = WarmupLR + scheduler = WarmupLR(optimizer, **configs['train_conf']['scheduler_conf']) + elif configs['train_conf']['scheduler'] == 'NoamHoldAnnealing': + scheduler_type = NoamHoldAnnealing + scheduler = NoamHoldAnnealing(optimizer, **configs['train_conf']['scheduler_conf']) + else: + raise ValueError("unknown scheduler: " + configs['train_conf']) + + # use deepspeed optimizer for speedup + if args.train_engine == "deepspeed": + def scheduler(opt): + return scheduler_type(opt, **configs['train_conf']['scheduler_conf']) + model, optimizer, _, scheduler = deepspeed.initialize( + args=args, + model=model, + optimizer=None, + lr_scheduler=scheduler, + model_parameters=model.parameters()) + + return model, optimizer, scheduler + + +def init_summarywriter(args): + writer = None + if int(os.environ.get('RANK', 0)) == 0: + os.makedirs(args.model_dir, exist_ok=True) + writer = SummaryWriter(args.tensorboard_dir) + return writer + + +def save_model(model, model_name, info_dict): + rank = int(os.environ.get('RANK', 0)) + model_dir = info_dict["model_dir"] + save_model_path = os.path.join(model_dir, '{}.pt'.format(model_name)) + + if info_dict["train_engine"] == "torch_ddp": + if rank == 0: + torch.save(model.module.state_dict(), save_model_path) + else: + with torch.no_grad(): + model.save_checkpoint(save_dir=model_dir, + tag=model_name, + client_state=info_dict) + if rank == 0: + info_path = re.sub('.pt$', '.yaml', save_model_path) + info_dict['save_time'] = datetime.datetime.now().strftime('%d/%m/%Y %H:%M:%S') + with open(info_path, 'w') as fout: + data = yaml.dump(info_dict) + fout.write(data) + logging.info('[Rank {}] Checkpoint: save to checkpoint {}'.format(rank, save_model_path)) + + +def cosyvoice_join(group_join, info_dict): + world_size = int(os.environ.get('WORLD_SIZE', 1)) + local_rank = int(os.environ.get('LOCAL_RANK', 0)) + rank = int(os.environ.get('RANK', 0)) + + if info_dict["batch_idx"] != 0: + # we try to join all rank in both ddp and deepspeed mode, in case different rank has different lr + try: + dist.monitored_barrier(group=group_join, + timeout=group_join.options._timeout) + return False + except RuntimeError as e: + logging.info("Detected uneven workload distribution: {}\n".format(e) + + "Break current worker to manually join all workers, " + + "world_size {}, current rank {}, current local_rank {}\n". 
+ format(world_size, rank, local_rank)) + return True + else: + return False + + +def batch_forward(model, batch, info_dict): + device = int(os.environ.get('LOCAL_RANK', 0)) + + dtype = info_dict["dtype"] + if dtype == "fp16": + dtype = torch.float16 + elif dtype == "bf16": + dtype = torch.bfloat16 + else: # fp32 + dtype = torch.float32 + + if info_dict['train_engine'] == 'torch_ddp': + autocast = nullcontext() + else: + autocast = torch.cuda.amp.autocast(enabled=True, dtype=dtype, cache_enabled=False) + + with autocast: + info_dict['loss_dict'] = model(batch, device) + return info_dict + + +def batch_backward(model, info_dict): + if info_dict["train_engine"] == "deepspeed": + scaled_loss = model.backward(info_dict['loss_dict']['loss']) + else: + scaled_loss = info_dict['loss_dict']['loss'] / info_dict['accum_grad'] + scaled_loss.backward() + + info_dict['loss_dict']['loss'] = scaled_loss + return info_dict + + +def update_parameter_and_lr(model, optimizer, scheduler, info_dict): + grad_norm = 0.0 + if info_dict['train_engine'] == "deepspeed": + info_dict["is_gradient_accumulation_boundary"] = model.is_gradient_accumulation_boundary() + model.step() + grad_norm = model.get_global_grad_norm() + elif (info_dict['batch_idx'] + 1) % info_dict["accum_grad"] == 0: + grad_norm = clip_grad_norm_(model.parameters(), info_dict['grad_clip']) + if torch.isfinite(grad_norm): + optimizer.step() + optimizer.zero_grad() + scheduler.step() + info_dict["lr"] = optimizer.param_groups[0]['lr'] + info_dict["grad_norm"] = grad_norm + return info_dict + + +def log_per_step(writer, info_dict): + tag = info_dict["tag"] + epoch = info_dict.get('epoch', 0) + step = info_dict["step"] + batch_idx = info_dict["batch_idx"] + loss_dict = info_dict['loss_dict'] + rank = int(os.environ.get('RANK', 0)) + + # only rank 0 write to tensorboard to avoid multi-process write + if writer is not None: + if (info_dict['train_engine'] == 'deepspeed' and info_dict['is_gradient_accumulation_boundary'] is True) or \ + (info_dict['train_engine'] == 'torch_ddp' and (info_dict['batch_idx'] + 1) % info_dict['accum_grad'] == 0): + for k in ['epoch', 'lr', 'grad_norm']: + writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1) + for k, v in loss_dict.items(): + writer.add_scalar('{}/{}'.format(tag, k), v, step + 1) + + # TRAIN & CV, Shell log (stdout) + if (info_dict['batch_idx'] + 1) % info_dict['log_interval'] == 0: + log_str = '{} Batch {}/{} '.format(tag, epoch, batch_idx + 1) + for name, value in loss_dict.items(): + log_str += '{} {:.6f} '.format(name, value) + if tag == "TRAIN": + log_str += 'lr {:.8f} grad_norm {:.6f}'.format( + info_dict["lr"], info_dict['grad_norm']) + log_str += ' rank {}'.format(rank) + logging.debug(log_str) + + +def log_per_save(writer, info_dict): + tag = info_dict["tag"] + epoch = info_dict["epoch"] + step = info_dict["step"] + loss_dict = info_dict["loss_dict"] + lr = info_dict['lr'] + rank = int(os.environ.get('RANK', 0)) + logging.info( + 'Epoch {} Step {} CV info lr {} {} rank {}'.format( + epoch, step + 1, lr, rank, ' '.join(['{}_{}'.format(k, v) for k, v in loss_dict.items()]))) + + if writer is not None: + for k in ['epoch', 'lr']: + writer.add_scalar('{}/{}'.format(tag, k), info_dict[k], step + 1) + for k, v in loss_dict.items(): + writer.add_scalar('{}/{}'.format(tag, k), v, step + 1) diff --git a/examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml b/examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml new file mode 100644 index 0000000..10206e6 --- /dev/null +++ 
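
# Standalone toy version (not part of the patch) of the torch_ddp gradient-accumulation and
# clipping pattern implemented by batch_backward/update_parameter_and_lr above. The model,
# batch sizes, accum_grad and grad_clip values are invented for illustration only.
import torch
from torch.nn.utils import clip_grad_norm_

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
accum_grad, grad_clip = 2, 5.0

for batch_idx in range(8):
    x, y = torch.randn(3, 4), torch.randn(3, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_grad  # scale as in batch_backward
    loss.backward()                                                # gradients accumulate
    if (batch_idx + 1) % accum_grad == 0:                          # accumulation boundary
        grad_norm = clip_grad_norm_(model.parameters(), grad_clip)
        if torch.isfinite(grad_norm):
            optimizer.step()
        optimizer.zero_grad()
        # the real code also calls scheduler.step() and records lr/grad_norm here
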
b/examples/libritts/cosyvoice/conf/cosyvoice.fromscratch.yaml @@ -0,0 +1,197 @@ +# set random seed, so that you may reproduce your result. +__set_seed1: !apply:random.seed [1986] +__set_seed2: !apply:numpy.random.seed [1986] +__set_seed3: !apply:torch.manual_seed [1986] +__set_seed4: !apply:torch.cuda.manual_seed_all [1986] + +# fixed params +sample_rate: 22050 +text_encoder_input_size: 512 +llm_input_size: 1024 +llm_output_size: 1024 +spk_embed_dim: 192 + +# model params +# for all class/function included in this repo, we use ! or ! for intialization, so that user may find all corresponding class/function according to one single yaml. +# for system/third_party class/function, we do not require this. +llm: !new:cosyvoice.llm.llm.TransformerLM + text_encoder_input_size: !ref + llm_input_size: !ref + llm_output_size: !ref + text_token_size: 51866 + speech_token_size: 4096 + length_normalized_loss: True + lsm_weight: 0 + spk_embed_dim: !ref + text_encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder + input_size: !ref + output_size: 1024 + attention_heads: 8 + linear_units: 2048 + num_blocks: 3 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0 + normalize_before: True + input_layer: 'linear' + pos_enc_layer_type: 'rel_pos_espnet' + selfattention_layer_type: 'rel_selfattn' + use_cnn_module: False + macaron_style: False + use_dynamic_chunk: False + use_dynamic_left_chunk: False + static_chunk_size: 1 + llm: !new:cosyvoice.transformer.encoder.TransformerEncoder + input_size: !ref + output_size: !ref + attention_heads: 8 + linear_units: 2048 + num_blocks: 7 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0 + input_layer: 'linear_legacy' + pos_enc_layer_type: 'rel_pos_espnet' + selfattention_layer_type: 'rel_selfattn' + static_chunk_size: 1 + +flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec + input_size: 512 + output_size: 80 + spk_embed_dim: !ref + output_type: 'mel' + vocab_size: 4096 + input_frame_rate: 50 + only_mask_loss: True + encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder + output_size: 512 + attention_heads: 8 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.1 + normalize_before: True + input_layer: 'linear' + pos_enc_layer_type: 'rel_pos_espnet' + selfattention_layer_type: 'rel_selfattn' + input_size: 512 + use_cnn_module: False + macaron_style: False + length_regulator: !new:cosyvoice.flow.length_regulator.InterpolateRegulator + channels: 80 + sampling_ratios: [1, 1, 1, 1] + decoder: !new:cosyvoice.flow.flow_matching.ConditionalCFM + in_channels: 240 + n_spks: 1 + spk_emb_dim: 80 + cfm_params: !new:omegaconf.DictConfig + content: + sigma_min: 1e-06 + solver: 'euler' + t_scheduler: 'cosine' + training_cfg_rate: 0.2 + inference_cfg_rate: 0.7 + reg_loss_type: 'l1' + estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder + in_channels: 320 + out_channels: 80 + channels: [256, 256] + dropout: 0 + attention_head_dim: 64 + n_blocks: 4 + num_mid_blocks: 12 + num_heads: 8 + act_fn: 'gelu' + +hift: !new:cosyvoice.hifigan.generator.HiFTGenerator + in_channels: 80 + base_channels: 512 + nb_harmonics: 8 + sampling_rate: !ref + nsf_alpha: 0.1 + nsf_sigma: 0.003 + nsf_voiced_threshold: 10 + upsample_rates: [8, 8] + upsample_kernel_sizes: [16, 16] + istft_params: + n_fft: 16 + hop_len: 4 + resblock_kernel_sizes: [3, 7, 11] + resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] + source_resblock_kernel_sizes: [7, 11] + 
source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]] + lrelu_slope: 0.1 + audio_limit: 0.99 + f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor + num_class: 1 + in_channels: 80 + cond_channels: 512 + +# processor functions +parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener +get_tokenizer: !name:whisper.tokenizer.get_tokenizer + multilingual: True + num_languages: 100 + language: 'en' + task: 'transcribe' +allowed_special: 'all' +tokenize: !name:cosyvoice.dataset.processor.tokenize + get_tokenizer: !ref + allowed_special: !ref +filter: !name:cosyvoice.dataset.processor.filter + max_length: 40960 + min_length: 0 + token_max_length: 200 + token_min_length: 1 +resample: !name:cosyvoice.dataset.processor.resample + resample_rate: !ref +feat_extractor: !name:matcha.utils.audio.mel_spectrogram + n_fft: 1024 + num_mels: 80 + sampling_rate: !ref + hop_size: 256 + win_size: 1024 + fmin: 0 + fmax: 8000 + center: False +compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank + feat_extractor: !ref +parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding + normalize: True +shuffle: !name:cosyvoice.dataset.processor.shuffle + shuffle_size: 1000 +sort: !name:cosyvoice.dataset.processor.sort + sort_size: 500 # sort_size should be less than shuffle_size +batch: !name:cosyvoice.dataset.processor.batch + batch_type: 'dynamic' + max_frames_in_batch: 12000 +padding: !name:cosyvoice.dataset.processor.padding + +# dataset processor pipeline +data_pipeline: [ + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , +] + +# train conf +train_conf: + optim: adam + optim_conf: + lr: 0.002 # change to 0.001 if you want to train flow from scratch + scheduler: warmuplr + scheduler_conf: + warmup_steps: 25000 + max_epoch: 200 + grad_clip: 5 + accum_grad: 2 + log_interval: 100 + save_per_step: -1 \ No newline at end of file diff --git a/examples/libritts/cosyvoice/conf/cosyvoice.yaml b/examples/libritts/cosyvoice/conf/cosyvoice.yaml new file mode 100644 index 0000000..cc5eee0 --- /dev/null +++ b/examples/libritts/cosyvoice/conf/cosyvoice.yaml @@ -0,0 +1,197 @@ +# set random seed, so that you may reproduce your result. +__set_seed1: !apply:random.seed [1986] +__set_seed2: !apply:numpy.random.seed [1986] +__set_seed3: !apply:torch.manual_seed [1986] +__set_seed4: !apply:torch.cuda.manual_seed_all [1986] + +# fixed params +sample_rate: 22050 +text_encoder_input_size: 512 +llm_input_size: 1024 +llm_output_size: 1024 +spk_embed_dim: 192 + +# model params +# for all class/function included in this repo, we use ! or ! for intialization, so that user may find all corresponding class/function according to one single yaml. +# for system/third_party class/function, we do not require this. 
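
# Sketch (not part of the patch) of how the !new:/!name:/!ref tags used throughout these
# configs are resolved by HyperPyYAML (pinned in requirements.txt). The YAML below is a toy
# stand-in, not the real cosyvoice.yaml; note that !ref normally wraps the referenced key in
# angle brackets, e.g. !ref <sample_rate>.
from io import StringIO
from hyperpyyaml import load_hyperpyyaml

toy_yaml = """
hidden_dim: 80
# !new: builds the object immediately; !name: returns a constructor (a functools.partial)
norm: !new:torch.nn.LayerNorm
    normalized_shape: !ref <hidden_dim>
proj: !name:torch.nn.Linear
    in_features: !ref <hidden_dim>
    out_features: !ref <hidden_dim>
"""
configs = load_hyperpyyaml(StringIO(toy_yaml))
print(configs['hidden_dim'])   # 80
print(configs['norm'])         # an already-instantiated LayerNorm((80,), ...)
print(configs['proj']())       # calling the partial builds a Linear(80, 80)
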
+llm: !new:cosyvoice.llm.llm.TransformerLM + text_encoder_input_size: !ref + llm_input_size: !ref + llm_output_size: !ref + text_token_size: 51866 + speech_token_size: 4096 + length_normalized_loss: True + lsm_weight: 0 + spk_embed_dim: !ref + text_encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder + input_size: !ref + output_size: 1024 + attention_heads: 16 + linear_units: 4096 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0 + normalize_before: True + input_layer: 'linear' + pos_enc_layer_type: 'rel_pos_espnet' + selfattention_layer_type: 'rel_selfattn' + use_cnn_module: False + macaron_style: False + use_dynamic_chunk: False + use_dynamic_left_chunk: False + static_chunk_size: 1 + llm: !new:cosyvoice.transformer.encoder.TransformerEncoder + input_size: !ref + output_size: !ref + attention_heads: 16 + linear_units: 4096 + num_blocks: 14 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0 + input_layer: 'linear_legacy' + pos_enc_layer_type: 'rel_pos_espnet' + selfattention_layer_type: 'rel_selfattn' + static_chunk_size: 1 + +flow: !new:cosyvoice.flow.flow.MaskedDiffWithXvec + input_size: 512 + output_size: 80 + spk_embed_dim: !ref + output_type: 'mel' + vocab_size: 4096 + input_frame_rate: 50 + only_mask_loss: True + encoder: !new:cosyvoice.transformer.encoder.ConformerEncoder + output_size: 512 + attention_heads: 8 + linear_units: 2048 + num_blocks: 6 + dropout_rate: 0.1 + positional_dropout_rate: 0.1 + attention_dropout_rate: 0.1 + normalize_before: True + input_layer: 'linear' + pos_enc_layer_type: 'rel_pos_espnet' + selfattention_layer_type: 'rel_selfattn' + input_size: 512 + use_cnn_module: False + macaron_style: False + length_regulator: !new:cosyvoice.flow.length_regulator.InterpolateRegulator + channels: 80 + sampling_ratios: [1, 1, 1, 1] + decoder: !new:cosyvoice.flow.flow_matching.ConditionalCFM + in_channels: 240 + n_spks: 1 + spk_emb_dim: 80 + cfm_params: !new:omegaconf.DictConfig + content: + sigma_min: 1e-06 + solver: 'euler' + t_scheduler: 'cosine' + training_cfg_rate: 0.2 + inference_cfg_rate: 0.7 + reg_loss_type: 'l1' + estimator: !new:cosyvoice.flow.decoder.ConditionalDecoder + in_channels: 320 + out_channels: 80 + channels: [256, 256] + dropout: 0 + attention_head_dim: 64 + n_blocks: 4 + num_mid_blocks: 12 + num_heads: 8 + act_fn: 'gelu' + +hift: !new:cosyvoice.hifigan.generator.HiFTGenerator + in_channels: 80 + base_channels: 512 + nb_harmonics: 8 + sampling_rate: !ref + nsf_alpha: 0.1 + nsf_sigma: 0.003 + nsf_voiced_threshold: 10 + upsample_rates: [8, 8] + upsample_kernel_sizes: [16, 16] + istft_params: + n_fft: 16 + hop_len: 4 + resblock_kernel_sizes: [3, 7, 11] + resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5], [1, 3, 5]] + source_resblock_kernel_sizes: [7, 11] + source_resblock_dilation_sizes: [[1, 3, 5], [1, 3, 5]] + lrelu_slope: 0.1 + audio_limit: 0.99 + f0_predictor: !new:cosyvoice.hifigan.f0_predictor.ConvRNNF0Predictor + num_class: 1 + in_channels: 80 + cond_channels: 512 + +# processor functions +parquet_opener: !name:cosyvoice.dataset.processor.parquet_opener +get_tokenizer: !name:whisper.tokenizer.get_tokenizer + multilingual: True + num_languages: 100 + language: 'en' + task: 'transcribe' +allowed_special: 'all' +tokenize: !name:cosyvoice.dataset.processor.tokenize + get_tokenizer: !ref + allowed_special: !ref +filter: !name:cosyvoice.dataset.processor.filter + max_length: 40960 + min_length: 0 + token_max_length: 200 + token_min_length: 1 +resample: 
!name:cosyvoice.dataset.processor.resample + resample_rate: !ref +feat_extractor: !name:matcha.utils.audio.mel_spectrogram + n_fft: 1024 + num_mels: 80 + sampling_rate: !ref + hop_size: 256 + win_size: 1024 + fmin: 0 + fmax: 8000 + center: False +compute_fbank: !name:cosyvoice.dataset.processor.compute_fbank + feat_extractor: !ref +parse_embedding: !name:cosyvoice.dataset.processor.parse_embedding + normalize: True +shuffle: !name:cosyvoice.dataset.processor.shuffle + shuffle_size: 1000 +sort: !name:cosyvoice.dataset.processor.sort + sort_size: 500 # sort_size should be less than shuffle_size +batch: !name:cosyvoice.dataset.processor.batch + batch_type: 'dynamic' + max_frames_in_batch: 2000 +padding: !name:cosyvoice.dataset.processor.padding + +# dataset processor pipeline +data_pipeline: [ + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , + !ref , +] + +# train conf +train_conf: + optim: adam + optim_conf: + lr: 0.001 + scheduler: warmuplr + scheduler_conf: + warmup_steps: 2500 + max_epoch: 200 + grad_clip: 5 + accum_grad: 2 + log_interval: 100 + save_per_step: -1 \ No newline at end of file diff --git a/examples/libritts/cosyvoice/conf/ds_stage2.json b/examples/libritts/cosyvoice/conf/ds_stage2.json new file mode 100644 index 0000000..2b2de3d --- /dev/null +++ b/examples/libritts/cosyvoice/conf/ds_stage2.json @@ -0,0 +1,42 @@ +{ + "train_micro_batch_size_per_gpu": 1, + "gradient_accumulation_steps": 1, + "steps_per_print": 100, + "gradient_clipping": 5, + "fp16": { + "enabled": false, + "auto_cast": false, + "loss_scale": 0, + "initial_scale_power": 16, + "loss_scale_window": 256, + "hysteresis": 2, + "consecutive_hysteresis": false, + "min_loss_scale": 1 + }, + "bf16": { + "enabled": false + }, + "zero_force_ds_cpu_optimizer": false, + "zero_optimization": { + "stage": 2, + "offload_optimizer": { + "device": "none", + "pin_memory": true + }, + "allgather_partitions": true, + "allgather_bucket_size": 5e8, + "overlap_comm": false, + "reduce_scatter": true, + "reduce_bucket_size": 5e8, + "contiguous_gradients" : true + }, + "optimizer": { + "type": "AdamW", + "params": { + "lr": 0.001, + "weight_decay": 0.0001, + "torch_adam": true, + "adam_w_mode": true + } + } +} \ No newline at end of file diff --git a/examples/libritts/cosyvoice/cosyvoice b/examples/libritts/cosyvoice/cosyvoice new file mode 100644 index 0000000..3903806 --- /dev/null +++ b/examples/libritts/cosyvoice/cosyvoice @@ -0,0 +1 @@ +../../../cosyvoice \ No newline at end of file diff --git a/examples/libritts/cosyvoice/local/download_and_untar.sh b/examples/libritts/cosyvoice/local/download_and_untar.sh new file mode 100644 index 0000000..cd32fb6 --- /dev/null +++ b/examples/libritts/cosyvoice/local/download_and_untar.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +# Copyright 2014 Johns Hopkins University (author: Daniel Povey) +# Apache 2.0 + +remove_archive=false + +if [ "$1" == --remove-archive ]; then + remove_archive=true + shift +fi + +if [ $# -ne 3 ]; then + echo "Usage: $0 [--remove-archive] " + echo "e.g.: $0 /export/a15/vpanayotov/data www.openslr.org/resources/11 dev-clean" + echo "With --remove-archive it will remove the archive after successfully un-tarring it." + echo " can be one of: dev-clean, test-clean, dev-other, test-other," + echo " train-clean-100, train-clean-360, train-other-500." + exit 1 +fi + +data=$1 +url=$2 +part=$3 + +if [ ! 
-d "$data" ]; then + echo "$0: no such directory $data" + exit 1 +fi + +part_ok=false +list="dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500" +for x in $list; do + if [ "$part" == $x ]; then part_ok=true; fi +done +if ! $part_ok; then + echo "$0: expected to be one of $list, but got '$part'" + exit 1 +fi + +if [ -z "$url" ]; then + echo "$0: empty URL base." + exit 1 +fi + +if [ -f $data/LibriSpeech/$part/.complete ]; then + echo "$0: data part $part was already successfully extracted, nothing to do." + exit 0 +fi + + +# sizes of the archive files in bytes. This is some older versions. +sizes_old="371012589 347390293 379743611 361838298 6420417880 23082659865 30626749128" +# sizes_new is the archive file sizes of the final release. Some of these sizes are of +# things we probably won't download. +sizes_new="337926286 314305928 695964615 297279345 87960560420 33373768 346663984 328757843 6387309499 23049477885 30593501606" + +if [ -f $data/$part.tar.gz ]; then + size=$(/bin/ls -l $data/$part.tar.gz | awk '{print $5}') + size_ok=false + for s in $sizes_old $sizes_new; do if [ $s == $size ]; then size_ok=true; fi; done + if ! $size_ok; then + echo "$0: removing existing file $data/$part.tar.gz because its size in bytes $size" + echo "does not equal the size of one of the archives." + rm $data/$part.tar.gz + else + echo "$data/$part.tar.gz exists and appears to be complete." + fi +fi + +if [ ! -f $data/$part.tar.gz ]; then + if ! which wget >/dev/null; then + echo "$0: wget is not installed." + exit 1 + fi + full_url=$url/$part.tar.gz + echo "$0: downloading data from $full_url. This may take some time, please be patient." + + if ! wget -P $data --no-check-certificate $full_url; then + echo "$0: error executing wget $full_url" + exit 1 + fi +fi + +if ! tar -C $data -xvzf $data/$part.tar.gz; then + echo "$0: error un-tarring archive $data/$part.tar.gz" + exit 1 +fi + +touch $data/LibriSpeech/$part/.complete + +echo "$0: Successfully downloaded and un-tarred $data/$part.tar.gz" + +if $remove_archive; then + echo "$0: removing $data/$part.tar.gz file since --remove-archive option was supplied." 
+  rm $data/$part.tar.gz
+fi
diff --git a/examples/libritts/cosyvoice/local/prepare_data.py b/examples/libritts/cosyvoice/local/prepare_data.py
new file mode 100644
index 0000000..9248226
--- /dev/null
+++ b/examples/libritts/cosyvoice/local/prepare_data.py
@@ -0,0 +1,51 @@
+import argparse
+import logging
+import glob
+import os
+from tqdm import tqdm
+
+
+logger = logging.getLogger()
+
+def main():
+    wavs = list(glob.glob('{}/*/*/*wav'.format(args.src_dir)))
+
+    utt2wav, utt2text, utt2spk, spk2utt = {}, {}, {}, {}
+    for wav in tqdm(wavs):
+        txt = wav.replace('.wav', '.normalized.txt')
+        if not os.path.exists(txt):
+            logger.warning('{} does not exist'.format(txt))
+            continue
+        with open(txt) as f:
+            content = ''.join(l.replace('\n', '') for l in f.readlines())
+        utt = os.path.basename(wav).replace('.wav', '')
+        spk = utt.split('_')[0]
+        utt2wav[utt] = wav
+        utt2text[utt] = content
+        utt2spk[utt] = spk
+        if spk not in spk2utt:
+            spk2utt[spk] = []
+        spk2utt[spk].append(utt)
+
+    with open('{}/wav.scp'.format(args.des_dir), 'w') as f:
+        for k, v in utt2wav.items():
+            f.write('{} {}\n'.format(k, v))
+    with open('{}/text'.format(args.des_dir), 'w') as f:
+        for k, v in utt2text.items():
+            f.write('{} {}\n'.format(k, v))
+    with open('{}/utt2spk'.format(args.des_dir), 'w') as f:
+        for k, v in utt2spk.items():
+            f.write('{} {}\n'.format(k, v))
+    with open('{}/spk2utt'.format(args.des_dir), 'w') as f:
+        for k, v in spk2utt.items():
+            f.write('{} {}\n'.format(k, ' '.join(v)))
+    return
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--src_dir',
+                        type=str)
+    parser.add_argument('--des_dir',
+                        type=str)
+    args = parser.parse_args()
+    main()
diff --git a/examples/libritts/cosyvoice/path.sh b/examples/libritts/cosyvoice/path.sh
new file mode 100644
index 0000000..513f4eb
--- /dev/null
+++ b/examples/libritts/cosyvoice/path.sh
@@ -0,0 +1,3 @@
+# NOTE(kan-bayashi): Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C
+export PYTHONIOENCODING=UTF-8
+export PYTHONPATH=../../../:../../../third_party/AcademiCodec:../../../third_party/Matcha-TTS:$PYTHONPATH
diff --git a/examples/libritts/cosyvoice/run.sh b/examples/libritts/cosyvoice/run.sh
new file mode 100644
index 0000000..96eca9b
--- /dev/null
+++ b/examples/libritts/cosyvoice/run.sh
@@ -0,0 +1,105 @@
+#!/bin/bash
+# Copyright 2024 Alibaba Inc. All Rights Reserved.
+. ./path.sh || exit 1;
+
+stage=-1
+stop_stage=3
+
+data_url=www.openslr.org/resources/60
+data_dir=/mnt/lyuxiang.lx/data/tts/openslr/libritts
+pretrained_model_dir=../../../pretrained_models/CosyVoice-300M
+
+if [ ${stage} -le -1 ] && [ ${stop_stage} -ge -1 ]; then
+  echo "Data Download"
+  for part in dev-clean test-clean dev-other test-other train-clean-100 train-clean-360 train-other-500; do
+    local/download_and_untar.sh ${data_dir} ${data_url} ${part}
+  done
+fi
+
+if [ ${stage} -le 0 ] && [ ${stop_stage} -ge 0 ]; then
+  echo "Data preparation, prepare wav.scp/text/utt2spk/spk2utt"
+  for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
+    mkdir -p data/$x
+    python local/prepare_data.py --src_dir $data_dir/LibriTTS/$x --des_dir data/$x
+  done
+fi
+
+if [ ${stage} -le 1 ] && [ ${stop_stage} -ge 1 ]; then
+  echo "Extract campplus speaker embedding, you will get spk2embedding.pt and utt2embedding.pt in data/$x dir"
+  for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
+    tools/extract_embedding.py --dir data/$x \
+      --onnx_path $pretrained_model_dir/campplus.onnx
+  done
+fi
+
+if [ ${stage} -le 2 ] && [ ${stop_stage} -ge 2 ]; then
+  echo "Extract discrete speech token, you will get utt2speech_token.pt in data/$x dir"
+  for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
+    tools/extract_speech_token.py --dir data/$x \
+      --onnx_path $pretrained_model_dir/speech_tokenizer_v1.onnx
+  done
fi
+
+if [ ${stage} -le 3 ] && [ ${stop_stage} -ge 3 ]; then
+  echo "Prepare required parquet format data, you should have prepared wav.scp/text/utt2spk/spk2utt/utt2embedding.pt/spk2embedding.pt/utt2speech_token.pt"
+  for x in train-clean-100 train-clean-360 train-other-500 dev-clean dev-other test-clean test-other; do
+    mkdir -p data/$x/parquet
+    tools/make_parquet_list.py --num_utts_per_parquet 1000 \
+      --num_processes 10 \
+      --src_dir data/$x \
+      --des_dir data/$x/parquet
+  done
+fi
+
+# inference
+if [ ${stage} -le 4 ] && [ ${stop_stage} -ge 4 ]; then
+  echo "Run inference. Please make sure utt in tts_text is in prompt_data"
+  for mode in sft zero_shot; do
+    python cosyvoice/bin/inference.py --mode $mode \
+      --gpu 0 \
+      --config conf/cosyvoice.yaml \
+      --prompt_data data/test-clean/parquet/data.list \
+      --prompt_utt2data data/test-clean/parquet/utt2data.list \
+      --tts_text `pwd`/tts_text.json \
+      --llm_model $pretrained_model_dir/llm.pt \
+      --flow_model $pretrained_model_dir/flow.pt \
+      --hifigan_model $pretrained_model_dir/hift.pt \
+      --result_dir `pwd`/exp/cosyvoice/test-clean/$mode
+  done
+fi
+
+# train llm
+export CUDA_VISIBLE_DEVICES="0,1,2,3"
+num_gpus=$(echo $CUDA_VISIBLE_DEVICES | awk -F "," '{print NF}')
+job_id=1986
+dist_backend="nccl"
+num_workers=2
+prefetch=100
+train_engine=torch_ddp
+if [ ${stage} -le 5 ] && [ ${stop_stage} -ge 5 ]; then
+  echo "Run train. We only support llm training for now. If you want to train from scratch, please use conf/cosyvoice.fromscratch.yaml"
+  if [ $train_engine == 'deepspeed' ]; then
+    echo "Notice deepspeed has its own optimizer config.
Modify conf/ds_stage2.json if necessary" + fi + cat data/{train-clean-100,train-clean-360,train-other-500}/parquet/data.list > data/train.data.list + cat data/{dev-clean,dev-other}/parquet/data.list > data/dev.data.list + for model in llm; do + torchrun --nnodes=1 --nproc_per_node=$num_gpus \ + --rdzv_id=$job_id --rdzv_backend="c10d" --rdzv_endpoint="localhost:0" \ + cosyvoice/bin/train.py \ + --train_engine $train_engine \ + --config conf/cosyvoice.yaml \ + --train_data data/train.data.list \ + --cv_data data/dev.data.list \ + --model $model \ + --checkpoint $pretrained_model_dir/$model.pt \ + --model_dir `pwd`/exp/cosyvoice/$model/$train_engine \ + --tensorboard_dir `pwd`/tensorboard/cosyvoice/$model/$train_engine \ + --ddp.dist_backend $dist_backend \ + --num_workers ${num_workers} \ + --prefetch ${prefetch} \ + --pin_memory \ + --deepspeed_config ./conf/ds_stage2.json \ + --deepspeed.save_states model+optimizer + done +fi \ No newline at end of file diff --git a/examples/libritts/cosyvoice/tools b/examples/libritts/cosyvoice/tools new file mode 100644 index 0000000..c92f417 --- /dev/null +++ b/examples/libritts/cosyvoice/tools @@ -0,0 +1 @@ +../../../tools \ No newline at end of file diff --git a/examples/libritts/cosyvoice/tts_text.json b/examples/libritts/cosyvoice/tts_text.json new file mode 100644 index 0000000..9f3e8d9 --- /dev/null +++ b/examples/libritts/cosyvoice/tts_text.json @@ -0,0 +1,5 @@ +{ + "1089_134686_000002_000000": [ + "hello, my name is Jack. What is your name?" + ] +} \ No newline at end of file diff --git a/gpu_diagnostics.py b/gpu_diagnostics.py new file mode 100644 index 0000000..24aa5c4 --- /dev/null +++ b/gpu_diagnostics.py @@ -0,0 +1,24 @@ +import torch + +def gpu_diagnostics(): + if torch.cuda.is_available(): + print("GPU 诊断报告:") + print("="*40) + for i in range(torch.cuda.device_count()): + props = torch.cuda.get_device_properties(i) + total_memory = props.total_memory / (1024 * 1024) + reserved_memory = torch.cuda.memory_reserved(i) / (1024 * 1024) + allocated_memory = torch.cuda.memory_allocated(i) / (1024 * 1024) + free_memory = total_memory - allocated_memory + + print(f"GPU {i}: {props.name}") + print(f" 总显存 : {round(total_memory, 2)} MB") + print(f" 已保留显存 : {round(reserved_memory, 2)} MB") + print(f" 已分配显存 : {round(allocated_memory, 2)} MB") + print(f" 空闲显存 : {round(free_memory, 2)} MB") + print("="*40) + else: + print("未找到 GPU,使用 CPU") + +if __name__ == "__main__": + gpu_diagnostics() diff --git a/hf_download/hub/version_diffusers_cache.txt b/hf_download/hub/version_diffusers_cache.txt new file mode 100644 index 0000000..56a6051 --- /dev/null +++ b/hf_download/hub/version_diffusers_cache.txt @@ -0,0 +1 @@ +1 \ No newline at end of file diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..ae52d46 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,24 @@ +conformer==0.3.2 +diffusers==0.27.2 +gdown==5.1.0 +gradio==4.32.2 +grpcio==1.57.0 +grpcio-tools==1.57.0 +hydra-core==1.3.2 +HyperPyYAML==1.2.2 +inflect==7.3.1 +librosa==0.10.2 +lightning==2.2.4 +matplotlib==3.7.5 +modelscope==1.15.0 +networkx==3.1 +omegaconf==2.3.0 +onnxruntime-gpu==1.16.0 +openai-whisper +protobuf==4.25 +pydantic==2.7.0 +rich==13.7.1 +soundfile==0.12.1 +tensorboard==2.14.0 +wget==3.2 +PySoundFile \ No newline at end of file diff --git a/runtime/python/Dockerfile b/runtime/python/Dockerfile new file mode 100644 index 0000000..a9a43a5 --- /dev/null +++ b/runtime/python/Dockerfile @@ -0,0 +1,12 @@ +FROM nvidia/cuda:11.8.0-cudnn8-devel-ubuntu20.04 +ENV 
DEBIAN_FRONTEND=noninteractive + +WORKDIR /opt/CosyVoice + +RUN sed -i s@/archive.ubuntu.com/@/mirrors.aliyun.com/@g /etc/apt/sources.list +RUN apt-get update -y +RUN apt-get -y install python3-dev cmake python3-pip git +RUN git clone --recursive https://github.com/FunAudioLLM/CosyVoice.git +RUN cd CosyVoice && pip3 install -r requirements.txt -i https://mirrors.aliyun.com/pypi/simple/ --trusted-host=mirrors.aliyun.com +RUN cd CosyVoice/runtime/python && python3 -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. cosyvoice.proto +CMD ["/bin/bash", "-c", "cd /opt/CosyVoice/CosyVoice/runtime/python && . ./path/sh && python3 server.py --port 50000 --max_conc 4 --model_dir speech_tts/CosyVoice-300M && sleep infinity"] \ No newline at end of file diff --git a/runtime/python/client.py b/runtime/python/client.py new file mode 100644 index 0000000..d7d30c8 --- /dev/null +++ b/runtime/python/client.py @@ -0,0 +1,103 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append('{}/../..'.format(ROOT_DIR)) +sys.path.append('{}/../../third_party/AcademiCodec'.format(ROOT_DIR)) +sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR)) +import logging +import argparse +import torchaudio +import cosyvoice_pb2 +import cosyvoice_pb2_grpc +import grpc +import torch +import numpy as np +from cosyvoice.utils.file_utils import load_wav + + +def main(): + with grpc.insecure_channel("{}:{}".format(args.host, args.port)) as channel: + stub = cosyvoice_pb2_grpc.CosyVoiceStub(channel) + request = cosyvoice_pb2.Request() + if args.mode == 'sft': + logging.info('send sft request') + sft_request = cosyvoice_pb2.sftRequest() + sft_request.spk_id = args.spk_id + sft_request.tts_text = args.tts_text + request.sft_request.CopyFrom(sft_request) + elif args.mode == 'zero_shot': + logging.info('send zero_shot request') + zero_shot_request = cosyvoice_pb2.zeroshotRequest() + zero_shot_request.tts_text = args.tts_text + zero_shot_request.prompt_text = args.prompt_text + prompt_speech = load_wav(args.prompt_wav, 16000) + zero_shot_request.prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes() + request.zero_shot_request.CopyFrom(zero_shot_request) + elif args.mode == 'cross_lingual': + logging.info('send cross_lingual request') + cross_lingual_request = cosyvoice_pb2.crosslingualRequest() + cross_lingual_request.tts_text = args.tts_text + prompt_speech = load_wav(args.prompt_wav, 16000) + cross_lingual_request.prompt_audio = (prompt_speech.numpy() * (2**15)).astype(np.int16).tobytes() + request.cross_lingual_request.CopyFrom(cross_lingual_request) + else: + logging.info('send instruct request') + instruct_request = cosyvoice_pb2.instructRequest() + instruct_request.tts_text = args.tts_text + instruct_request.spk_id = args.spk_id + instruct_request.instruct_text = args.instruct_text + request.instruct_request.CopyFrom(instruct_request) + + response = 
stub.Inference(request) + logging.info('save response to {}'.format(args.tts_wav)) + tts_speech = torch.from_numpy(np.array(np.frombuffer(response.tts_audio, dtype=np.int16))).unsqueeze(dim=0) + torchaudio.save(args.tts_wav, tts_speech, target_sr) + logging.info('get response') + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--host', + type=str, + default='0.0.0.0') + parser.add_argument('--port', + type=int, + default='50000') + parser.add_argument('--mode', + default='sft', + choices=['sft', 'zero_shot', 'cross_lingual', 'instruct'], + help='request mode') + parser.add_argument('--tts_text', + type=str, + default='你好,我是通义千问语音合成大模型,请问有什么可以帮您的吗?') + parser.add_argument('--spk_id', + type=str, + default='中文女') + parser.add_argument('--prompt_text', + type=str, + default='希望你以后能够做的比我还好呦。') + parser.add_argument('--prompt_wav', + type=str, + default='../../zero_shot_prompt.wav') + parser.add_argument('--instruct_text', + type=str, + default='Theo \'Crimson\', is a fiery, passionate rebel leader. Fights with fervor for justice, but struggles with impulsiveness.') + parser.add_argument('--tts_wav', + type=str, + default='demo.wav') + args = parser.parse_args() + prompt_sr, target_sr = 16000, 22050 + main() diff --git a/runtime/python/cosyvoice.proto b/runtime/python/cosyvoice.proto new file mode 100644 index 0000000..babf3e7 --- /dev/null +++ b/runtime/python/cosyvoice.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; + +package cosyvoice; +option go_package = "protos/"; + +service CosyVoice{ + rpc Inference(Request) returns (Response) {} +} + +message Request{ + oneof RequestPayload { + sftRequest sft_request = 1; + zeroshotRequest zero_shot_request = 2; + crosslingualRequest cross_lingual_request = 3; + instructRequest instruct_request = 4; + } +} + +message sftRequest{ + string spk_id = 1; + string tts_text = 2; +} + +message zeroshotRequest{ + string tts_text = 1; + string prompt_text = 2; + bytes prompt_audio = 3; +} + +message crosslingualRequest{ + string tts_text = 1; + bytes prompt_audio = 2; +} + +message instructRequest{ + string tts_text = 1; + string spk_id = 2; + string instruct_text = 3; +} + +message Response{ + bytes tts_audio = 1; +} \ No newline at end of file diff --git a/runtime/python/path.sh b/runtime/python/path.sh new file mode 100644 index 0000000..f83d12b --- /dev/null +++ b/runtime/python/path.sh @@ -0,0 +1,3 @@ +# NOTE(kan-bayashi): Use UTF-8 in Python to avoid UnicodeDecodeError when LC_ALL=C +export PYTHONIOENCODING=UTF-8 +export PYTHONPATH=../../:../../third_party/AcademiCodec:../../third_party/Matcha-TTS:$PYTHONPATH diff --git a/runtime/python/server.py b/runtime/python/server.py new file mode 100644 index 0000000..7641610 --- /dev/null +++ b/runtime/python/server.py @@ -0,0 +1,85 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
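
# Standalone sketch (not part of the patch) of the int16 <-> float32 audio packing used by
# the gRPC client above and the server below, shown as a round trip on a synthetic waveform.
import numpy as np
import torch

wav = torch.rand(1, 16000) * 2 - 1                              # fake 1 s mono audio in [-1, 1)
payload = (wav.numpy() * (2**15)).astype(np.int16).tobytes()    # what the client sends
restored = torch.from_numpy(
    np.frombuffer(payload, dtype=np.int16).copy()).unsqueeze(0).float() / (2**15)
print(wav.shape, restored.shape)                   # both (1, 16000)
print(float((wav - restored).abs().max()))         # quantization error stays below 1/32768
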
+import os +import sys +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append('{}/../..'.format(ROOT_DIR)) +sys.path.append('{}/../../third_party/AcademiCodec'.format(ROOT_DIR)) +sys.path.append('{}/../../third_party/Matcha-TTS'.format(ROOT_DIR)) +from concurrent import futures +import argparse +import cosyvoice_pb2 +import cosyvoice_pb2_grpc +import logging +logging.getLogger('matplotlib').setLevel(logging.WARNING) +import grpc +import torch +import numpy as np +from cosyvoice.cli.cosyvoice import CosyVoice + +logging.basicConfig(level=logging.DEBUG, + format='%(asctime)s %(levelname)s %(message)s') + +class CosyVoiceServiceImpl(cosyvoice_pb2_grpc.CosyVoiceServicer): + def __init__(self, args): + self.cosyvoice = CosyVoice(args.model_dir) + logging.info('grpc service initialized') + + def Inference(self, request, context): + if request.HasField('sft_request'): + logging.info('get sft inference request') + model_output = self.cosyvoice.inference_sft(request.sft_request.tts_text, request.sft_request.spk_id) + elif request.HasField('zero_shot_request'): + logging.info('get zero_shot inference request') + prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(request.zero_shot_request.prompt_audio, dtype=np.int16))).unsqueeze(dim=0) + prompt_speech_16k = prompt_speech_16k.float() / (2**15) + model_output = self.cosyvoice.inference_zero_shot(request.zero_shot_request.tts_text, request.zero_shot_request.prompt_text, prompt_speech_16k) + elif request.HasField('cross_lingual_request'): + logging.info('get cross_lingual inference request') + prompt_speech_16k = torch.from_numpy(np.array(np.frombuffer(request.cross_lingual_request.prompt_audio, dtype=np.int16))).unsqueeze(dim=0) + prompt_speech_16k = prompt_speech_16k.float() / (2**15) + model_output = self.cosyvoice.inference_cross_lingual(request.cross_lingual_request.tts_text, prompt_speech_16k) + else: + logging.info('get instruct inference request') + model_output = self.cosyvoice.inference_instruct(request.instruct_request.tts_text, request.instruct_request.spk_id, request.instruct_request.instruct_text) + + logging.info('send inference response') + response = cosyvoice_pb2.Response() + response.tts_audio = (model_output['tts_speech'].numpy() * (2 ** 15)).astype(np.int16).tobytes() + return response + +def main(): + grpcServer = grpc.server(futures.ThreadPoolExecutor(max_workers=args.max_conc), maximum_concurrent_rpcs=args.max_conc) + cosyvoice_pb2_grpc.add_CosyVoiceServicer_to_server(CosyVoiceServiceImpl(args), grpcServer) + grpcServer.add_insecure_port('0.0.0.0:{}'.format(args.port)) + grpcServer.start() + logging.info("server listening on 0.0.0.0:{}".format(args.port)) + grpcServer.wait_for_termination() + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--port', + type=int, + default=50000) + parser.add_argument('--max_conc', + type=int, + default=4) + parser.add_argument('--model_dir', + type=str, + required=True, + default='speech_tts/CosyVoice-300M', + help='local path or modelscope repo id') + args = parser.parse_args() + main() diff --git a/test.py b/test.py new file mode 100644 index 0000000..df16559 --- /dev/null +++ b/test.py @@ -0,0 +1,9 @@ +from cosyvoice.cli.cosyvoice import CosyVoice +from cosyvoice.utils.file_utils import load_wav +import torchaudio + +cosyvoice = CosyVoice('./pretrained_models/CosyVoice-300M-SFT') +# sft usage +print(cosyvoice.list_avaliable_spks()) +output = cosyvoice.inference_sft('你好,我是通义生成式语音大模型,请问有什么可以帮您的吗?', '中文女') 
+torchaudio.save('sft.wav', output['tts_speech'], 22050) \ No newline at end of file diff --git a/third_party/AcademiCodec/.gitignore b/third_party/AcademiCodec/.gitignore new file mode 100644 index 0000000..f36f86a --- /dev/null +++ b/third_party/AcademiCodec/.gitignore @@ -0,0 +1,3 @@ +ckpt +outputdir +__pycache__ diff --git a/third_party/AcademiCodec/academicodec/__init__.py b/third_party/AcademiCodec/academicodec/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/AcademiCodec/academicodec/binary.py b/third_party/AcademiCodec/academicodec/binary.py new file mode 100644 index 0000000..862cb46 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/binary.py @@ -0,0 +1,155 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Raw binary format for Encodec compressed audio. Actual compression API is in `encodec.compress`.""" +import io +import json +import struct +import typing as tp + +# format is `ECDC` magic code, followed by the header size as uint32. +# Then an uint8 indicates the protocol version (0.) +# The header is then provided as json and should contain all required +# informations for decoding. A raw stream of bytes is then provided +# and should be interpretable using the json header. +_encodec_header_struct = struct.Struct('!4sBI') +_ENCODEC_MAGIC = b'ECDC' + + +def write_ecdc_header(fo: tp.IO[bytes], metadata: tp.Any): + meta_dumped = json.dumps(metadata).encode('utf-8') + version = 0 + header = _encodec_header_struct.pack(_ENCODEC_MAGIC, version, + len(meta_dumped)) + fo.write(header) + fo.write(meta_dumped) + fo.flush() + + +def _read_exactly(fo: tp.IO[bytes], size: int) -> bytes: + buf = b"" + while len(buf) < size: + new_buf = fo.read(size) + if not new_buf: + raise EOFError("Impossible to read enough data from the stream, " + f"{size} bytes remaining.") + buf += new_buf + size -= len(new_buf) + return buf + + +def read_ecdc_header(fo: tp.IO[bytes]): + header_bytes = _read_exactly(fo, _encodec_header_struct.size) + magic, version, meta_size = _encodec_header_struct.unpack(header_bytes) + if magic != _ENCODEC_MAGIC: + raise ValueError("File is not in ECDC format.") + if version != 0: + raise ValueError("Version not supported.") + meta_bytes = _read_exactly(fo, meta_size) + return json.loads(meta_bytes.decode('utf-8')) + + +class BitPacker: + """Simple bit packer to handle ints with a non standard width, e.g. 10 bits. + Note that for some bandwidth (1.5, 3), the codebook representation + will not cover an integer number of bytes. + + Args: + bits (int): number of bits per value that will be pushed. + fo (IO[bytes]): file-object to push the bytes to. + """ + + def __init__(self, bits: int, fo: tp.IO[bytes]): + self._current_value = 0 + self._current_bits = 0 + self.bits = bits + self.fo = fo + + def push(self, value: int): + """Push a new value to the stream. 
This will immediately + write as many uint8 as possible to the underlying file-object.""" + self._current_value += (value << self._current_bits) + self._current_bits += self.bits + while self._current_bits >= 8: + lower_8bits = self._current_value & 0xff + self._current_bits -= 8 + self._current_value >>= 8 + self.fo.write(bytes([lower_8bits])) + + def flush(self): + """Flushes the remaining partial uint8, call this at the end + of the stream to encode.""" + if self._current_bits: + self.fo.write(bytes([self._current_value])) + self._current_value = 0 + self._current_bits = 0 + self.fo.flush() + + +class BitUnpacker: + """BitUnpacker does the opposite of `BitPacker`. + + Args: + bits (int): number of bits of the values to decode. + fo (IO[bytes]): file-object to push the bytes to. + """ + + def __init__(self, bits: int, fo: tp.IO[bytes]): + self.bits = bits + self.fo = fo + self._mask = (1 << bits) - 1 + self._current_value = 0 + self._current_bits = 0 + + def pull(self) -> tp.Optional[int]: + """ + Pull a single value from the stream, potentially reading some + extra bytes from the underlying file-object. + Returns `None` when reaching the end of the stream. + """ + while self._current_bits < self.bits: + buf = self.fo.read(1) + if not buf: + return None + character = buf[0] + self._current_value += character << self._current_bits + self._current_bits += 8 + + out = self._current_value & self._mask + self._current_value >>= self.bits + self._current_bits -= self.bits + return out + + +def test(): + import torch + torch.manual_seed(1234) + for rep in range(4): + length: int = torch.randint(10, 2_000, (1, )).item() + bits: int = torch.randint(1, 16, (1, )).item() + tokens: tp.List[int] = torch.randint(2**bits, (length, )).tolist() + rebuilt: tp.List[int] = [] + buf = io.BytesIO() + packer = BitPacker(bits, buf) + for token in tokens: + packer.push(token) + packer.flush() + buf.seek(0) + unpacker = BitUnpacker(bits, buf) + while True: + value = unpacker.pull() + if value is None: + break + rebuilt.append(value) + assert len(rebuilt) >= len(tokens), (len(rebuilt), len(tokens)) + # The flushing mechanism might lead to "ghost" values at the end of the stream. 
+ assert len(rebuilt) <= len(tokens) + 8 // bits, (len(rebuilt), + len(tokens), bits) + for idx, (a, b) in enumerate(zip(tokens, rebuilt)): + assert a == b, (idx, a, b) + + +if __name__ == '__main__': + test() diff --git a/third_party/AcademiCodec/academicodec/models/encodec/__init__.py b/third_party/AcademiCodec/academicodec/models/encodec/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/AcademiCodec/academicodec/models/encodec/dataset.py b/third_party/AcademiCodec/academicodec/models/encodec/dataset.py new file mode 100644 index 0000000..e63d00d --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/dataset.py @@ -0,0 +1,32 @@ +import glob +import random + +import torch +import torchaudio +from torch.utils.data import Dataset + + +class NSynthDataset(Dataset): + """Dataset to load NSynth data.""" + + def __init__(self, audio_dir): + super().__init__() + self.filenames = [] + self.filenames.extend(glob.glob(audio_dir + "/*.wav")) + print(len(self.filenames)) + _, self.sr = torchaudio.load(self.filenames[0]) + self.max_len = 24000 # 24000 + + def __len__(self): + return len(self.filenames) + + def __getitem__(self, index): + ans = torch.zeros(1, self.max_len) + audio = torchaudio.load(self.filenames[index])[0] + if audio.shape[1] > self.max_len: + st = random.randint(0, audio.shape[1] - self.max_len - 1) + ed = st + self.max_len + return audio[:, st:ed] + else: + ans[:, :audio.shape[1]] = audio + return ans diff --git a/third_party/AcademiCodec/academicodec/models/encodec/distributed/distributed.py b/third_party/AcademiCodec/academicodec/models/encodec/distributed/distributed.py new file mode 100644 index 0000000..a02aff2 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/distributed/distributed.py @@ -0,0 +1,149 @@ +# ------------------------------------------ +# Diffsound +# code based https://github.com/cientgu/VQ-Diffusion +# ------------------------------------------ +import pickle + +import torch +from torch import distributed as dist +from torch.utils import data + +LOCAL_PROCESS_GROUP = None + + +def is_primary(): + return get_rank() == 0 + + +def get_rank(): + if not dist.is_available(): + return 0 + + if not dist.is_initialized(): + return 0 + + return dist.get_rank() + + +def get_local_rank(): + if not dist.is_available(): + return 0 + + if not dist.is_initialized(): + return 0 + + if LOCAL_PROCESS_GROUP is None: + raise ValueError("tensorfn.distributed.LOCAL_PROCESS_GROUP is None") + + return dist.get_rank(group=LOCAL_PROCESS_GROUP) + + +def synchronize(): + if not dist.is_available(): + return + + if not dist.is_initialized(): + return + + world_size = dist.get_world_size() + + if world_size == 1: + return + + dist.barrier() + + +def get_world_size(): + if not dist.is_available(): + return 1 + + if not dist.is_initialized(): + return 1 + + return dist.get_world_size() + + +def is_distributed(): + raise RuntimeError('Please debug this function!') + return get_world_size() > 1 + + +def all_reduce(tensor, op=dist.ReduceOp.SUM, async_op=False): + world_size = get_world_size() + + if world_size == 1: + return tensor + dist.all_reduce(tensor, op=op, async_op=async_op) + + return tensor + + +def all_gather(data): + world_size = get_world_size() + + if world_size == 1: + return [data] + + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to("cuda") + + local_size = torch.IntTensor([tensor.numel()]).to("cuda") + size_list = 
[torch.IntTensor([1]).to("cuda") for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + tensor_list = [] + for _ in size_list: + tensor_list.append(torch.ByteTensor(size=(max_size, )).to("cuda")) + + if local_size != max_size: + padding = torch.ByteTensor(size=(max_size - local_size, )).to("cuda") + tensor = torch.cat((tensor, padding), 0) + + dist.all_gather(tensor_list, tensor) + + data_list = [] + + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def reduce_dict(input_dict, average=True): + world_size = get_world_size() + + if world_size < 2: + return input_dict + + with torch.no_grad(): + keys = [] + values = [] + + for k in sorted(input_dict.keys()): + keys.append(k) + values.append(input_dict[k]) + + values = torch.stack(values, 0) + dist.reduce(values, dst=0) + + if dist.get_rank() == 0 and average: + values /= world_size + + reduced_dict = {k: v for k, v in zip(keys, values)} + + return reduced_dict + + +def data_sampler(dataset, shuffle, distributed): + if distributed: + return data.distributed.DistributedSampler(dataset, shuffle=shuffle) + + if shuffle: + return data.RandomSampler(dataset) + + else: + return data.SequentialSampler(dataset) diff --git a/third_party/AcademiCodec/academicodec/models/encodec/distributed/launch.py b/third_party/AcademiCodec/academicodec/models/encodec/distributed/launch.py new file mode 100644 index 0000000..ca230f8 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/distributed/launch.py @@ -0,0 +1,106 @@ +# ------------------------------------------ +# Diffsound +# code based https://github.com/cientgu/VQ-Diffusion +# ------------------------------------------ +import distributed.distributed as dist_fn +import torch +from torch import distributed as dist +from torch import multiprocessing as mp + +# import distributed as dist_fn + + +def find_free_port(): + import socket + + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + sock.bind(("", 0)) + port = sock.getsockname()[1] + sock.close() + + return port + + +def launch(fn, + n_gpu_per_machine, + n_machine=1, + machine_rank=0, + dist_url=None, + args=()): + world_size = n_machine * n_gpu_per_machine + + if world_size > 1: + # if "OMP_NUM_THREADS" not in os.environ: + # os.environ["OMP_NUM_THREADS"] = "1" + if dist_url == "auto": + if n_machine != 1: + raise ValueError( + 'dist_url="auto" not supported in multi-machine jobs') + port = find_free_port() + dist_url = f"tcp://127.0.0.1:{port}" + print('dist_url ', dist_url) + print('n_machine ', n_machine) + print('args ', args) + print('world_size ', world_size) + print('machine_rank ', machine_rank) + if n_machine > 1 and dist_url.startswith("file://"): + raise ValueError( + "file:// is not a reliable init method in multi-machine jobs. Prefer tcp://" + ) + + mp.spawn( + distributed_worker, + nprocs=n_gpu_per_machine, + args=(fn, world_size, n_gpu_per_machine, machine_rank, dist_url, + args), + daemon=False, ) + # n_machine ? world_size + else: + local_rank = 0 + fn(local_rank, *args) + + +def distributed_worker(local_rank, fn, world_size, n_gpu_per_machine, + machine_rank, dist_url, args): + if not torch.cuda.is_available(): + raise OSError("CUDA is not available. 
Please check your environments") + + global_rank = machine_rank * n_gpu_per_machine + local_rank + print('local_rank ', local_rank) + print('global_rank ', global_rank) + try: + dist.init_process_group( + backend="NCCL", + init_method=dist_url, + world_size=world_size, + rank=global_rank, ) + + except Exception: + raise OSError("failed to initialize NCCL groups") + + # changed + dist_fn.synchronize() + + if n_gpu_per_machine > torch.cuda.device_count(): + raise ValueError( + f"specified n_gpu_per_machine larger than available device ({torch.cuda.device_count()})" + ) + + torch.cuda.set_device(local_rank) + + if dist_fn.LOCAL_PROCESS_GROUP is not None: + raise ValueError("torch.distributed.LOCAL_PROCESS_GROUP is not None") + + # change paert + + n_machine = world_size // n_gpu_per_machine + for i in range(n_machine): + ranks_on_i = list( + range(i * n_gpu_per_machine, (i + 1) * n_gpu_per_machine)) + pg = dist.new_group(ranks_on_i) + + if i == machine_rank: + dist_fn.LOCAL_PROCESS_GROUP = pg + + fn(local_rank, *args) diff --git a/third_party/AcademiCodec/academicodec/models/encodec/loss.py b/third_party/AcademiCodec/academicodec/models/encodec/loss.py new file mode 100644 index 0000000..6b2292d --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/loss.py @@ -0,0 +1,231 @@ +import torch +import torch.nn.functional as F +from torchaudio.transforms import MelSpectrogram + + +def adversarial_g_loss(y_disc_gen): + """Hinge loss""" + loss = 0.0 + for i in range(len(y_disc_gen)): + stft_loss = F.relu(1 - y_disc_gen[i]).mean().squeeze() + loss += stft_loss + return loss / len(y_disc_gen) + + +def feature_loss(fmap_r, fmap_gen): + loss = 0.0 + for i in range(len(fmap_r)): + for j in range(len(fmap_r[i])): + stft_loss = ((fmap_r[i][j] - fmap_gen[i][j]).abs() / + (fmap_r[i][j].abs().mean())).mean() + loss += stft_loss + return loss / (len(fmap_r) * len(fmap_r[0])) + + +def sim_loss(y_disc_r, y_disc_gen): + loss = 0.0 + for i in range(len(y_disc_r)): + loss += F.mse_loss(y_disc_r[i], y_disc_gen[i]) + return loss / len(y_disc_r) + +# def sisnr_loss(x, s, eps=1e-8): + # """ + # calculate training loss + # input: + # x: separated signal, N x S tensor, estimate value + # s: reference signal, N x S tensor, True value + # Return: + # sisnr: N tensor + # """ + # if x.shape != s.shape: + # if x.shape[-1] > s.shape[-1]: + # x = x[:, :s.shape[-1]] + # else: + # s = s[:, :x.shape[-1]] + # def l2norm(mat, keepdim=False): + # return torch.norm(mat, dim=-1, keepdim=keepdim) + # if x.shape != s.shape: + # raise RuntimeError( + # "Dimention mismatch when calculate si-snr, {} vs {}".format( + # x.shape, s.shape)) + # x_zm = x - torch.mean(x, dim=-1, keepdim=True) + # s_zm = s - torch.mean(s, dim=-1, keepdim=True) + # t = torch.sum( + # x_zm * s_zm, dim=-1, + # keepdim=True) * s_zm / (l2norm(s_zm, keepdim=True)**2 + eps) + # loss = -20. 
* torch.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps)) + # return torch.sum(loss) / x.shape[0] + + +def reconstruction_loss(x, G_x, args, eps=1e-7): + # NOTE (lsx): hard-coded now + L = args.LAMBDA_WAV * F.mse_loss(x, G_x) # wav L1 loss + # loss_sisnr = sisnr_loss(G_x, x) # + # L += 0.01*loss_sisnr + # 2^6=64 -> 2^10=1024 + # NOTE (lsx): add 2^11 + for i in range(6, 12): + # for i in range(5, 12): # Encodec setting + s = 2**i + melspec = MelSpectrogram( + sample_rate=args.sr, + n_fft=max(s, 512), + win_length=s, + hop_length=s // 4, + n_mels=64, + wkwargs={"device": args.device}).to(args.device) + S_x = melspec(x) + S_G_x = melspec(G_x) + l1_loss = (S_x - S_G_x).abs().mean() + l2_loss = (((torch.log(S_x.abs() + eps) - torch.log(S_G_x.abs() + eps))**2).mean(dim=-2)**0.5).mean() + + alpha = (s / 2) ** 0.5 + L += (l1_loss + alpha * l2_loss) + return L + + +def criterion_d(y_disc_r, y_disc_gen, fmap_r_det, fmap_gen_det, y_df_hat_r, + y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, y_ds_hat_g, + fmap_s_r, fmap_s_g): + """Hinge Loss""" + loss = 0.0 + loss1 = 0.0 + loss2 = 0.0 + loss3 = 0.0 + for i in range(len(y_disc_r)): + loss1 += F.relu(1 - y_disc_r[i]).mean() + F.relu(1 + y_disc_gen[ + i]).mean() + for i in range(len(y_df_hat_r)): + loss2 += F.relu(1 - y_df_hat_r[i]).mean() + F.relu(1 + y_df_hat_g[ + i]).mean() + for i in range(len(y_ds_hat_r)): + loss3 += F.relu(1 - y_ds_hat_r[i]).mean() + F.relu(1 + y_ds_hat_g[ + i]).mean() + + loss = (loss1 / len(y_disc_gen) + loss2 / len(y_df_hat_r) + loss3 / + len(y_ds_hat_r)) / 3.0 + + return loss + + +def criterion_g(commit_loss, x, G_x, fmap_r, fmap_gen, y_disc_r, y_disc_gen, + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, + y_ds_hat_g, fmap_s_r, fmap_s_g, args): + adv_g_loss = adversarial_g_loss(y_disc_gen) + feat_loss = (feature_loss(fmap_r, fmap_gen) + sim_loss( + y_disc_r, y_disc_gen) + feature_loss(fmap_f_r, fmap_f_g) + sim_loss( + y_df_hat_r, y_df_hat_g) + feature_loss(fmap_s_r, fmap_s_g) + + sim_loss(y_ds_hat_r, y_ds_hat_g)) / 3.0 + rec_loss = reconstruction_loss(x.contiguous(), G_x.contiguous(), args) + total_loss = args.LAMBDA_COM * commit_loss + args.LAMBDA_ADV * adv_g_loss + args.LAMBDA_FEAT * feat_loss + args.LAMBDA_REC * rec_loss + return total_loss, adv_g_loss, feat_loss, rec_loss + + +def adopt_weight(weight, global_step, threshold=0, value=0.): + if global_step < threshold: + weight = value + return weight + + +def adopt_dis_weight(weight, global_step, threshold=0, value=0.): + # 0,3,6,9,13....这些时间步,不更新dis + if global_step % 3 == 0: + weight = value + return weight + + +def calculate_adaptive_weight(nll_loss, g_loss, last_layer, args): + if last_layer is not None: + nll_grads = torch.autograd.grad( + nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + print('last_layer cannot be none') + assert 1 == 2 + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 1.0, 1.0).detach() + d_weight = d_weight * args.LAMBDA_ADV + return d_weight + + +def loss_g(codebook_loss, + inputs, + reconstructions, + fmap_r, + fmap_gen, + y_disc_r, + y_disc_gen, + global_step, + y_df_hat_r, + y_df_hat_g, + y_ds_hat_r, + y_ds_hat_g, + fmap_f_r, + fmap_f_g, + fmap_s_r, + fmap_s_g, + last_layer=None, + is_training=True, + args=None): + """ + args: + codebook_loss: commit loss. + inputs: ground-truth wav. + reconstructions: reconstructed wav. + fmap_r: real stft-D feature map. + fmap_gen: fake stft-D feature map. 
+ y_disc_r: real stft-D logits. + y_disc_gen: fake stft-D logits. + global_step: global training step. + y_df_hat_r: real MPD logits. + y_df_hat_g: fake MPD logits. + y_ds_hat_r: real MSD logits. + y_ds_hat_g: fake MSD logits. + fmap_f_r: real MPD feature map. + fmap_f_g: fake MPD feature map. + fmap_s_r: real MSD feature map. + fmap_s_g: fake MSD feature map. + """ + rec_loss = reconstruction_loss(inputs.contiguous(), + reconstructions.contiguous(), args) + adv_g_loss = adversarial_g_loss(y_disc_gen) + adv_mpd_loss = adversarial_g_loss(y_df_hat_g) + adv_msd_loss = adversarial_g_loss(y_ds_hat_g) + adv_loss = (adv_g_loss + adv_mpd_loss + adv_msd_loss + ) / 3.0 # NOTE(lsx): need to divide by 3? + feat_loss = feature_loss( + fmap_r, + fmap_gen) #+ sim_loss(y_disc_r, y_disc_gen) # NOTE(lsx): need logits? + feat_loss_mpd = feature_loss(fmap_f_r, + fmap_f_g) #+ sim_loss(y_df_hat_r, y_df_hat_g) + feat_loss_msd = feature_loss(fmap_s_r, + fmap_s_g) #+ sim_loss(y_ds_hat_r, y_ds_hat_g) + feat_loss_tot = (feat_loss + feat_loss_mpd + feat_loss_msd) / 3.0 + d_weight = torch.tensor(1.0) + # try: + # d_weight = calculate_adaptive_weight(rec_loss, adv_g_loss, last_layer, args) # 动态调整重构损失和对抗损失 + # except RuntimeError: + # assert not is_training + # d_weight = torch.tensor(0.0) + disc_factor = adopt_weight( + args.LAMBDA_ADV, global_step, threshold=args.discriminator_iter_start) + if disc_factor == 0.: + fm_loss_wt = 0 + else: + fm_loss_wt = args.LAMBDA_FEAT + #feat_factor = adopt_weight(args.LAMBDA_FEAT, global_step, threshold=args.discriminator_iter_start) + loss = rec_loss + d_weight * disc_factor * adv_loss + \ + fm_loss_wt * feat_loss_tot + args.LAMBDA_COM * codebook_loss + return loss, rec_loss, adv_loss, feat_loss_tot, d_weight + + +def loss_dis(y_disc_r_det, y_disc_gen_det, fmap_r_det, fmap_gen_det, y_df_hat_r, + y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, y_ds_hat_g, fmap_s_r, + fmap_s_g, global_step, args): + disc_factor = adopt_weight( + args.LAMBDA_ADV, global_step, threshold=args.discriminator_iter_start) + d_loss = disc_factor * criterion_d(y_disc_r_det, y_disc_gen_det, fmap_r_det, + fmap_gen_det, y_df_hat_r, y_df_hat_g, + fmap_f_r, fmap_f_g, y_ds_hat_r, + y_ds_hat_g, fmap_s_r, fmap_s_g) + return d_loss diff --git a/third_party/AcademiCodec/academicodec/models/encodec/main_launch.py b/third_party/AcademiCodec/academicodec/models/encodec/main_launch.py new file mode 100644 index 0000000..0cd2c68 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/main_launch.py @@ -0,0 +1,463 @@ +import argparse +import itertools +import os +import time + +import torch +import torch.distributed as dist +from academicodec.models.encodec.dataset import NSynthDataset +from academicodec.models.encodec.loss import criterion_d +from academicodec.models.encodec.loss import criterion_g +from academicodec.models.encodec.loss import loss_dis +from academicodec.models.encodec.loss import loss_g +from academicodec.models.encodec.msstftd import MultiScaleSTFTDiscriminator +from academicodec.models.encodec.net3 import SoundStream +from academicodec.models.soundstream.models import MultiPeriodDiscriminator +from academicodec.models.soundstream.models import MultiScaleDiscriminator +from academicodec.utils import Logger +from academicodec.utils import seed_everything +from torch.nn.parallel import DistributedDataParallel as DDP +from tqdm import tqdm + + +def getModelSize(model): + param_size = 0 + param_sum = 0 + for param in model.parameters(): + param_size += param.nelement() * param.element_size() + 
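# NOTE: raw element counts are accumulated alongside the byte sizes; the total below is reported in MB
+ 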
param_sum += param.nelement() + buffer_size = 0 + buffer_sum = 0 + for buffer in model.buffers(): + buffer_size += buffer.nelement() * buffer.element_size() + buffer_sum += buffer.nelement() + all_size = (param_size + buffer_size) / 1024 / 1024 + print('Total model size: {:.3f} MB'.format(all_size)) + return (param_size, param_sum, buffer_size, buffer_sum, all_size) + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--local_rank', + default=-1, + type=int, + help='node rank for distributed training') + # args for random + parser.add_argument( + '--seed', + type=int, + default=6666, + help='seed for initializing training. ') + parser.add_argument( + '--cudnn_deterministic', + action='store_true', + help='set cudnn.deterministic True') + parser.add_argument( + '--tensorboard', + action='store_true', + help='use tensorboard for logging') + + # args for training + parser.add_argument( + '--LAMBDA_WAV', + type=float, + default=100, + help='hyper-parameter for wav time-domain loss') + parser.add_argument( + '--LAMBDA_ADV', + type=float, + default=1, + help='hyper-parameter for adversarial loss') + parser.add_argument( + '--LAMBDA_FEAT', + type=float, + default=1, + help='hyper-parameter for feature loss') + parser.add_argument( + '--LAMBDA_REC', + type=float, + default=1, + help='hyper-parameter for reconstruction loss') + parser.add_argument( + '--LAMBDA_COM', + type=float, + default=1000, + help='hyper-parameter for commitment loss') + parser.add_argument( + '--N_EPOCHS', type=int, default=100, help='total number of training epochs') + parser.add_argument( + '--st_epoch', type=int, default=0, help='start training epoch') + parser.add_argument( + '--global_step', type=int, default=0, help='record the global step') + parser.add_argument('--discriminator_iter_start', type=int, default=500) + parser.add_argument('--BATCH_SIZE', type=int, default=10, help='batch size') + parser.add_argument( + '--PATH', type=str, default='model_path', help='model save path') + parser.add_argument('--sr', type=int, default=16000, help='sample rate') + parser.add_argument( + '--print_freq', type=int, default=10, help='logging frequency (in iterations)') + parser.add_argument( + '--save_dir', type=str, default='log', help='log save path') + parser.add_argument( + '--train_data_path', + type=str, + # default='/apdcephfs_cq2/share_1297902/speech_user/shaunxliu/dongchao/code4/InstructTTS2/data_process/soundstream_data/train16k.lst', + default="/apdcephfs_cq2/share_1297902/speech_user/shaunxliu/data/codec_data_24k/train_valid_lists/train.lst", + help='training data') + parser.add_argument( + '--valid_data_path', + type=str, + # default='/apdcephfs_cq2/share_1297902/speech_user/shaunxliu/dongchao/code4/InstructTTS2/data_process/soundstream_data/val16k.lst', + default="/apdcephfs_cq2/share_1297902/speech_user/shaunxliu/data/codec_data_24k/train_valid_lists/valid_256.lst", + help='validation data') + parser.add_argument( + '--resume', action='store_true', help='whether to resume training') + parser.add_argument( + '--resume_path', type=str, default=None, help='resume_path') + parser.add_argument( + '--ratios', + type=int, + nargs='+', + # prod(ratios) = hop_size + default=[8, 5, 4, 2], + help='ratios of SoundStream, should be set for different hop_size (32d, 320, 240d, ...)' + ) + parser.add_argument( + '--target_bandwidths', + type=float, + nargs='+', + # default for 16k_320d + default=[1, 1.5, 2, 4, 6, 12], + help='target_bandwidths of net3.py') + args = parser.parse_args() + time_str = time.strftime('%Y-%m-%d-%H-%M') + if args.resume: + args.PATH = 
args.resume_path # directly reuse the old model path + else: + args.PATH = os.path.join(args.PATH, time_str) + args.save_dir = os.path.join(args.save_dir, time_str) + os.makedirs(args.PATH, exist_ok=True) + return args + + +def get_input(x): + x = x.to(memory_format=torch.contiguous_format) + return x.float() + + +def main(): + args = get_args() + if args.seed is not None or args.cudnn_deterministic: + seed_everything(args.seed, args.cudnn_deterministic) + args.ngpus_per_node = torch.cuda.device_count() + main_worker(args.local_rank, args) + + +def main_worker(local_rank, args): + rank = local_rank + args.local_rank = local_rank + args.global_rank = local_rank + args.distributed = args.ngpus_per_node > 1 + + if args.ngpus_per_node > 1: + from torch.distributed import init_process_group + torch.cuda.set_device(local_rank) + init_process_group(backend='nccl') + + #CUDA_VISIBLE_DEVICES = int(args.local_rank) + logger = Logger(args) + soundstream = SoundStream( + n_filters=32, + D=512, + ratios=args.ratios, + sample_rate=args.sr, + target_bandwidths=args.target_bandwidths) + msd = MultiScaleDiscriminator() + mpd = MultiPeriodDiscriminator() + stft_disc = MultiScaleSTFTDiscriminator(filters=32) + + if logger.is_primary: + getModelSize(soundstream) + getModelSize(msd) + getModelSize(mpd) + getModelSize(stft_disc) + + if args.distributed: + soundstream = torch.nn.SyncBatchNorm.convert_sync_batchnorm(soundstream) + stft_disc = torch.nn.SyncBatchNorm.convert_sync_batchnorm(stft_disc) + msd = torch.nn.SyncBatchNorm.convert_sync_batchnorm(msd) + mpd = torch.nn.SyncBatchNorm.convert_sync_batchnorm(mpd) + + # torch.distributed.barrier() + args.device = torch.device('cuda', args.local_rank) + soundstream.to(args.device) + stft_disc.to(args.device) + msd.to(args.device) + mpd.to(args.device) + find_unused_parameters = False + if args.distributed: + soundstream = DDP( + soundstream, + device_ids=[args.local_rank], + find_unused_parameters=find_unused_parameters + ) # device_ids=[args.local_rank], output_device=args.local_rank + stft_disc = DDP(stft_disc, + device_ids=[args.local_rank], + find_unused_parameters=find_unused_parameters) + msd = DDP(msd, + device_ids=[args.local_rank], + find_unused_parameters=find_unused_parameters) + mpd = DDP(mpd, + device_ids=[args.local_rank], + find_unused_parameters=find_unused_parameters) + # TODO: revisit the sample-rate handling here; if the sample rate of the input wavs differs from `--sr`, there will be problems + logger.log_info('Training set') + train_dataset = NSynthDataset(audio_dir=args.train_data_path) + logger.log_info('Validation set') + valid_dataset = NSynthDataset(audio_dir=args.valid_data_path) + args.sr = train_dataset.sr + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler( + train_dataset, drop_last=True, shuffle=True) + valid_sampler = torch.utils.data.distributed.DistributedSampler( + valid_dataset) + else: + train_sampler = None + valid_sampler = None + train_loader = torch.utils.data.DataLoader( + train_dataset, + batch_size=args.BATCH_SIZE, + num_workers=8, + sampler=train_sampler) + valid_loader = torch.utils.data.DataLoader( + valid_dataset, + batch_size=args.BATCH_SIZE, + num_workers=8, + sampler=valid_sampler) + logger.log_info("Build optimizers and lr-schedulers") + optimizer_g = torch.optim.AdamW( + soundstream.parameters(), lr=3e-4, betas=(0.5, 0.9)) + lr_scheduler_g = torch.optim.lr_scheduler.ExponentialLR( + optimizer_g, gamma=0.999) + optimizer_d = torch.optim.AdamW( + itertools.chain(stft_disc.parameters(), + msd.parameters(), mpd.parameters()), + lr=3e-4, + betas=(0.5, 0.9)) + 
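# NOTE: a single AdamW optimizer updates all three discriminators (STFT, MPD, MSD); both optimizers use an exponential LR decay (gamma=0.999) stepped once per epoch
+ 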
lr_scheduler_d = torch.optim.lr_scheduler.ExponentialLR( + optimizer_d, gamma=0.999) + if args.resume: + latest_info = torch.load(args.resume_path + '/latest.pth') + args.st_epoch = latest_info['epoch'] + soundstream.load_state_dict(latest_info['soundstream']) + stft_disc.load_state_dict(latest_info['stft_disc']) + mpd.load_state_dict(latest_info['mpd']) + msd.load_state_dict(latest_info['msd']) + optimizer_g.load_state_dict(latest_info['optimizer_g']) + lr_scheduler_g.load_state_dict(latest_info['lr_scheduler_g']) + optimizer_d.load_state_dict(latest_info['optimizer_d']) + lr_scheduler_d.load_state_dict(latest_info['lr_scheduler_d']) + train(args, soundstream, stft_disc, msd, mpd, train_loader, valid_loader, + optimizer_g, optimizer_d, lr_scheduler_g, lr_scheduler_d, logger) + + +def train(args, soundstream, stft_disc, msd, mpd, train_loader, valid_loader, + optimizer_g, optimizer_d, lr_scheduler_g, lr_scheduler_d, logger): + print('args ', args.global_rank) + best_val_loss = float("inf") + best_val_epoch = -1 + global_step = 0 + for epoch in range(args.st_epoch, args.N_EPOCHS + 1): + soundstream.train() + stft_disc.train() + msd.train() + mpd.train() + train_loss_d = 0.0 + train_adv_g_loss = 0.0 + train_feat_loss = 0.0 + train_rec_loss = 0.0 + train_loss_g = 0.0 + train_commit_loss = 0.0 + k_iter = 0 + if args.distributed: + train_loader.sampler.set_epoch(epoch) + for x in tqdm(train_loader): + x = x.to(args.device) + k_iter += 1 + global_step += 1 # record the global step + for optimizer_idx in [0, 1]: # we have two optimizer + x_wav = get_input(x) + G_x, commit_loss, last_layer = soundstream(x_wav) + if optimizer_idx == 0: + # update generator + y_disc_r, fmap_r = stft_disc(x_wav.contiguous()) + y_disc_gen, fmap_gen = stft_disc(G_x.contiguous()) + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x_wav.contiguous(), G_x.contiguous()) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x_wav.contiguous(), G_x.contiguous()) + total_loss_g, rec_loss, adv_g_loss, feat_loss, d_weight = loss_g( + commit_loss, + x_wav, + G_x, + fmap_r, + fmap_gen, + y_disc_r, + y_disc_gen, + global_step, + y_df_hat_r, + y_df_hat_g, + y_ds_hat_r, + y_ds_hat_g, + fmap_f_r, + fmap_f_g, + fmap_s_r, + fmap_s_g, + last_layer=last_layer, + is_training=True, + args=args) + train_commit_loss += commit_loss.item() + train_loss_g += total_loss_g.item() + train_adv_g_loss += adv_g_loss.item() + train_feat_loss += feat_loss.item() + train_rec_loss += rec_loss.item() + optimizer_g.zero_grad() + total_loss_g.backward() + optimizer_g.step() + else: + # update discriminator + y_disc_r_det, fmap_r_det = stft_disc(x.detach()) + y_disc_gen_det, fmap_gen_det = stft_disc(G_x.detach()) + + # MPD + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x.detach(), G_x.detach()) + #MSD + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x.detach(), G_x.detach()) + + loss_d = loss_dis( + y_disc_r_det, y_disc_gen_det, fmap_r_det, fmap_gen_det, + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, + y_ds_hat_g, fmap_s_r, fmap_s_g, global_step, args) + train_loss_d += loss_d.item() + optimizer_d.zero_grad() + loss_d.backward() + optimizer_d.step() + message = ''.format( + epoch, k_iter, + total_loss_g.item(), + adv_g_loss.item(), + feat_loss.item(), + rec_loss.item(), + commit_loss.item(), loss_d.item(), d_weight.item()) + if k_iter % args.print_freq == 0: + logger.log_info(message) + lr_scheduler_g.step() + lr_scheduler_d.step() + message = ''.format( + epoch, train_loss_g / len(train_loader), train_rec_loss / + 
len(train_loader), train_adv_g_loss / len(train_loader), + train_feat_loss / len(train_loader), + train_commit_loss / len(train_loader)) + logger.log_info(message) + with torch.no_grad(): + soundstream.eval() + stft_disc.eval() + mpd.eval() + msd.eval() + valid_loss_d = 0.0 + valid_loss_g = 0.0 + valid_commit_loss = 0.0 + valid_adv_g_loss = 0.0 + valid_feat_loss = 0.0 + valid_rec_loss = 0.0 + if args.distributed: + valid_loader.sampler.set_epoch(epoch) + for x in tqdm(valid_loader): + x = x.to(args.device) + for optimizer_idx in [0, 1]: + x_wav = get_input(x) + G_x, commit_loss, _ = soundstream(x_wav) + if optimizer_idx == 0: + valid_commit_loss += commit_loss + y_disc_r, fmap_r = stft_disc(x_wav.contiguous()) + y_disc_gen, fmap_gen = stft_disc(G_x.contiguous()) + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x_wav.contiguous(), G_x.contiguous()) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x_wav.contiguous(), G_x.contiguous()) + + total_loss_g, adv_g_loss, feat_loss, rec_loss = criterion_g( + commit_loss, + x_wav, + G_x, + fmap_r, + fmap_gen, + y_disc_r, + y_disc_gen, + y_df_hat_r, + y_df_hat_g, + fmap_f_r, + fmap_f_g, + y_ds_hat_r, + y_ds_hat_g, + fmap_s_r, + fmap_s_g, + args=args) + valid_loss_g += total_loss_g.item() + valid_adv_g_loss += adv_g_loss.item() + valid_feat_loss += feat_loss.item() + valid_rec_loss += rec_loss.item() + else: + y_disc_r_det, fmap_r_det = stft_disc( + x_wav.contiguous().detach()) + y_disc_gen_det, fmap_gen_det = stft_disc( + G_x.contiguous().detach()) + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x_wav.contiguous().detach(), + G_x.contiguous().detach()) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x_wav.contiguous().detach(), + G_x.contiguous().detach()) + loss_d = criterion_d(y_disc_r_det, y_disc_gen_det, + fmap_r_det, fmap_gen_det, + y_df_hat_r, y_df_hat_g, fmap_f_r, + fmap_f_g, y_ds_hat_r, y_ds_hat_g, + fmap_s_r, fmap_s_g) + valid_loss_d += loss_d.item() + if dist.get_rank() == 0: + best_model = soundstream.state_dict().copy() + latest_model_soundstream = soundstream.state_dict().copy() + latest_model_dis = stft_disc.state_dict().copy() + latest_mpd = mpd.state_dict().copy() + latest_msd = msd.state_dict().copy() + if valid_rec_loss < best_val_loss: + best_val_loss = valid_rec_loss + best_val_epoch = epoch + torch.save(best_model, + args.PATH + '/best_' + str(epoch) + '.pth') + latest_save = {} + latest_save['soundstream'] = latest_model_soundstream + latest_save['stft_disc'] = latest_model_dis + latest_save['mpd'] = latest_mpd + latest_save['msd'] = latest_msd + latest_save['epoch'] = epoch + latest_save['optimizer_g'] = optimizer_g.state_dict() + latest_save['optimizer_d'] = optimizer_d.state_dict() + latest_save['lr_scheduler_g'] = lr_scheduler_g.state_dict() + latest_save['lr_scheduler_d'] = lr_scheduler_d.state_dict() + torch.save(latest_save, args.PATH + '/latest.pth') + + message = ''.format( + epoch, valid_loss_g / len(valid_loader), valid_rec_loss / + len(valid_loader), valid_adv_g_loss / len(valid_loader), + valid_feat_loss / len(valid_loader), + valid_commit_loss / len(valid_loader), + valid_loss_d / len(valid_loader), best_val_epoch) + logger.log_info(message) + + +if __name__ == '__main__': + main() diff --git a/third_party/AcademiCodec/academicodec/models/encodec/msstftd.py b/third_party/AcademiCodec/academicodec/models/encodec/msstftd.py new file mode 100644 index 0000000..9f88018 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/msstftd.py @@ -0,0 +1,198 @@ +# Copyright (c) Meta 
Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""MS-STFT discriminator, provided here for reference.""" +import typing as tp + +import torch +import torchaudio +from einops import rearrange +from torch import nn + +from academicodec.modules import NormConv2d + +FeatureMapType = tp.List[torch.Tensor] +LogitsType = torch.Tensor +DiscriminatorOutput = tp.Tuple[tp.List[LogitsType], tp.List[FeatureMapType]] + + +def get_2d_padding(kernel_size: tp.Tuple[int, int], + dilation: tp.Tuple[int, int]=(1, 1)): + return (((kernel_size[0] - 1) * dilation[0]) // 2, ( + (kernel_size[1] - 1) * dilation[1]) // 2) + + +class DiscriminatorSTFT(nn.Module): + """STFT sub-discriminator. + Args: + filters (int): Number of filters in convolutions + in_channels (int): Number of input channels. Default: 1 + out_channels (int): Number of output channels. Default: 1 + n_fft (int): Size of FFT for each scale. Default: 1024 + hop_length (int): Length of hop between STFT windows for each scale. Default: 256 + kernel_size (tuple of int): Inner Conv2d kernel sizes. Default: ``(3, 9)`` + stride (tuple of int): Inner Conv2d strides. Default: ``(1, 2)`` + dilations (list of int): Inner Conv2d dilation on the time dimension. Default: ``[1, 2, 4]`` + win_length (int): Window size for each scale. Default: 1024 + normalized (bool): Whether to normalize by magnitude after stft. Default: True + norm (str): Normalization method. Default: `'weight_norm'` + activation (str): Activation function. Default: `'LeakyReLU'` + activation_params (dict): Parameters to provide to the activation function. + growth (int): Growth factor for the filters. Default: 1 + """ + + def __init__(self, + filters: int, + in_channels: int=1, + out_channels: int=1, + n_fft: int=1024, + hop_length: int=256, + win_length: int=1024, + max_filters: int=1024, + filters_scale: int=1, + kernel_size: tp.Tuple[int, int]=(3, 9), + dilations: tp.List=[1, 2, 4], + stride: tp.Tuple[int, int]=(1, 2), + normalized: bool=True, + norm: str='weight_norm', + activation: str='LeakyReLU', + activation_params: dict={'negative_slope': 0.2}): + super().__init__() + assert len(kernel_size) == 2 + assert len(stride) == 2 + self.filters = filters + self.in_channels = in_channels + self.out_channels = out_channels + self.n_fft = n_fft + self.hop_length = hop_length + self.win_length = win_length + self.normalized = normalized + self.activation = getattr(torch.nn, activation)(**activation_params) + self.spec_transform = torchaudio.transforms.Spectrogram( + n_fft=self.n_fft, + hop_length=self.hop_length, + win_length=self.win_length, + window_fn=torch.hann_window, + normalized=self.normalized, + center=False, + pad_mode=None, + power=None) + spec_channels = 2 * self.in_channels + self.convs = nn.ModuleList() + self.convs.append( + NormConv2d( + spec_channels, + self.filters, + kernel_size=kernel_size, + padding=get_2d_padding(kernel_size))) + in_chs = min(filters_scale * self.filters, max_filters) + for i, dilation in enumerate(dilations): + out_chs = min((filters_scale**(i + 1)) * self.filters, max_filters) + self.convs.append( + NormConv2d( + in_chs, + out_chs, + kernel_size=kernel_size, + stride=stride, + dilation=(dilation, 1), + padding=get_2d_padding(kernel_size, (dilation, 1)), + norm=norm)) + in_chs = out_chs + out_chs = min((filters_scale**(len(dilations) + 1)) * self.filters, + max_filters) + self.convs.append( + NormConv2d( + in_chs, + out_chs, + 
kernel_size=(kernel_size[0], kernel_size[0]), + padding=get_2d_padding((kernel_size[0], kernel_size[0])), + norm=norm)) + self.conv_post = NormConv2d( + out_chs, + self.out_channels, + kernel_size=(kernel_size[0], kernel_size[0]), + padding=get_2d_padding((kernel_size[0], kernel_size[0])), + norm=norm) + + def forward(self, x: torch.Tensor): + fmap = [] + # print('x ', x.shape) + z = self.spec_transform(x) # [B, 2, Freq, Frames, 2] + # print('z ', z.shape) + z = torch.cat([z.real, z.imag], dim=1) + # print('cat_z ', z.shape) + z = rearrange(z, 'b c w t -> b c t w') + for i, layer in enumerate(self.convs): + z = layer(z) + z = self.activation(z) + # print('z i', i, z.shape) + fmap.append(z) + z = self.conv_post(z) + # print('logit ', z.shape) + return z, fmap + + +class MultiScaleSTFTDiscriminator(nn.Module): + """Multi-Scale STFT (MS-STFT) discriminator. + Args: + filters (int): Number of filters in convolutions + in_channels (int): Number of input channels. Default: 1 + out_channels (int): Number of output channels. Default: 1 + n_ffts (Sequence[int]): Size of FFT for each scale + hop_lengths (Sequence[int]): Length of hop between STFT windows for each scale + win_lengths (Sequence[int]): Window size for each scale + **kwargs: additional args for STFTDiscriminator + """ + + def __init__(self, + filters: int, + in_channels: int=1, + out_channels: int=1, + n_ffts: tp.List[int]=[1024, 2048, 512, 256, 128], + hop_lengths: tp.List[int]=[256, 512, 128, 64, 32], + win_lengths: tp.List[int]=[1024, 2048, 512, 256, 128], + **kwargs): + super().__init__() + assert len(n_ffts) == len(hop_lengths) == len(win_lengths) + self.discriminators = nn.ModuleList([ + DiscriminatorSTFT( + filters, + in_channels=in_channels, + out_channels=out_channels, + n_fft=n_ffts[i], + win_length=win_lengths[i], + hop_length=hop_lengths[i], + **kwargs) for i in range(len(n_ffts)) + ]) + self.num_discriminators = len(self.discriminators) + + def forward(self, x: torch.Tensor) -> DiscriminatorOutput: + logits = [] + fmaps = [] + for disc in self.discriminators: + logit, fmap = disc(x) + logits.append(logit) + fmaps.append(fmap) + return logits, fmaps + + +def test(): + disc = MultiScaleSTFTDiscriminator(filters=32) + y = torch.randn(1, 1, 24000) + y_hat = torch.randn(1, 1, 24000) + + y_disc_r, fmap_r = disc(y) + y_disc_gen, fmap_gen = disc(y_hat) + assert len(y_disc_r) == len(y_disc_gen) == len(fmap_r) == len( + fmap_gen) == disc.num_discriminators + + assert all([len(fm) == 5 for fm in fmap_r + fmap_gen]) + assert all( + [list(f.shape)[:2] == [1, 32] for fm in fmap_r + fmap_gen for f in fm]) + assert all([len(logits.shape) == 4 for logits in y_disc_r + y_disc_gen]) + + +if __name__ == '__main__': + test() diff --git a/third_party/AcademiCodec/academicodec/models/encodec/net3.py b/third_party/AcademiCodec/academicodec/models/encodec/net3.py new file mode 100644 index 0000000..e7495d9 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/net3.py @@ -0,0 +1,61 @@ +import math +import random + +import numpy as np +import torch.nn as nn +from academicodec.modules.seanet import SEANetDecoder +from academicodec.modules.seanet import SEANetEncoder +from academicodec.quantization import ResidualVectorQuantizer + + +# Generator +class SoundStream(nn.Module): + def __init__(self, + n_filters, + D, + target_bandwidths=[7.5, 15], + ratios=[8, 5, 4, 2], + sample_rate=24000, + bins=1024, + normalize=False): + super().__init__() + self.hop_length = np.prod(ratios) # 计算乘积 + self.encoder = SEANetEncoder( + 
n_filters=n_filters, dimension=D, ratios=ratios) + n_q = int(1000 * target_bandwidths[-1] // + (math.ceil(sample_rate / self.hop_length) * 10)) + self.frame_rate = math.ceil(sample_rate / np.prod(ratios)) # 75 + self.bits_per_codebook = int(math.log2(bins)) + self.target_bandwidths = target_bandwidths + self.quantizer = ResidualVectorQuantizer( + dimension=D, n_q=n_q, bins=bins) + self.decoder = SEANetDecoder( + n_filters=n_filters, dimension=D, ratios=ratios) + + def get_last_layer(self): + return self.decoder.layers[-1].weight + + def forward(self, x): + e = self.encoder(x) + max_idx = len(self.target_bandwidths) - 1 + bw = self.target_bandwidths[random.randint(0, max_idx)] + quantized, codes, bandwidth, commit_loss = self.quantizer( + e, self.frame_rate, bw) + o = self.decoder(quantized) + return o, commit_loss, None + + def encode(self, x, target_bw=None, st=None): + e = self.encoder(x) + if target_bw is None: + bw = self.target_bandwidths[-1] + else: + bw = target_bw + if st is None: + st = 0 + codes = self.quantizer.encode(e, self.frame_rate, bw, st) + return codes + + def decode(self, codes): + quantized = self.quantizer.decode(codes) + o = self.decoder(quantized) + return o diff --git a/third_party/AcademiCodec/academicodec/models/encodec/test.py b/third_party/AcademiCodec/academicodec/models/encodec/test.py new file mode 100644 index 0000000..04d0bd8 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/encodec/test.py @@ -0,0 +1,193 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Command-line for audio compression.""" +import argparse +import os +import sys +import typing as tp +from collections import OrderedDict +from pathlib import Path + +import librosa +import soundfile as sf +import torch +from academicodec.models.encodec.net3 import SoundStream + + +def save_audio(wav: torch.Tensor, + path: tp.Union[Path, str], + sample_rate: int, + rescale: bool=False): + limit = 0.99 + mx = wav.abs().max() + if rescale: + wav = wav * min(limit / mx, 1) + else: + wav = wav.clamp(-limit, limit) + wav = wav.squeeze().cpu().numpy() + sf.write(path, wav, sample_rate) + + +def get_parser(): + parser = argparse.ArgumentParser( + 'encodec', + description='High fidelity neural audio codec. ' + 'If input is a .ecdc, decompresses it. ' + 'If input is .wav, compresses it. 
If output is also wav, ' + 'do a compression/decompression cycle.') + parser.add_argument( + '--input', + type=Path, + help='Input file, whatever is supported by torchaudio on your system.') + parser.add_argument( + '--output', + type=Path, + nargs='?', + help='Output file, otherwise inferred from input file.') + parser.add_argument( + '--resume_path', type=str, default='resume_path', help='resume_path') + parser.add_argument( + '--sr', type=int, default=16000, help='sample rate of model') + parser.add_argument( + '-r', + '--rescale', + action='store_true', + help='Automatically rescale the output to avoid clipping.') + parser.add_argument( + '--ratios', + type=int, + nargs='+', + # prod(ratios) = hop_size + default=[8, 5, 4, 2], + help='ratios of SoundStream, should be set for different hop_size (32d, 320, 240d, ...)' + ) + parser.add_argument( + '--target_bandwidths', + type=float, + nargs='+', + # default for 16k_320d + default=[1, 1.5, 2, 4, 6, 12], + help='target_bandwidths of net3.py') + parser.add_argument( + '--target_bw', + type=float, + # default for 16k_320d + default=12, + help='target_bw of net3.py') + + return parser + + +def fatal(*args): + print(*args, file=sys.stderr) + sys.exit(1) + + +# NOTE: this only prints a warning; it does not actually clip the audio +def check_clipping(wav, rescale): + if rescale: + return + mx = wav.abs().max() + limit = 0.99 + if mx > limit: + print( + f"Clipping!! max scale {mx}, limit is {limit}. " + "To avoid clipping, use the `-r` option to rescale the output.", + file=sys.stderr) + + +def test_one(args, wav_root, store_root, rescale, soundstream): + # torchaudio.load keeps the original sample rate and does not resample automatically + # wav, sr = torchaudio.load(wav_root) + # # take a single channel, output shape [1, T] + # wav = wav[0].unsqueeze(0) + # # resample to the model's sample rate + # wav = torchaudio.transforms.Resample(orig_freq=sr, new_freq=args.sr)(wav) + + # load wav with librosa + wav, sr = librosa.load(wav_root, sr=args.sr) + wav = torch.tensor(wav).unsqueeze(0) + + # add batch axis + wav = wav.unsqueeze(1).cuda() + + # compressing + compressed = soundstream.encode(wav, target_bw=args.target_bw) + print('finish compressing') + out = soundstream.decode(compressed) + out = out.detach().cpu().squeeze(0) + check_clipping(out, rescale) + save_audio(wav=out, path=store_root, sample_rate=args.sr, rescale=rescale) + print('finish decompressing') + + +def remove_encodec_weight_norm(model): + from academicodec.modules import SConv1d + from academicodec.modules.seanet import SConvTranspose1d + from academicodec.modules.seanet import SEANetResnetBlock + from torch.nn.utils import remove_weight_norm + + encoder = model.encoder.model + for key in encoder._modules: + if isinstance(encoder._modules[key], SEANetResnetBlock): + remove_weight_norm(encoder._modules[key].shortcut.conv.conv) + block_modules = encoder._modules[key].block._modules + for skey in block_modules: + if isinstance(block_modules[skey], SConv1d): + remove_weight_norm(block_modules[skey].conv.conv) + elif isinstance(encoder._modules[key], SConv1d): + remove_weight_norm(encoder._modules[key].conv.conv) + + decoder = model.decoder.model + for key in decoder._modules: + if isinstance(decoder._modules[key], SEANetResnetBlock): + remove_weight_norm(decoder._modules[key].shortcut.conv.conv) + block_modules = decoder._modules[key].block._modules + for skey in block_modules: + if isinstance(block_modules[skey], SConv1d): + remove_weight_norm(block_modules[skey].conv.conv) + elif isinstance(decoder._modules[key], SConvTranspose1d): + remove_weight_norm(decoder._modules[key].convtr.convtr) + elif 
isinstance(decoder._modules[key], SConv1d): + remove_weight_norm(decoder._modules[key].conv.conv) + + +def test_batch(): + args = get_parser().parse_args() + print("args.target_bandwidths:", args.target_bandwidths) + if not args.input.exists(): + fatal(f"Input file {args.input} does not exist.") + input_lists = os.listdir(args.input) + input_lists.sort() + soundstream = SoundStream( + n_filters=32, + D=512, + ratios=args.ratios, + sample_rate=args.sr, + target_bandwidths=args.target_bandwidths) + parameter_dict = torch.load(args.resume_path) + new_state_dict = OrderedDict() + # k 为 module.xxx.weight, v 为权重 + for k, v in parameter_dict.items(): + # 截取`module.`后面的xxx.weight + name = k[7:] + new_state_dict[name] = v + soundstream.load_state_dict(new_state_dict) # load model + remove_encodec_weight_norm(soundstream) + soundstream.cuda() + soundstream.eval() + os.makedirs(args.output, exist_ok=True) + for audio in input_lists: + test_one( + args=args, + wav_root=os.path.join(args.input, audio), + store_root=os.path.join(args.output, audio), + rescale=args.rescale, + soundstream=soundstream) + + +if __name__ == '__main__': + test_batch() diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/__init__.py b/third_party/AcademiCodec/academicodec/models/hificodec/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/env.py b/third_party/AcademiCodec/academicodec/models/hificodec/env.py new file mode 100644 index 0000000..2bdbc95 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/hificodec/env.py @@ -0,0 +1,15 @@ +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/meldataset.py b/third_party/AcademiCodec/academicodec/models/hificodec/meldataset.py new file mode 100644 index 0000000..cf01ceb --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/hificodec/meldataset.py @@ -0,0 +1,222 @@ +# code based on https://github.com/b04901014/MQTTS +import math +import os +import random + +import librosa +import numpy as np +import torch.utils.data +from librosa.filters import mel as librosa_mel_fn + + +def load_wav(full_path, sr): + wav, sr = librosa.load(full_path, sr=sr) + return wav, sr + + +def dynamic_range_compression(x, C=1, clip_val=1e-5): + return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) + + +def dynamic_range_decompression(x, C=1): + return np.exp(x) / C + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def mel_spectrogram(y, + n_fft, + num_mels, + sampling_rate, + hop_size, + win_size, + fmin, + fmax, + center=False): + if torch.min(y) < -1.: + print('min value is ', torch.min(y)) + if torch.max(y) > 1.: + print('max value is ', torch.max(y)) + + global mel_basis, hann_window + if 
fmax not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[str(fmax) + '_' + + str(y.device)] = torch.from_numpy(mel).float().to(y.device) + hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) + + y = torch.nn.functional.pad( + y.unsqueeze(1), (int((n_fft - hop_size) / 2), int( + (n_fft - hop_size) / 2)), + mode='reflect') + y = y.squeeze(1) + + spec = torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[str(y.device)], + center=center, + pad_mode='reflect', + normalized=False, + onesided=True) + + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + + spec = torch.matmul(mel_basis[str(fmax) + '_' + str(y.device)], spec) + spec = spectral_normalize_torch(spec) + + return spec + + +def get_dataset_filelist(a): + with open(a.input_training_file, 'r') as f: + training_files = [l.strip() for l in f] + with open(a.input_validation_file, 'r') as f: + validation_files = [l.strip() for l in f] + return training_files, validation_files + + +class MelDataset(torch.utils.data.Dataset): + def __init__(self, + training_files, + segment_size, + n_fft, + num_mels, + hop_size, + win_size, + sampling_rate, + fmin, + fmax, + split=True, + shuffle=True, + n_cache_reuse=1, + device=None, + fmax_loss=None, + fine_tuning=False, + base_mels_path=None): + self.audio_files = training_files + random.seed(1234) + if shuffle: + random.shuffle(self.audio_files) + self.segment_size = segment_size + self.sampling_rate = sampling_rate + self.split = split + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.fmax_loss = fmax_loss + self.cached_wav = None + self.n_cache_reuse = n_cache_reuse + self._cache_ref_count = 0 + self.device = device + self.fine_tuning = fine_tuning + self.base_mels_path = base_mels_path + + def __getitem__(self, index): + filename = self.audio_files[index] + if self._cache_ref_count == 0: + try: + # Note by yuantian: load with the sample_rate of config + audio, sampling_rate = load_wav(filename, sr=self.sampling_rate) + except Exception as e: + print(f"Error on audio: {filename}") + audio = np.random.normal(size=(160000, )) * 0.05 + sampling_rate = self.sampling_rate + self.cached_wav = audio + if sampling_rate != self.sampling_rate: + raise ValueError("{} SR doesn't match target {} SR".format( + sampling_rate, self.sampling_rate)) + self._cache_ref_count = self.n_cache_reuse + else: + audio = self.cached_wav + self._cache_ref_count -= 1 + + audio = torch.FloatTensor(audio) + audio = audio.unsqueeze(0) + + if not self.fine_tuning: + if self.split: + if audio.size(1) >= self.segment_size: + max_audio_start = audio.size(1) - self.segment_size + audio_start = random.randint(0, max_audio_start) + audio = audio[:, audio_start:audio_start + + self.segment_size] + else: + audio = torch.nn.functional.pad(audio, ( + 0, self.segment_size - audio.size(1)), 'constant') + + mel = mel_spectrogram( + audio, + self.n_fft, + self.num_mels, + self.sampling_rate, + self.hop_size, + self.win_size, + self.fmin, + self.fmax, + center=False) + else: + mel = np.load( + os.path.join(self.base_mels_path, + os.path.splitext(os.path.split(filename)[-1])[0] + + '.npy')) + mel = torch.from_numpy(mel) + + if len(mel.shape) < 3: + mel = mel.unsqueeze(0) + + if self.split: + frames_per_seg = math.ceil(self.segment_size / self.hop_size) + + if audio.size(1) >= self.segment_size: + mel_start = random.randint(0, + mel.size(2) - frames_per_seg - 
1) + mel = mel[:, :, mel_start:mel_start + frames_per_seg] + audio = audio[:, mel_start * self.hop_size:( + mel_start + frames_per_seg) * self.hop_size] + else: + mel = torch.nn.functional.pad(mel, ( + 0, frames_per_seg - mel.size(2)), 'constant') + audio = torch.nn.functional.pad(audio, ( + 0, self.segment_size - audio.size(1)), 'constant') + + mel_loss = mel_spectrogram( + audio, + self.n_fft, + self.num_mels, + self.sampling_rate, + self.hop_size, + self.win_size, + self.fmin, + self.fmax_loss, + center=False) + + return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) + + def __len__(self): + return len(self.audio_files) diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/models.py b/third_party/AcademiCodec/academicodec/models/hificodec/models.py new file mode 100644 index 0000000..561c063 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/hificodec/models.py @@ -0,0 +1,535 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import AvgPool1d +from torch.nn import Conv1d +from torch.nn import Conv2d +from torch.nn import ConvTranspose1d +from torch.nn.utils import remove_weight_norm +from torch.nn.utils import spectral_norm +from torch.nn.utils import weight_norm + +from academicodec.utils import get_padding +from academicodec.utils import init_weights + +LRELU_SLOPE = 0.1 + + +class ResBlock1(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): + super(ResBlock1, self).__init__() + self.h = h + self.convs1 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]))) + ]) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))), weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1))) + ]) + self.convs2.apply(init_weights) + + def forward(self, x): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): + super(ResBlock2, self).__init__() + self.h = h + self.convs = nn.ModuleList([ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]))), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]))) + ]) + self.convs.apply(init_weights) + + def forward(self, x): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class 
Generator(torch.nn.Module): + def __init__(self, h): + super(Generator, self).__init__() + self.h = h + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + self.conv_pre = weight_norm( + Conv1d(512, h.upsample_initial_channel, 7, 1, padding=3)) + resblock = ResBlock1 if h.resblock == '1' else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, + k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2**(i + 1)), + k, + u, + # padding=(u//2 + u%2), + padding=(k - u) // 2, + # output_padding=u%2 + ))) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2**(i + 1)) + for j, (k, d) in enumerate( + zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock(h, ch, k, d)) + + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) + self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x): + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, + use_spectral_norm=False): + super(DiscriminatorP, self).__init__() + self.period = period + norm_f = weight_norm if use_spectral_norm is False else spectral_norm + self.convs = nn.ModuleList([ + norm_f( + Conv2d( + 1, + 32, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0))), + norm_f( + Conv2d( + 32, + 128, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0))), + norm_f( + Conv2d( + 128, + 512, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0))), + norm_f( + Conv2d( + 512, + 1024, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), + ]) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self): + super(MultiPeriodDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + DiscriminatorP(2), + DiscriminatorP(3), + DiscriminatorP(5), + DiscriminatorP(7), + DiscriminatorP(11), + ]) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + 
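# collect logits and feature maps for the generated waveform as well, mirroring the real branch above
+ 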
y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super(DiscriminatorS, self).__init__() + norm_f = weight_norm if use_spectral_norm is False else spectral_norm + self.convs = nn.ModuleList([ + norm_f(Conv1d(1, 128, 15, 1, padding=7)), + norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), + norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), + norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ]) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiScaleDiscriminator(torch.nn.Module): + def __init__(self): + super(MultiScaleDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + DiscriminatorS(use_spectral_norm=True), + DiscriminatorS(), + DiscriminatorS(), + ]) + self.meanpools = nn.ModuleList( + [AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)]) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + if i != 0: + y = self.meanpools[i - 1](y) + y_hat = self.meanpools[i - 1](y_hat) + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +def feature_loss(fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 + + +def discriminator_loss(disc_real_outputs, disc_generated_outputs): + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + r_loss = torch.mean((1 - dr)**2) + g_loss = torch.mean(dg**2) + loss += (r_loss + g_loss) + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +def generator_loss(disc_outputs): + loss = 0 + gen_losses = [] + for dg in disc_outputs: + l = torch.mean((1 - dg)**2) + gen_losses.append(l) + loss += l + + return loss, gen_losses + + +class Encoder(torch.nn.Module): + def __init__(self, h): + super(Encoder, self).__init__() + self.h = h + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + self.conv_pre = weight_norm(Conv1d(1, 32, 7, 1, padding=3)) + self.normalize = nn.ModuleList() + resblock = ResBlock1 if h.resblock == '1' else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate( + list( + reversed( + list(zip(h.upsample_rates, h.upsample_kernel_sizes))))): + self.ups.append( + weight_norm( + Conv1d( + 32 * (2**i), + 32 * (2**(i + 1)), + k, + u, + padding=((k - u) // 2) + # padding=(u//2 + u%2) + ))) + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = 32 * (2**(i + 1)) + for j, (k, d) in enumerate( + zip( + list(reversed(h.resblock_kernel_sizes)), + list(reversed(h.resblock_dilation_sizes)))): + self.resblocks.append(resblock(h, ch, k, d)) + self.normalize.append( + torch.nn.GroupNorm(ch // 16, ch, eps=1e-6, affine=True)) + self.conv_post = Conv1d(512, 512, 3, 1, padding=1) + 
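# apply the shared init_weights initialization to the downsampling convs and the output projection
+ 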
self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x): + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + xs = self.normalize[i * self.num_kernels + j](xs) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + xs = self.normalize[i * self.num_kernels + j](xs) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + return x + + def remove_weight_norm(self): + print('Removing weight norm...') + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + + +class Quantizer_module(torch.nn.Module): + def __init__(self, n_e, e_dim): + super(Quantizer_module, self).__init__() + self.embedding = nn.Embedding(n_e, e_dim) + self.embedding.weight.data.uniform_(-1.0 / n_e, 1.0 / n_e) + + def forward(self, x): + # compute Euclidean distance + d = torch.sum(x ** 2, 1, keepdim=True) + torch.sum(self.embedding.weight ** 2, 1) \ + - 2 * torch.matmul(x, self.embedding.weight.T) + min_indicies = torch.argmin(d, 1) + z_q = self.embedding(min_indicies) + return z_q, min_indicies + + +class Quantizer(torch.nn.Module): + def __init__(self, h): + super(Quantizer, self).__init__() + assert 512 % h.n_code_groups == 0 + self.quantizer_modules = nn.ModuleList([ + Quantizer_module(h.n_codes, 512 // h.n_code_groups) + for _ in range(h.n_code_groups) + ]) + self.quantizer_modules2 = nn.ModuleList([ + Quantizer_module(h.n_codes, 512 // h.n_code_groups) + for _ in range(h.n_code_groups) + ]) + self.h = h + self.codebook_loss_lambda = self.h.codebook_loss_lambda # e.g., 1 + self.commitment_loss_lambda = self.h.commitment_loss_lambda # e.g., 0.25 + self.residul_layer = 2 + self.n_code_groups = h.n_code_groups + + def for_one_step(self, xin, idx): + xin = xin.transpose(1, 2) + x = xin.reshape(-1, 512) + x = torch.split(x, 512 // self.h.n_code_groups, dim=-1) + min_indicies = [] + z_q = [] + if idx == 0: + for _x, m in zip(x, self.quantizer_modules): + _z_q, _min_indicies = m(_x) + z_q.append(_z_q) + min_indicies.append(_min_indicies) #B * T, + z_q = torch.cat(z_q, -1).reshape(xin.shape) + # loss = 0.25 * torch.mean((z_q.detach() - xin) ** 2) + torch.mean((z_q - xin.detach()) ** 2) + loss = self.codebook_loss_lambda * torch.mean((z_q - xin.detach()) ** 2) \ + + self.commitment_loss_lambda * torch.mean((z_q.detach() - xin) ** 2) + z_q = xin + (z_q - xin).detach() + z_q = z_q.transpose(1, 2) + return z_q, loss, min_indicies + else: + for _x, m in zip(x, self.quantizer_modules2): + _z_q, _min_indicies = m(_x) + z_q.append(_z_q) + min_indicies.append(_min_indicies) #B * T, + z_q = torch.cat(z_q, -1).reshape(xin.shape) + # loss = 0.25 * torch.mean((z_q.detach() - xin) ** 2) + torch.mean((z_q - xin.detach()) ** 2) + loss = self.codebook_loss_lambda * torch.mean((z_q - xin.detach()) ** 2) \ + + self.commitment_loss_lambda * torch.mean((z_q.detach() - xin) ** 2) + z_q = xin + (z_q - xin).detach() + z_q = z_q.transpose(1, 2) + return z_q, loss, min_indicies + + def forward(self, xin): + #B, C, T + quantized_out = 0.0 + residual = xin + all_losses = [] + all_indices = [] + for i in range(self.residul_layer): + quantized, loss, indices = self.for_one_step(residual, i) # + residual = residual - quantized + quantized_out = quantized_out + quantized + all_indices.extend(indices) # + all_losses.append(loss) + all_losses = 
torch.stack(all_losses) + loss = torch.mean(all_losses) + return quantized_out, loss, all_indices + + def embed(self, x): + #idx: N, T, 4 + #print('x ', x.shape) + quantized_out = torch.tensor(0.0, device=x.device) + x = torch.split(x, 1, 2) # split, 将最后一个维度分开, 每个属于一个index group + #print('x.shape ', len(x),x[0].shape) + for i in range(self.residul_layer): + ret = [] + if i == 0: + for j in range(self.n_code_groups): + q = x[j] + embed = self.quantizer_modules[j] + q = embed.embedding(q.squeeze(-1)) + ret.append(q) + ret = torch.cat(ret, -1) + #print(ret.shape) + quantized_out = quantized_out + ret + else: + for j in range(self.n_code_groups): + q = x[j + self.n_code_groups] + embed = self.quantizer_modules2[j] + q = embed.embedding(q.squeeze(-1)) + ret.append(q) + ret = torch.cat(ret, -1) + quantized_out = quantized_out + ret + return quantized_out.transpose(1, 2) #N, C, T diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/train.py b/third_party/AcademiCodec/academicodec/models/hificodec/train.py new file mode 100644 index 0000000..77b272c --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/hificodec/train.py @@ -0,0 +1,445 @@ +import warnings +warnings.simplefilter(action='ignore', category=FutureWarning) +import itertools +import os +import time +import argparse +import json +import torch +import torch.nn.functional as F +from torchaudio.transforms import MelSpectrogram +from torch.utils.tensorboard import SummaryWriter +from torch.utils.data import DistributedSampler, DataLoader +import torch.multiprocessing as mp +from torch.distributed import init_process_group +from torch.nn.parallel import DistributedDataParallel + +from academicodec.models.hificodec.env import AttrDict, build_env +from academicodec.models.hificodec.meldataset import MelDataset, mel_spectrogram, get_dataset_filelist +from academicodec.models.encodec.msstftd import MultiScaleSTFTDiscriminator +from academicodec.models.hificodec.models import Generator +from academicodec.models.hificodec.models import MultiPeriodDiscriminator +from academicodec.models.hificodec.models import MultiScaleDiscriminator +from academicodec.models.hificodec.models import feature_loss +from academicodec.models.hificodec.models import generator_loss +from academicodec.models.hificodec.models import discriminator_loss +from academicodec.models.hificodec.models import Encoder +from academicodec.models.hificodec.models import Quantizer +from academicodec.utils import plot_spectrogram +from academicodec.utils import scan_checkpoint +from academicodec.utils import load_checkpoint +from academicodec.utils import save_checkpoint + +torch.backends.cudnn.benchmark = True + + +def reconstruction_loss(x, G_x, device, eps=1e-7): + L = 100 * F.mse_loss(x, G_x) # wav L1 loss + for i in range(6, 11): + s = 2**i + melspec = MelSpectrogram( + sample_rate=24000, + n_fft=s, + hop_length=s // 4, + n_mels=64, + wkwargs={"device": device}).to(device) + # 64, 16, 64 + # 128, 32, 128 + # 256, 64, 256 + # 512, 128, 512 + # 1024, 256, 1024 + S_x = melspec(x) + S_G_x = melspec(G_x) + loss = ((S_x - S_G_x).abs().mean() + ( + ((torch.log(S_x.abs() + eps) - torch.log(S_G_x.abs() + eps))**2 + ).mean(dim=-2)**0.5).mean()) / (i) + L += loss + #print('i ,loss ', i, loss) + #assert 1==2 + return L + + +def train(rank, a, h): + torch.cuda.set_device(rank) + if h.num_gpus > 1: + init_process_group( + backend=h.dist_config['dist_backend'], + init_method=h.dist_config['dist_url'], + world_size=h.dist_config['world_size'] * h.num_gpus, + rank=rank) + + 
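+    # Per-rank setup: seed this process's GPU, then build the codec pipeline
+    # (Encoder -> Quantizer -> Generator) together with the three discriminators
+    # (multi-period, multi-scale waveform, multi-scale STFT) on the local device.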
torch.cuda.manual_seed(h.seed) + device = torch.device('cuda:{:d}'.format(rank)) + + encoder = Encoder(h).to(device) + generator = Generator(h).to(device) + quantizer = Quantizer(h).to(device) + mpd = MultiPeriodDiscriminator().to(device) + msd = MultiScaleDiscriminator().to(device) + mstftd = MultiScaleSTFTDiscriminator(32).to(device) + if rank == 0: + print(encoder) + print(quantizer) + print(generator) + os.makedirs(a.checkpoint_path, exist_ok=True) + print("checkpoints directory : ", a.checkpoint_path) + + if os.path.isdir(a.checkpoint_path): + cp_g = scan_checkpoint(a.checkpoint_path, 'g_') + cp_do = scan_checkpoint(a.checkpoint_path, 'do_') + + steps = 0 + if cp_g is None or cp_do is None: + state_dict_do = None + last_epoch = -1 + else: + state_dict_g = load_checkpoint(cp_g, device) + state_dict_do = load_checkpoint(cp_do, device) + generator.load_state_dict(state_dict_g['generator']) + encoder.load_state_dict(state_dict_g['encoder']) + quantizer.load_state_dict(state_dict_g['quantizer']) + mpd.load_state_dict(state_dict_do['mpd']) + msd.load_state_dict(state_dict_do['msd']) + mstftd.load_state_dict(state_dict_do['mstftd']) + steps = state_dict_do['steps'] + 1 + last_epoch = state_dict_do['epoch'] + + if h.num_gpus > 1: + generator = DistributedDataParallel( + generator, device_ids=[rank]).to(device) + encoder = DistributedDataParallel(encoder, device_ids=[rank]).to(device) + quantizer = DistributedDataParallel( + quantizer, device_ids=[rank]).to(device) + mpd = DistributedDataParallel(mpd, device_ids=[rank]).to(device) + msd = DistributedDataParallel(msd, device_ids=[rank]).to(device) + mstftd = DistributedDataParallel(mstftd, device_ids=[rank]).to(device) + + optim_g = torch.optim.Adam( + itertools.chain(generator.parameters(), + encoder.parameters(), quantizer.parameters()), + h.learning_rate, + betas=[h.adam_b1, h.adam_b2]) + optim_d = torch.optim.Adam( + itertools.chain(msd.parameters(), mpd.parameters(), + mstftd.parameters()), + h.learning_rate, + betas=[h.adam_b1, h.adam_b2]) + if state_dict_do is not None: + optim_g.load_state_dict(state_dict_do['optim_g']) + optim_d.load_state_dict(state_dict_do['optim_d']) + + scheduler_g = torch.optim.lr_scheduler.ExponentialLR( + optim_g, gamma=h.lr_decay, last_epoch=last_epoch) + scheduler_d = torch.optim.lr_scheduler.ExponentialLR( + optim_d, gamma=h.lr_decay, last_epoch=last_epoch) + + training_filelist, validation_filelist = get_dataset_filelist(a) + + trainset = MelDataset( + training_filelist, + h.segment_size, + h.n_fft, + h.num_mels, + h.hop_size, + h.win_size, + h.sampling_rate, + h.fmin, + h.fmax, + n_cache_reuse=0, + shuffle=False if h.num_gpus > 1 else True, + fmax_loss=h.fmax_for_loss, + device=device, + fine_tuning=a.fine_tuning, + base_mels_path=a.input_mels_dir) + + train_sampler = DistributedSampler(trainset) if h.num_gpus > 1 else None + + train_loader = DataLoader( + trainset, + num_workers=h.num_workers, + shuffle=False, + sampler=train_sampler, + batch_size=h.batch_size, + pin_memory=True, + drop_last=True) + + if rank == 0: + validset = MelDataset( + validation_filelist, + h.segment_size, + h.n_fft, + h.num_mels, + h.hop_size, + h.win_size, + h.sampling_rate, + h.fmin, + h.fmax, + False, + False, + n_cache_reuse=0, + fmax_loss=h.fmax_for_loss, + device=device, + fine_tuning=a.fine_tuning, + base_mels_path=a.input_mels_dir) + validation_loader = DataLoader( + validset, + num_workers=1, + shuffle=False, + sampler=None, + batch_size=1, + pin_memory=True, + drop_last=True) + sw = 
SummaryWriter(os.path.join(a.checkpoint_path, 'logs')) + plot_gt_once = False + generator.train() + encoder.train() + quantizer.train() + mpd.train() + msd.train() + for epoch in range(max(0, last_epoch), a.training_epochs): + if rank == 0: + start = time.time() + print("Epoch: {}".format(epoch + 1)) + if h.num_gpus > 1: + train_sampler.set_epoch(epoch) + for i, batch in enumerate(train_loader): + if rank == 0: + start_b = time.time() + x, y, _, y_mel = batch + x = torch.autograd.Variable(x.to(device, non_blocking=True)) + y = torch.autograd.Variable(y.to(device, non_blocking=True)) + y_mel = torch.autograd.Variable(y_mel.to(device, non_blocking=True)) + y = y.unsqueeze(1) + + c = encoder(y) + # print("c.shape: ", c.shape) + q, loss_q, c = quantizer(c) + # print("q.shape: ", q.shape) + y_g_hat = generator(q) + y_g_hat_mel = mel_spectrogram( + y_g_hat.squeeze(1), h.n_fft, h.num_mels, h.sampling_rate, + h.hop_size, h.win_size, h.fmin, + h.fmax_for_loss) # 1024, 80, 24000, 240,1024 + y_r_mel_1 = mel_spectrogram( + y.squeeze(1), 512, h.num_mels, h.sampling_rate, 120, 512, + h.fmin, h.fmax_for_loss) + y_g_mel_1 = mel_spectrogram( + y_g_hat.squeeze(1), 512, h.num_mels, h.sampling_rate, 120, 512, + h.fmin, h.fmax_for_loss) + y_r_mel_2 = mel_spectrogram( + y.squeeze(1), 256, h.num_mels, h.sampling_rate, 60, 256, h.fmin, + h.fmax_for_loss) + y_g_mel_2 = mel_spectrogram( + y_g_hat.squeeze(1), 256, h.num_mels, h.sampling_rate, 60, 256, + h.fmin, h.fmax_for_loss) + y_r_mel_3 = mel_spectrogram( + y.squeeze(1), 128, h.num_mels, h.sampling_rate, 30, 128, h.fmin, + h.fmax_for_loss) + y_g_mel_3 = mel_spectrogram( + y_g_hat.squeeze(1), 128, h.num_mels, h.sampling_rate, 30, 128, + h.fmin, h.fmax_for_loss) + # print("x.shape: ", x.shape) + # print("y.shape: ", y.shape) + # print("y_g_hat.shape: ", y_g_hat.shape) + optim_d.zero_grad() + + # MPD + y_df_hat_r, y_df_hat_g, _, _ = mpd(y, y_g_hat.detach()) + loss_disc_f, losses_disc_f_r, losses_disc_f_g = discriminator_loss( + y_df_hat_r, y_df_hat_g) + + # MSD + y_ds_hat_r, y_ds_hat_g, _, _ = msd(y, y_g_hat.detach()) + loss_disc_s, losses_disc_s_r, losses_disc_s_g = discriminator_loss( + y_ds_hat_r, y_ds_hat_g) + + y_disc_r, fmap_r = mstftd(y) + y_disc_gen, fmap_gen = mstftd(y_g_hat.detach()) + loss_disc_stft, losses_disc_stft_r, losses_disc_stft_g = discriminator_loss( + y_disc_r, y_disc_gen) + loss_disc_all = loss_disc_s + loss_disc_f + loss_disc_stft + + loss_disc_all.backward() + optim_d.step() + + # Generator + optim_g.zero_grad() + + # L1 Mel-Spectrogram Loss + loss_mel1 = F.l1_loss(y_r_mel_1, y_g_mel_1) + loss_mel2 = F.l1_loss(y_r_mel_2, y_g_mel_2) + loss_mel3 = F.l1_loss(y_r_mel_3, y_g_mel_3) + #print('loss_mel1, loss_mel2 ', loss_mel1, loss_mel2) + loss_mel = F.l1_loss(y_mel, + y_g_hat_mel) * 45 + loss_mel1 + loss_mel2 + # print('loss_mel ', loss_mel) + # assert 1==2 + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd(y, y_g_hat) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd(y, y_g_hat) + y_stftd_hat_r, fmap_stftd_r = mstftd(y) + y_stftd_hat_g, fmap_stftd_g = mstftd(y_g_hat) + loss_fm_f = feature_loss(fmap_f_r, fmap_f_g) + loss_fm_s = feature_loss(fmap_s_r, fmap_s_g) + loss_fm_stft = feature_loss(fmap_stftd_r, fmap_stftd_g) + loss_gen_f, losses_gen_f = generator_loss(y_df_hat_g) + loss_gen_s, losses_gen_s = generator_loss(y_ds_hat_g) + loss_gen_stft, losses_gen_stft = generator_loss(y_stftd_hat_g) + loss_gen_all = loss_gen_s + loss_gen_f + loss_gen_stft + loss_fm_s + loss_fm_f + loss_fm_stft + loss_mel + loss_q * 10 + loss_gen_all.backward() + 
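+            # optim_g updates encoder, generator and quantizer jointly (see the
+            # itertools.chain above); loss_gen_all combines the adversarial,
+            # feature-matching and mel losses with the VQ loss weighted by 10.
+            # Note that loss_mel3 is computed above but not added to loss_mel.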
optim_g.step() + if rank == 0: + # STDOUT logging + if steps % a.stdout_interval == 0: + with torch.no_grad(): + mel_error = F.l1_loss(y_mel, y_g_hat_mel).item() + print( + 'Steps : {:d}, Gen Loss Total : {:4.3f}, Loss Q : {:4.3f}, Mel-Spec. Error : {:4.3f}, s/b : {:4.3f}'. + format(steps, loss_gen_all, loss_q, mel_error, + time.time() - start_b)) + # checkpointing + if steps % a.checkpoint_interval == 0 and steps != 0: + checkpoint_path = "{}/g_{:08d}".format(a.checkpoint_path, + steps) + save_checkpoint( + checkpoint_path, { + 'generator': (generator.module if h.num_gpus > 1 + else generator).state_dict(), + 'encoder': (encoder.module if h.num_gpus > 1 else + encoder).state_dict(), + 'quantizer': (quantizer.module if h.num_gpus > 1 + else quantizer).state_dict() + }, + num_ckpt_keep=a.num_ckpt_keep) + checkpoint_path = "{}/do_{:08d}".format(a.checkpoint_path, + steps) + save_checkpoint( + checkpoint_path, { + 'mpd': (mpd.module + if h.num_gpus > 1 else mpd).state_dict(), + 'msd': (msd.module + if h.num_gpus > 1 else msd).state_dict(), + 'mstftd': (mstftd.module + if h.num_gpus > 1 else msd).state_dict(), + 'optim_g': + optim_g.state_dict(), + 'optim_d': + optim_d.state_dict(), + 'steps': + steps, + 'epoch': + epoch + }, + num_ckpt_keep=a.num_ckpt_keep) + # Tensorboard summary logging + if steps % a.summary_interval == 0: + sw.add_scalar("training/gen_loss_total", loss_gen_all, + steps) + sw.add_scalar("training/mel_spec_error", mel_error, steps) + + # Validation + if steps % a.validation_interval == 0 and steps != 0: + generator.eval() + encoder.eval() + quantizer.eval() + torch.cuda.empty_cache() + val_err_tot = 0 + with torch.no_grad(): + for j, batch in enumerate(validation_loader): + x, y, _, y_mel = batch + c = encoder(y.to(device).unsqueeze(1)) + q, loss_q, c = quantizer(c) + y_g_hat = generator(q) + y_mel = torch.autograd.Variable(y_mel.to(device)) + y_g_hat_mel = mel_spectrogram( + y_g_hat.squeeze(1), h.n_fft, h.num_mels, + h.sampling_rate, h.hop_size, h.win_size, h.fmin, + h.fmax_for_loss) + i_size = min(y_mel.size(2), y_g_hat_mel.size(2)) + val_err_tot += F.l1_loss( + y_mel[:, :, :i_size], + y_g_hat_mel[:, :, :i_size]).item() + + if j <= 8: + # if steps == 0: + if not plot_gt_once: + sw.add_audio('gt/y_{}'.format(j), y[0], + steps, h.sampling_rate) + sw.add_figure('gt/y_spec_{}'.format(j), + plot_spectrogram(x[0]), steps) + + sw.add_audio('generated/y_hat_{}'.format(j), + y_g_hat[0], steps, h.sampling_rate) + y_hat_spec = mel_spectrogram( + y_g_hat.squeeze(1), h.n_fft, h.num_mels, + h.sampling_rate, h.hop_size, h.win_size, + h.fmin, h.fmax) + sw.add_figure( + 'generated/y_hat_spec_{}'.format(j), + plot_spectrogram( + y_hat_spec.squeeze(0).cpu().numpy()), + steps) + + val_err = val_err_tot / (j + 1) + sw.add_scalar("validation/mel_spec_error", val_err, + steps) + if not plot_gt_once: + plot_gt_once = True + + generator.train() + + steps += 1 + + scheduler_g.step() + scheduler_d.step() + + if rank == 0: + print('Time taken for epoch {} is {} sec\n'.format( + epoch + 1, int(time.time() - start))) + + +def main(): + print('Initializing Training Process..') + + parser = argparse.ArgumentParser() + + # parser.add_argument('--group_name', default=None) + # parser.add_argument('--input_wavs_dir', default='../datasets/audios') + parser.add_argument('--input_mels_dir', default=None) + parser.add_argument('--input_training_file', required=True) + parser.add_argument('--input_validation_file', required=True) + parser.add_argument('--checkpoint_path', default='checkpoints') + 
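+    # --config should point at the JSON file with the model/training
+    # hyper-parameters; it is loaded below and wrapped in an AttrDict.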
parser.add_argument('--config', default='') + parser.add_argument('--training_epochs', default=2000, type=int) + parser.add_argument('--stdout_interval', default=5, type=int) + parser.add_argument('--checkpoint_interval', default=5000, type=int) + parser.add_argument('--summary_interval', default=100, type=int) + parser.add_argument('--validation_interval', default=5000, type=int) + parser.add_argument('--num_ckpt_keep', default=5, type=int) + parser.add_argument('--fine_tuning', default=False, type=bool) + + a = parser.parse_args() + + with open(a.config) as f: + data = f.read() + + json_config = json.loads(data) + h = AttrDict(json_config) + build_env(a.config, 'config.json', a.checkpoint_path) + + torch.manual_seed(h.seed) + if torch.cuda.is_available(): + torch.cuda.manual_seed(h.seed) + h.num_gpus = torch.cuda.device_count() + h.batch_size = int(h.batch_size / h.num_gpus) + print('Batch size per GPU :', h.batch_size) + else: + pass + + if h.num_gpus > 1: + mp.spawn(train, nprocs=h.num_gpus, args=(a, h, )) + else: + train(0, a, h) + + +if __name__ == '__main__': + main() diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/vqvae.py b/third_party/AcademiCodec/academicodec/models/hificodec/vqvae.py new file mode 100644 index 0000000..d8e747e --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/hificodec/vqvae.py @@ -0,0 +1,45 @@ +import json + +import torch +import torch.nn as nn + +from academicodec.models.hificodec.env import AttrDict +from academicodec.models.hificodec.models import Encoder +from academicodec.models.hificodec.models import Generator +from academicodec.models.hificodec.models import Quantizer + + +class VQVAE(nn.Module): + def __init__(self, + config_path, + ckpt_path, + with_encoder=False): + super(VQVAE, self).__init__() + ckpt = torch.load(ckpt_path) + with open(config_path) as f: + data = f.read() + json_config = json.loads(data) + self.h = AttrDict(json_config) + self.quantizer = Quantizer(self.h) + self.generator = Generator(self.h) + self.generator.load_state_dict(ckpt['generator']) + self.quantizer.load_state_dict(ckpt['quantizer']) + if with_encoder: + self.encoder = Encoder(self.h) + self.encoder.load_state_dict(ckpt['encoder']) + + def forward(self, x): + # x is the codebook + # x.shape (B, T, Nq) + quant_emb = self.quantizer.embed(x) + return self.generator(quant_emb) + + def encode(self, x): + batch_size = x.size(0) + if len(x.shape) == 3 and x.shape[-1] == 1: + x = x.squeeze(-1) + c = self.encoder(x.unsqueeze(1)) + q, loss_q, c = self.quantizer(c) + c = [code.reshape(batch_size, -1) for code in c] + # shape: [N, T, 4] + return torch.stack(c, -1) diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/vqvae_copy_syn.py b/third_party/AcademiCodec/academicodec/models/hificodec/vqvae_copy_syn.py new file mode 100644 index 0000000..d3e9687 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/hificodec/vqvae_copy_syn.py @@ -0,0 +1,50 @@ +import argparse +import glob +import json +import os +from pathlib import Path + +import soundfile as sf +from tqdm import tqdm + +from academicodec.models.hificodec.vqvae_tester import VqvaeTester + +parser = argparse.ArgumentParser() + +#Path +parser.add_argument('--outputdir', type=str, required=True) +parser.add_argument('--model_path', type=str, required=True) +parser.add_argument('--input_wavdir', type=str, required=True) +parser.add_argument('--config_path', type=str, required=True) +parser.add_argument('--num_gens', type=int, default=1024) + +#Data 
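+# --sample_rate must match the "sampling_rate" entry of the training config;
+# this is asserted right after the arguments are parsed.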
+parser.add_argument('--sample_rate', type=int, default=24000) + +args = parser.parse_args() + +with open(args.config_path, 'r') as f: + argdict = json.load(f) + assert argdict['sampling_rate'] == args.sample_rate, \ + f"Sampling rate not consistent, stated {args.sample_rate}, but the model is trained on {argdict['sample_rate']}" + argdict.update(args.__dict__) + args.__dict__ = argdict + +if __name__ == '__main__': + Path(args.outputdir).mkdir(parents=True, exist_ok=True) + print("Init model and load weights") + model = VqvaeTester(config_path=args.config_path, model_path=args.model_path,sample_rate=args.sample_rate) + model.cuda() + model.vqvae.generator.remove_weight_norm() + model.vqvae.encoder.remove_weight_norm() + model.eval() + print("Model ready") + + wav_paths = glob.glob(f"{args.input_wavdir}/*.wav")[:args.num_gens] + print(f"Globbed {len(wav_paths)} wav files.") + + for wav_path in wav_paths: + fid, wav = model(wav_path) + wav = wav.squeeze().cpu().numpy() + sf.write( + os.path.join(args.outputdir, f'{fid}.wav'), wav, args.sample_rate) diff --git a/third_party/AcademiCodec/academicodec/models/hificodec/vqvae_tester.py b/third_party/AcademiCodec/academicodec/models/hificodec/vqvae_tester.py new file mode 100644 index 0000000..4cbe9bf --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/hificodec/vqvae_tester.py @@ -0,0 +1,37 @@ +import os + +import librosa +import torch +import torch.nn as nn + +from academicodec.models.hificodec.vqvae import VQVAE + + +class VqvaeTester(nn.Module): + def __init__(self, config_path, model_path, sample_rate=24000): + super().__init__() + self.vqvae = VQVAE(config_path, model_path, with_encoder=True) + self.sample_rate = sample_rate + + @torch.no_grad() + def forward(self, wav_path): + # 单声道 + # wav.shape (T, ), 按照模型的 sr 读取 + wav, sr = librosa.load(wav_path, sr=self.sample_rate) + fid = os.path.basename(wav_path)[:-4] + wav = torch.tensor(wav).unsqueeze(0) + wav = wav.cuda() + # vq_codes is acoustic token + vq_codes = self.vqvae.encode(wav) + syn = self.vqvae(vq_codes) + return fid, syn + + @torch.no_grad() + def vq(self, wav_path): + wav, sr = librosa.load(wav_path, sr=self.sample_rate) + fid = os.path.basename(wav_path)[:-4] + wav = torch.tensor(wav).unsqueeze(0) + wav = wav.cuda() + # vq_codes is acoustic token + vq_codes = self.vqvae.encode(wav) + return fid, vq_codes diff --git a/third_party/AcademiCodec/academicodec/models/soundstream/__init__.py b/third_party/AcademiCodec/academicodec/models/soundstream/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/AcademiCodec/academicodec/models/soundstream/dataset.py b/third_party/AcademiCodec/academicodec/models/soundstream/dataset.py new file mode 100644 index 0000000..6ed2ba9 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/soundstream/dataset.py @@ -0,0 +1,58 @@ +# 和 Encodec* 的 dataset.py 有点类似但是不完全一样 +# 主要是 prob > 0.7 的时候多了 ans2 +import glob +import random + +import torch +import torchaudio +from torch.utils.data import Dataset + + +class NSynthDataset(Dataset): + """Dataset to load NSynth data.""" + + def __init__(self, audio_dir): + super().__init__() + self.filenames = [] + self.filenames.extend(glob.glob(audio_dir + "/*.wav")) + print(len(self.filenames)) + _, self.sr = torchaudio.load(self.filenames[0]) + self.max_len = 24000 # 24000 + + def __len__(self): + return len(self.filenames) + + def __getitem__(self, index): + #print(self.filenames[index]) + prob = random.random() # (0,1) + if prob > 0.7: + # data augmentation + ans1 = 
torch.zeros(1, self.max_len) + ans2 = torch.zeros(1, self.max_len) + audio1 = torchaudio.load(self.filenames[index])[0] + index2 = random.randint(0, len(self.filenames) - 1) + audio2 = torchaudio.load(self.filenames[index2])[0] + if audio1.shape[1] > self.max_len: + st = random.randint(0, audio1.shape[1] - self.max_len - 1) + ed = st + self.max_len + ans1 = audio1[:, st:ed] + else: + ans1[:, :audio1.shape[1]] = audio1 + if audio2.shape[1] > self.max_len: + st = random.randint(0, audio2.shape[1] - self.max_len - 1) + ed = st + self.max_len + ans2 = audio2[:, st:ed] + else: + ans2[:, :audio2.shape[1]] = audio2 + ans = ans1 + ans2 + return ans + else: + ans = torch.zeros(1, self.max_len) + audio = torchaudio.load(self.filenames[index])[0] + if audio.shape[1] > self.max_len: + st = random.randint(0, audio.shape[1] - self.max_len - 1) + ed = st + self.max_len + return audio[:, st:ed] + else: + ans[:, :audio.shape[1]] = audio + return ans diff --git a/third_party/AcademiCodec/academicodec/models/soundstream/loss.py b/third_party/AcademiCodec/academicodec/models/soundstream/loss.py new file mode 100644 index 0000000..40b5222 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/soundstream/loss.py @@ -0,0 +1,209 @@ +import torch +import torch.nn.functional as F +from torchaudio.transforms import MelSpectrogram + + +def adversarial_g_loss(y_disc_gen): + loss = 0.0 + for i in range(len(y_disc_gen)): + #print(y_disc_gen[i].shape) + # assert 1==2 + stft_loss = F.relu(1 - y_disc_gen[i]).mean().squeeze() + loss += stft_loss + return loss / len(y_disc_gen) + + +def feature_loss(fmap_r, fmap_gen): + loss = 0.0 + for i in range(len(fmap_r)): + for j in range(len(fmap_r[i])): + stft_loss = ((fmap_r[i][j] - fmap_gen[i][j]).abs() / + (fmap_r[i][j].abs().mean())).mean() + loss += stft_loss + return loss / (len(fmap_r) * len(fmap_r[0])) + + +def sim_loss(y_disc_r, y_disc_gen): + loss = 0.0 + for i in range(len(y_disc_r)): + loss += F.mse_loss(y_disc_r[i], y_disc_gen[i]) + return loss / len(y_disc_r) + + +def sisnr_loss(x, s, eps=1e-8): + """ + calculate training loss + input: + x: separated signal, N x S tensor, estimate value + s: reference signal, N x S tensor, True value + Return: + sisnr: N tensor + """ + if x.shape != s.shape: + if x.shape[-1] > s.shape[-1]: + x = x[:, :s.shape[-1]] + else: + s = s[:, :x.shape[-1]] + + def l2norm(mat, keepdim=False): + return torch.norm(mat, dim=-1, keepdim=keepdim) + + if x.shape != s.shape: + raise RuntimeError("Dimention mismatch when calculate si-snr, {} vs {}". + format(x.shape, s.shape)) + x_zm = x - torch.mean(x, dim=-1, keepdim=True) + s_zm = s - torch.mean(s, dim=-1, keepdim=True) + t = torch.sum( + x_zm * s_zm, dim=-1, + keepdim=True) * s_zm / (l2norm(s_zm, keepdim=True)**2 + eps) + loss = -20. 
* torch.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps)) + return torch.sum(loss) / x.shape[0] + + +def reconstruction_loss(x, G_x, args, eps=1e-7): + L = 100 * F.mse_loss(x, G_x) # wav L1 loss + #loss_sisnr = sisnr_loss(G_x, x) # + #L += 0.01*loss_sisnr + # print('L0 ', L) + # print('loss_sisnr ', 0.01*loss_sisnr) + # print('L0 ', L) + for i in range(6, 11): + s = 2**i + melspec = MelSpectrogram( + sample_rate=args.sr, + n_fft=max(s, 512), + win_length=s, + hop_length=s // 4, + n_mels=64, + wkwargs={"device": args.device}).to(args.device) + S_x = melspec(x) + S_G_x = melspec(G_x) + l1_loss = (S_x - S_G_x).abs().mean() + l2_loss = (((torch.log(S_x.abs() + eps) - torch.log(S_G_x.abs() + eps))**2).mean(dim=-2)**0.5).mean() + + alpha = (s / 2) ** 0.5 + L += (l1_loss + alpha * l2_loss) + #print('i ,loss ', i, loss) + #assert 1==2 + return L + + +def criterion_d(y_disc_r, y_disc_gen, fmap_r_det, fmap_gen_det, y_df_hat_r, + y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, y_ds_hat_g, + fmap_s_r, fmap_s_g): + loss = 0.0 + loss1 = 0.0 + loss2 = 0.0 + loss3 = 0.0 + loss_f = feature_loss(fmap_r_det, fmap_gen_det) + feature_loss( + fmap_f_r, fmap_f_g) + feature_loss(fmap_s_r, fmap_s_g) + for i in range(len(y_disc_r)): + loss1 += F.relu(1 - y_disc_r[i]).mean() + F.relu(1 + y_disc_gen[ + i]).mean() + for i in range(len(y_df_hat_r)): + loss2 += F.relu(1 - y_df_hat_r[i]).mean() + F.relu(1 + y_df_hat_g[ + i]).mean() + for i in range(len(y_ds_hat_r)): + loss3 += F.relu(1 - y_ds_hat_r[i]).mean() + F.relu(1 + y_ds_hat_g[ + i]).mean() + loss = (loss1 / len(y_disc_gen) + loss2 / len(y_df_hat_r) + loss3 / + len(y_ds_hat_r)) / 3.0 + return loss + 0.0 * loss_f + + +def criterion_g(commit_loss, x, G_x, fmap_r, fmap_gen, y_disc_r, y_disc_gen, + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, + y_ds_hat_g, fmap_s_r, fmap_s_g, args): + adv_g_loss = adversarial_g_loss(y_disc_gen) + feat_loss = (feature_loss(fmap_r, fmap_gen) + sim_loss( + y_disc_r, y_disc_gen) + feature_loss(fmap_f_r, fmap_f_g) + sim_loss( + y_df_hat_r, y_df_hat_g) + feature_loss(fmap_s_r, fmap_s_g) + + sim_loss(y_ds_hat_r, y_ds_hat_g)) / 3.0 + rec_loss = reconstruction_loss(x.contiguous(), G_x.contiguous(), args) + total_loss = args.LAMBDA_COM * commit_loss + args.LAMBDA_ADV * adv_g_loss + \ + args.LAMBDA_FEAT * feat_loss + args.LAMBDA_REC * rec_loss + return total_loss, adv_g_loss, feat_loss, rec_loss + + +def adopt_weight(weight, global_step, threshold=0, value=0.): + if global_step < threshold: + weight = value + return weight + + +def adopt_dis_weight(weight, global_step, threshold=0, value=0.): + if global_step % 3 == 0: # 0,3,6,9,13....这些时间步,不更新dis + weight = value + return weight + + +def calculate_adaptive_weight(nll_loss, g_loss, last_layer, args): + if last_layer is not None: + nll_grads = torch.autograd.grad( + nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + print('last_layer cannot be none') + assert 1 == 2 + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 1.0, 1.0).detach() + d_weight = d_weight * args.LAMBDA_ADV + return d_weight + + +def loss_g(codebook_loss, + inputs, + reconstructions, + fmap_r, + fmap_gen, + y_disc_r, + y_disc_gen, + global_step, + y_df_hat_r, + y_df_hat_g, + y_ds_hat_r, + y_ds_hat_g, + fmap_f_r, + fmap_f_g, + fmap_s_r, + fmap_s_g, + last_layer=None, + is_training=True, + args=None): + rec_loss = reconstruction_loss(inputs.contiguous(), + reconstructions.contiguous(), args) + 
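+    # Hinge-style generator losses and feature-matching terms from the three
+    # discriminator families (STFT, multi-period, multi-scale) are each averaged
+    # before being combined with the reconstruction and codebook losses below.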
adv_g_loss = adversarial_g_loss(y_disc_gen) + adv_mpd_loss = adversarial_g_loss(y_df_hat_g) + adv_msd_loss = adversarial_g_loss(y_ds_hat_g) + adv_loss = (adv_g_loss + adv_mpd_loss + adv_msd_loss) / 3.0 + feat_loss = feature_loss(fmap_r, fmap_gen) + sim_loss(y_disc_r, + y_disc_gen) # + feat_loss_mpd = feature_loss(fmap_f_r, fmap_f_g) + sim_loss(y_df_hat_r, + y_df_hat_g) + feat_loss_msd = feature_loss(fmap_s_r, fmap_s_g) + sim_loss(y_ds_hat_r, + y_ds_hat_g) + feat_loss_tot = (feat_loss + feat_loss_mpd + feat_loss_msd) / 3.0 + d_weight = torch.tensor(1.0) + # try: + # d_weight = calculate_adaptive_weight(rec_loss, adv_g_loss, last_layer, args) # 动态调整重构损失和对抗损失 + # except RuntimeError: + # assert not is_training + # d_weight = torch.tensor(0.0) + disc_factor = adopt_weight( + args.LAMBDA_ADV, global_step, threshold=args.discriminator_iter_start) + #feat_factor = adopt_weight(args.LAMBDA_FEAT, global_step, threshold=args.discriminator_iter_start) + loss = rec_loss + d_weight * disc_factor * adv_loss + \ + args.LAMBDA_FEAT * feat_loss_tot + args.LAMBDA_COM * codebook_loss + return loss, rec_loss, adv_loss, feat_loss_tot, d_weight + + +def loss_dis(y_disc_r_det, y_disc_gen_det, fmap_r_det, fmap_gen_det, y_df_hat_r, + y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, y_ds_hat_g, fmap_s_r, + fmap_s_g, global_step, args): + disc_factor = adopt_weight( + args.LAMBDA_ADV, global_step, threshold=args.discriminator_iter_start) + d_loss = disc_factor * criterion_d(y_disc_r_det, y_disc_gen_det, fmap_r_det, + fmap_gen_det, y_df_hat_r, y_df_hat_g, + fmap_f_r, fmap_f_g, y_ds_hat_r, + y_ds_hat_g, fmap_s_r, fmap_s_g) + return d_loss diff --git a/third_party/AcademiCodec/academicodec/models/soundstream/models.py b/third_party/AcademiCodec/academicodec/models/soundstream/models.py new file mode 100644 index 0000000..13d1f4d --- /dev/null +++ b/third_party/AcademiCodec/academicodec/models/soundstream/models.py @@ -0,0 +1,151 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from academicodec.modules import NormConv1d +from academicodec.modules import NormConv2d +from academicodec.utils import get_padding +from torch.nn import AvgPool1d +from torch.nn.utils import spectral_norm +from torch.nn.utils import weight_norm + +LRELU_SLOPE = 0.1 + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, + period, + kernel_size=5, + stride=3, + use_spectral_norm=False, + activation: str='LeakyReLU', + activation_params: dict={'negative_slope': 0.2}): + super(DiscriminatorP, self).__init__() + self.period = period + norm_f = weight_norm if use_spectral_norm is False else spectral_norm + self.activation = getattr(torch.nn, activation)(**activation_params) + self.convs = nn.ModuleList([ + NormConv2d( + 1, + 32, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0)), + NormConv2d( + 32, + 32, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0)), + NormConv2d( + 32, + 32, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0)), + NormConv2d( + 32, + 32, (kernel_size, 1), (stride, 1), + padding=(get_padding(5, 1), 0)), + NormConv2d(32, 32, (kernel_size, 1), 1, padding=(2, 0)), + ]) + self.conv_post = NormConv2d(32, 1, (3, 1), 1, padding=(1, 0)) + + def forward(self, x): + fmap = [] + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = self.activation(x) + fmap.append(x) + x 
= self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self): + super(MultiPeriodDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + DiscriminatorP(2), + DiscriminatorP(3), + DiscriminatorP(5), + DiscriminatorP(7), + DiscriminatorP(11), + ]) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, + use_spectral_norm=False, + activation: str='LeakyReLU', + activation_params: dict={'negative_slope': 0.2}): + super(DiscriminatorS, self).__init__() + self.activation = getattr(torch.nn, activation)(**activation_params) + self.convs = nn.ModuleList([ + NormConv1d(1, 32, 15, 1, padding=7), + NormConv1d(32, 32, 41, 2, groups=4, padding=20), + NormConv1d(32, 32, 41, 2, groups=16, padding=20), + NormConv1d(32, 32, 41, 4, groups=16, padding=20), + NormConv1d(32, 32, 41, 4, groups=16, padding=20), + NormConv1d(32, 32, 41, 1, groups=16, padding=20), + NormConv1d(32, 32, 5, 1, padding=2), + ]) + self.conv_post = NormConv1d(32, 1, 3, 1, padding=1) + + def forward(self, x): + fmap = [] + for l in self.convs: + x = l(x) + x = self.activation(x) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + return x, fmap + + +class MultiScaleDiscriminator(torch.nn.Module): + def __init__(self): + super(MultiScaleDiscriminator, self).__init__() + self.discriminators = nn.ModuleList([ + DiscriminatorS(), + DiscriminatorS(), + DiscriminatorS(), + ]) + self.meanpools = nn.ModuleList( + [AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)]) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + if i != 0: + y = self.meanpools[i - 1](y) + y_hat = self.meanpools[i - 1](y_hat) + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs diff --git a/third_party/AcademiCodec/academicodec/modules/__init__.py b/third_party/AcademiCodec/academicodec/modules/__init__.py new file mode 100644 index 0000000..4cd5108 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/modules/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Torch modules.""" +# flake8: noqa +from .conv import NormConv1d +from .conv import NormConv2d +from .conv import NormConvTranspose1d +from .conv import NormConvTranspose2d +from .conv import pad1d +from .conv import SConv1d +from .conv import SConvTranspose1d +from .conv import unpad1d +from .lstm import SLSTM +from .seanet import SEANetDecoder +from .seanet import SEANetEncoder +from .transformer import StreamingTransformerEncoder diff --git a/third_party/AcademiCodec/academicodec/modules/conv.py b/third_party/AcademiCodec/academicodec/modules/conv.py new file mode 100644 index 0000000..b0c12af --- /dev/null +++ b/third_party/AcademiCodec/academicodec/modules/conv.py @@ -0,0 +1,323 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Convolutional layers wrappers and utilities.""" +import math +import typing as tp +import warnings + +import torch +from torch import nn +from torch.nn import functional as F +from torch.nn.utils import spectral_norm +from torch.nn.utils import weight_norm + +from academicodec.modules.norm import ConvLayerNorm + +CONV_NORMALIZATIONS = frozenset([ + 'none', 'weight_norm', 'spectral_norm', 'time_layer_norm', 'layer_norm', + 'time_group_norm' +]) + + +def apply_parametrization_norm(module: nn.Module, + norm: str='none') -> nn.Module: + assert norm in CONV_NORMALIZATIONS + if norm == 'weight_norm': + return weight_norm(module) + elif norm == 'spectral_norm': + return spectral_norm(module) + else: + # We already check was in CONV_NORMALIZATION, so any other choice + # doesn't need reparametrization. + return module + + +def get_norm_module(module: nn.Module, + causal: bool=False, + norm: str='none', + **norm_kwargs) -> nn.Module: + """Return the proper normalization module. If causal is True, this will ensure the returned + module is causal, or return an error if the normalization doesn't support causal evaluation. + """ + assert norm in CONV_NORMALIZATIONS + if norm == 'layer_norm': + assert isinstance(module, nn.modules.conv._ConvNd) + return ConvLayerNorm(module.out_channels, **norm_kwargs) + elif norm == 'time_group_norm': + if causal: + raise ValueError("GroupNorm doesn't support causal evaluation.") + assert isinstance(module, nn.modules.conv._ConvNd) + return nn.GroupNorm(1, module.out_channels, **norm_kwargs) + else: + return nn.Identity() + + +def get_extra_padding_for_conv1d(x: torch.Tensor, + kernel_size: int, + stride: int, + padding_total: int=0) -> int: + """See `pad_for_conv1d`. + """ + length = x.shape[-1] + n_frames = (length - kernel_size + padding_total) / stride + 1 + ideal_length = (math.ceil(n_frames) - 1) * stride + (kernel_size - + padding_total) + return ideal_length - length + + +def pad_for_conv1d(x: torch.Tensor, + kernel_size: int, + stride: int, + padding_total: int=0): + """Pad for a convolution to make sure that the last window is full. + Extra padding is added at the end. This is required to ensure that we can rebuild + an output of the same length, as otherwise, even with padding, some time steps + might get removed. + For instance, with total padding = 4, kernel size = 4, stride = 2: + 0 0 1 2 3 4 5 0 0 # (0s are padding) + 1 2 3 # (output frames of a convolution, last 0 is never used) + 0 0 1 2 3 4 5 0 # (output of tr. conv., but pos. 5 is going to get removed as padding) + 1 2 3 4 # once you removed padding, we are missing one time step ! + """ + extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, + padding_total) + return F.pad(x, (0, extra_padding)) + + +def pad1d(x: torch.Tensor, + paddings: tp.Tuple[int, int], + mode: str='zero', + value: float=0.): + """Tiny wrapper around F.pad, just to allow for reflect padding on small input. + If this is the case, we insert extra 0 padding to the right before the reflection happen. 
+ """ + length = x.shape[-1] + padding_left, padding_right = paddings + assert padding_left >= 0 and padding_right >= 0, (padding_left, + padding_right) + if mode == 'reflect': + max_pad = max(padding_left, padding_right) + extra_pad = 0 + if length <= max_pad: + extra_pad = max_pad - length + 1 + x = F.pad(x, (0, extra_pad)) + padded = F.pad(x, paddings, mode, value) + end = padded.shape[-1] - extra_pad + return padded[..., :end] + else: + return F.pad(x, paddings, mode, value) + + +def unpad1d(x: torch.Tensor, paddings: tp.Tuple[int, int]): + """Remove padding from x, handling properly zero padding. Only for 1d!""" + padding_left, padding_right = paddings + assert padding_left >= 0 and padding_right >= 0, (padding_left, + padding_right) + assert (padding_left + padding_right) <= x.shape[-1] + end = x.shape[-1] - padding_right + return x[..., padding_left:end] + + +class NormConv1d(nn.Module): + """Wrapper around Conv1d and normalization applied to this conv + to provide a uniform interface across normalization approaches. + """ + + def __init__(self, + *args, + causal: bool=False, + norm: str='none', + norm_kwargs: tp.Dict[str, tp.Any]={}, + **kwargs): + super().__init__() + self.conv = apply_parametrization_norm(nn.Conv1d(*args, **kwargs), norm) + self.norm = get_norm_module(self.conv, causal, norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class NormConv2d(nn.Module): + """Wrapper around Conv2d and normalization applied to this conv + to provide a uniform interface across normalization approaches. + """ + + def __init__(self, + *args, + norm: str='none', + norm_kwargs: tp.Dict[str, tp.Any]={}, + **kwargs): + super().__init__() + self.conv = apply_parametrization_norm(nn.Conv2d(*args, **kwargs), norm) + self.norm = get_norm_module( + self.conv, causal=False, norm=norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.conv(x) + x = self.norm(x) + return x + + +class NormConvTranspose1d(nn.Module): + """Wrapper around ConvTranspose1d and normalization applied to this conv + to provide a uniform interface across normalization approaches. + """ + + def __init__(self, + *args, + causal: bool=False, + norm: str='none', + norm_kwargs: tp.Dict[str, tp.Any]={}, + **kwargs): + super().__init__() + self.convtr = apply_parametrization_norm( + nn.ConvTranspose1d(*args, **kwargs), norm) + self.norm = get_norm_module(self.convtr, causal, norm, **norm_kwargs) + self.norm_type = norm + + def forward(self, x): + x = self.convtr(x) + x = self.norm(x) + return x + + +class NormConvTranspose2d(nn.Module): + """Wrapper around ConvTranspose2d and normalization applied to this conv + to provide a uniform interface across normalization approaches. + """ + + def __init__(self, + *args, + norm: str='none', + norm_kwargs: tp.Dict[str, tp.Any]={}, + **kwargs): + super().__init__() + self.convtr = apply_parametrization_norm( + nn.ConvTranspose2d(*args, **kwargs), norm) + self.norm = get_norm_module( + self.convtr, causal=False, norm=norm, **norm_kwargs) + + def forward(self, x): + x = self.convtr(x) + x = self.norm(x) + return x + + +class SConv1d(nn.Module): + """Conv1d with some builtin handling of asymmetric or causal padding + and normalization. 
+ """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int=1, + dilation: int=1, + groups: int=1, + bias: bool=True, + causal: bool=False, + norm: str='none', + norm_kwargs: tp.Dict[str, tp.Any]={}, + pad_mode: str='reflect'): + super().__init__() + # warn user on unusual setup between dilation and stride + if stride > 1 and dilation > 1: + warnings.warn( + 'SConv1d has been initialized with stride > 1 and dilation > 1' + f' (kernel_size={kernel_size} stride={stride}, dilation={dilation}).' + ) + self.conv = NormConv1d( + in_channels, + out_channels, + kernel_size, + stride, + dilation=dilation, + groups=groups, + bias=bias, + causal=causal, + norm=norm, + norm_kwargs=norm_kwargs) + self.causal = causal + self.pad_mode = pad_mode + + def forward(self, x): + B, C, T = x.shape + kernel_size = self.conv.conv.kernel_size[0] + stride = self.conv.conv.stride[0] + dilation = self.conv.conv.dilation[0] + padding_total = (kernel_size - 1) * dilation - (stride - 1) + extra_padding = get_extra_padding_for_conv1d(x, kernel_size, stride, + padding_total) + if self.causal: + # Left padding for causal + x = pad1d(x, (padding_total, extra_padding), mode=self.pad_mode) + else: + # Asymmetric padding required for odd strides + padding_right = padding_total // 2 + padding_left = padding_total - padding_right + x = pad1d( + x, (padding_left, padding_right + extra_padding), + mode=self.pad_mode) + return self.conv(x) + + +class SConvTranspose1d(nn.Module): + """ConvTranspose1d with some builtin handling of asymmetric or causal padding + and normalization. + """ + + def __init__(self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int=1, + causal: bool=False, + norm: str='none', + trim_right_ratio: float=1., + norm_kwargs: tp.Dict[str, tp.Any]={}): + super().__init__() + self.convtr = NormConvTranspose1d( + in_channels, + out_channels, + kernel_size, + stride, + causal=causal, + norm=norm, + norm_kwargs=norm_kwargs) + self.causal = causal + self.trim_right_ratio = trim_right_ratio + assert self.causal or self.trim_right_ratio == 1., \ + "`trim_right_ratio` != 1.0 only makes sense for causal convolutions" + assert self.trim_right_ratio >= 0. and self.trim_right_ratio <= 1. + + def forward(self, x): + kernel_size = self.convtr.convtr.kernel_size[0] + stride = self.convtr.convtr.stride[0] + padding_total = kernel_size - stride + + y = self.convtr(x) + + # We will only trim fixed padding. Extra padding from `pad_for_conv1d` would be + # removed at the very end, when keeping only the right length for the output, + # as removing it here would require also passing the length at the matching layer + # in the encoder. + if self.causal: + # Trim the padding on the right according to the specified ratio + # if trim_right_ratio = 1.0, trim everything from right + padding_right = math.ceil(padding_total * self.trim_right_ratio) + padding_left = padding_total - padding_right + y = unpad1d(y, (padding_left, padding_right)) + else: + # Asymmetric padding required for odd strides + padding_right = padding_total // 2 + padding_left = padding_total - padding_right + y = unpad1d(y, (padding_left, padding_right)) + return y diff --git a/third_party/AcademiCodec/academicodec/modules/lstm.py b/third_party/AcademiCodec/academicodec/modules/lstm.py new file mode 100644 index 0000000..14729fb --- /dev/null +++ b/third_party/AcademiCodec/academicodec/modules/lstm.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""LSTM layers module.""" +from torch import nn + + +class SLSTM(nn.Module): + """ + LSTM without worrying about the hidden state, nor the layout of the data. + Expects input as convolutional layout. + """ + + def __init__(self, dimension: int, num_layers: int=2, skip: bool=True): + super().__init__() + self.skip = skip + self.lstm = nn.LSTM(dimension, dimension, num_layers) + + def forward(self, x): + x = x.permute(2, 0, 1) + y, _ = self.lstm(x) + if self.skip: + y = y + x + y = y.permute(1, 2, 0) + return y diff --git a/third_party/AcademiCodec/academicodec/modules/norm.py b/third_party/AcademiCodec/academicodec/modules/norm.py new file mode 100644 index 0000000..b5a40c0 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/modules/norm.py @@ -0,0 +1,29 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Normalization modules.""" +import typing as tp + +import einops +import torch +from torch import nn + + +class ConvLayerNorm(nn.LayerNorm): + """ + Convolution-friendly LayerNorm that moves channels to last dimensions + before running the normalization and moves them back to original position right after. + """ + + def __init__(self, + normalized_shape: tp.Union[int, tp.List[int], torch.Size], + **kwargs): + super().__init__(normalized_shape, **kwargs) + + def forward(self, x): + x = einops.rearrange(x, 'b ... t -> b t ...') + x = super().forward(x) + x = einops.rearrange(x, 'b t ... -> b ... t') + return diff --git a/third_party/AcademiCodec/academicodec/modules/seanet.py b/third_party/AcademiCodec/academicodec/modules/seanet.py new file mode 100644 index 0000000..209c32a --- /dev/null +++ b/third_party/AcademiCodec/academicodec/modules/seanet.py @@ -0,0 +1,351 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Encodec SEANet-based encoder and decoder implementation.""" +import typing as tp + +import numpy as np +import torch.nn as nn + +from academicodec.modules import SConv1d +from academicodec.modules import SConvTranspose1d +from academicodec.modules import SLSTM + + +class SEANetResnetBlock(nn.Module): + """Residual block from SEANet model. + Args: + dim (int): Dimension of the input/output + kernel_sizes (list): List of kernel sizes for the convolutions. + dilations (list): List of dilations for the convolutions. + activation (str): Activation function. + activation_params (dict): Parameters to provide to the activation function + norm (str): Normalization method. + norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. + causal (bool): Whether to use fully causal convolution. + pad_mode (str): Padding mode for the convolutions. + compress (int): Reduced dimensionality in residual branches (from Demucs v3) + true_skip (bool): Whether to use true skip connection or a simple convolution as the skip connection. 
+ """ + + def __init__(self, + dim: int, + kernel_sizes: tp.List[int]=[3, 1], + dilations: tp.List[int]=[1, 1], + activation: str='ELU', + activation_params: dict={'alpha': 1.0}, + norm: str='weight_norm', + norm_params: tp.Dict[str, tp.Any]={}, + causal: bool=False, + pad_mode: str='reflect', + compress: int=2, + true_skip: bool=True): + super().__init__() + assert len(kernel_sizes) == len( + dilations), 'Number of kernel sizes should match number of dilations' + act = getattr(nn, activation) + hidden = dim // compress + block = [] + for i, (kernel_size, + dilation) in enumerate(zip(kernel_sizes, dilations)): + in_chs = dim if i == 0 else hidden + out_chs = dim if i == len(kernel_sizes) - 1 else hidden + block += [ + act(**activation_params), + SConv1d( + in_chs, + out_chs, + kernel_size=kernel_size, + dilation=dilation, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + pad_mode=pad_mode), + ] + self.block = nn.Sequential(*block) + self.shortcut: nn.Module + if true_skip: + self.shortcut = nn.Identity() + else: + self.shortcut = SConv1d( + dim, + dim, + kernel_size=1, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + pad_mode=pad_mode) + + def forward(self, x): + return self.shortcut(x) + self.block(x) + + +class SEANetEncoder(nn.Module): + """SEANet encoder. + Args: + channels (int): Audio channels. + dimension (int): Intermediate representation dimension. + n_filters (int): Base width for the model. + n_residual_layers (int): nb of residual layers. + ratios (Sequence[int]): kernel size and stride ratios. The encoder uses downsampling ratios instead of + upsampling ratios, hence it will use the ratios in the reverse order to the ones specified here + that must match the decoder order + activation (str): Activation function. + activation_params (dict): Parameters to provide to the activation function + norm (str): Normalization method. + norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. + kernel_size (int): Kernel size for the initial convolution. + last_kernel_size (int): Kernel size for the initial convolution. + residual_kernel_size (int): Kernel size for the residual layers. + dilation_base (int): How much to increase the dilation with each layer. + causal (bool): Whether to use fully causal convolution. + pad_mode (str): Padding mode for the convolutions. + true_skip (bool): Whether to use true skip connection or a simple + (streamable) convolution as the skip connection in the residual network blocks. + compress (int): Reduced dimensionality in residual branches (from Demucs v3). + lstm (int): Number of LSTM layers at the end of the encoder. 
+ """ + + def __init__(self, + channels: int=1, + dimension: int=128, + n_filters: int=32, + n_residual_layers: int=1, + ratios: tp.List[int]=[8, 5, 4, 2], + activation: str='ELU', + activation_params: dict={'alpha': 1.0}, + norm: str='weight_norm', + norm_params: tp.Dict[str, tp.Any]={}, + kernel_size: int=7, + last_kernel_size: int=7, + residual_kernel_size: int=3, + dilation_base: int=2, + causal: bool=False, + pad_mode: str='reflect', + true_skip: bool=False, + compress: int=2, + lstm: int=2): + super().__init__() + self.channels = channels + self.dimension = dimension + self.n_filters = n_filters + self.ratios = list(reversed(ratios)) + del ratios + self.n_residual_layers = n_residual_layers + self.hop_length = np.prod(self.ratios) # 计算乘积 + + act = getattr(nn, activation) + mult = 1 + model: tp.List[nn.Module] = [ + SConv1d( + channels, + mult * n_filters, + kernel_size, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + pad_mode=pad_mode) + ] + # Downsample to raw audio scale + for i, ratio in enumerate(self.ratios): + # Add residual layers + for j in range(n_residual_layers): + model += [ + SEANetResnetBlock( + mult * n_filters, + kernel_sizes=[residual_kernel_size, 1], + dilations=[dilation_base**j, 1], + norm=norm, + norm_params=norm_params, + activation=activation, + activation_params=activation_params, + causal=causal, + pad_mode=pad_mode, + compress=compress, + true_skip=true_skip) + ] + + # Add downsampling layers + model += [ + act(**activation_params), + SConv1d( + mult * n_filters, + mult * n_filters * 2, + kernel_size=ratio * 2, + stride=ratio, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + pad_mode=pad_mode), + ] + mult *= 2 + + if lstm: + model += [SLSTM(mult * n_filters, num_layers=lstm)] + + model += [ + act(**activation_params), SConv1d( + mult * n_filters, + dimension, + last_kernel_size, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + pad_mode=pad_mode) + ] + + self.model = nn.Sequential(*model) + + def forward(self, x): + return self.model(x) + + +class SEANetDecoder(nn.Module): + """SEANet decoder. + Args: + channels (int): Audio channels. + dimension (int): Intermediate representation dimension. + n_filters (int): Base width for the model. + n_residual_layers (int): nb of residual layers. + ratios (Sequence[int]): kernel size and stride ratios + activation (str): Activation function. + activation_params (dict): Parameters to provide to the activation function + final_activation (str): Final activation function after all convolutions. + final_activation_params (dict): Parameters to provide to the activation function + norm (str): Normalization method. + norm_params (dict): Parameters to provide to the underlying normalization used along with the convolution. + kernel_size (int): Kernel size for the initial convolution. + last_kernel_size (int): Kernel size for the initial convolution. + residual_kernel_size (int): Kernel size for the residual layers. + dilation_base (int): How much to increase the dilation with each layer. + causal (bool): Whether to use fully causal convolution. + pad_mode (str): Padding mode for the convolutions. + true_skip (bool): Whether to use true skip connection or a simple + (streamable) convolution as the skip connection in the residual network blocks. + compress (int): Reduced dimensionality in residual branches (from Demucs v3). + lstm (int): Number of LSTM layers at the end of the encoder. + trim_right_ratio (float): Ratio for trimming at the right of the transposed convolution under the causal setup. 
+ If equal to 1.0, it means that all the trimming is done at the right. + """ + + def __init__(self, + channels: int=1, + dimension: int=128, + n_filters: int=32, + n_residual_layers: int=1, + ratios: tp.List[int]=[8, 5, 4, 2], + activation: str='ELU', + activation_params: dict={'alpha': 1.0}, + final_activation: tp.Optional[str]=None, + final_activation_params: tp.Optional[dict]=None, + norm: str='weight_norm', + norm_params: tp.Dict[str, tp.Any]={}, + kernel_size: int=7, + last_kernel_size: int=7, + residual_kernel_size: int=3, + dilation_base: int=2, + causal: bool=False, + pad_mode: str='reflect', + true_skip: bool=False, + compress: int=2, + lstm: int=2, + trim_right_ratio: float=1.0): + super().__init__() + self.dimension = dimension + self.channels = channels + self.n_filters = n_filters + self.ratios = ratios + del ratios + self.n_residual_layers = n_residual_layers + self.hop_length = np.prod(self.ratios) + + act = getattr(nn, activation) + mult = int(2**len(self.ratios)) + model: tp.List[nn.Module] = [ + SConv1d( + dimension, + mult * n_filters, + kernel_size, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + pad_mode=pad_mode) + ] + + if lstm: + model += [SLSTM(mult * n_filters, num_layers=lstm)] + + # Upsample to raw audio scale + for i, ratio in enumerate(self.ratios): + # Add upsampling layers + model += [ + act(**activation_params), + SConvTranspose1d( + mult * n_filters, + mult * n_filters // 2, + kernel_size=ratio * 2, + stride=ratio, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + trim_right_ratio=trim_right_ratio), + ] + # Add residual layers + for j in range(n_residual_layers): + model += [ + SEANetResnetBlock( + mult * n_filters // 2, + kernel_sizes=[residual_kernel_size, 1], + dilations=[dilation_base**j, 1], + activation=activation, + activation_params=activation_params, + norm=norm, + norm_params=norm_params, + causal=causal, + pad_mode=pad_mode, + compress=compress, + true_skip=true_skip) + ] + + mult //= 2 + + # Add final layers + model += [ + act(**activation_params), SConv1d( + n_filters, + channels, + last_kernel_size, + norm=norm, + norm_kwargs=norm_params, + causal=causal, + pad_mode=pad_mode) + ] + # Add optional final activation to decoder (eg. tanh) + if final_activation is not None: + final_act = getattr(nn, final_activation) + final_activation_params = final_activation_params or {} + model += [final_act(**final_activation_params)] + self.model = nn.Sequential(*model) + + def forward(self, z): + y = self.model(z) + return y + + +def test(): + import torch + encoder = SEANetEncoder() + decoder = SEANetDecoder() + x = torch.randn(1, 1, 24000) + z = encoder(x) + print('z ', z.shape) + assert 1 == 2 + assert list(z.shape) == [1, 128, 75], z.shape + y = decoder(z) + assert y.shape == x.shape, (x.shape, y.shape) + + +if __name__ == '__main__': + test() diff --git a/third_party/AcademiCodec/academicodec/modules/transformer.py b/third_party/AcademiCodec/academicodec/modules/transformer.py new file mode 100644 index 0000000..acf55f6 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/modules/transformer.py @@ -0,0 +1,141 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+"""A streamable transformer.""" +import typing as tp + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def create_sin_embedding(positions: torch.Tensor, + dim: int, + max_period: float=10000): + """Create time embedding for the given positions, target dimension `dim`. + """ + # We aim for BTC format + assert dim % 2 == 0 + half_dim = dim // 2 + adim = torch.arange(half_dim, device=positions.device).view(1, 1, -1) + phase = positions / (max_period**(adim / (half_dim - 1))) + return torch.cat( + [ + torch.cos(phase), + torch.sin(phase), + ], dim=-1) + + +class StreamingTransformerEncoderLayer(nn.TransformerEncoderLayer): + def forward(self, x: torch.Tensor, x_past: torch.Tensor, + past_context: int): # type: ignore + if self.norm_first: + sa_input = self.norm1(x) + x = x + self._sa_block(sa_input, x_past, past_context) + x = x + self._ff_block(self.norm2(x)) + else: + sa_input = x + x = self.norm1(x + self._sa_block(sa_input, x_past, past_context)) + x = self.norm2(x + self._ff_block(x)) + + return x, sa_input + + # self-attention block + def _sa_block(self, + x: torch.Tensor, + x_past: torch.Tensor, + past_context: int): # type: ignore + _, T, _ = x.shape + _, H, _ = x_past.shape + + queries = x + keys = torch.cat([x_past, x], dim=1) + values = keys + + queries_pos = torch.arange(H, T + H, device=x.device).view(-1, 1) + keys_pos = torch.arange(T + H, device=x.device).view(1, -1) + delta = queries_pos - keys_pos + valid_access = (delta >= 0) & (delta <= past_context) + x = self.self_attn( + queries, keys, values, attn_mask=~valid_access, + need_weights=False)[0] + return self.dropout1(x) + + +class StreamingTransformerEncoder(nn.Module): + """TransformerEncoder with streaming support. + + Args: + dim (int): dimension of the data. + hidden_scale (int): intermediate dimension of FF module is this times the dimension. + num_heads (int): number of heads. + num_layers (int): number of layers. + max_period (float): maxium period of cosines in the positional embedding. + past_context (int or None): receptive field for the causal mask, infinite if None. + gelu (bool): if true uses GeLUs, otherwise use ReLUs. + norm_in (bool): normalize the input. + dropout (float): dropout probability. + **kwargs: See `nn.TransformerEncoderLayer`. 
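+
+    Example (a minimal sketch; the dimensions, chunk length and layer count below
+    are arbitrary illustrative choices, not values used elsewhere in this repo):
+
+        import torch
+        encoder = StreamingTransformerEncoder(dim=64, num_heads=4, num_layers=2)
+        states, offset = None, 0
+        for chunk in torch.randn(10, 8, 25, 64):  # 10 chunks of shape (B=8, T=25, C=64)
+            out, states, offset = encoder(chunk, states, offset)
+            # `out` has the same shape as `chunk`; `states` carries the per-layer
+            # context needed to continue processing the stream on the next chunk.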
+ """ + + def __init__(self, + dim, + hidden_scale: float=4., + num_heads: int=8, + num_layers: int=5, + max_period: float=10000, + past_context: int=1000, + gelu: bool=True, + norm_in: bool=True, + dropout: float=0., + **kwargs): + super().__init__() + assert dim % num_heads == 0 + hidden_dim = int(dim * hidden_scale) + + self.max_period = max_period + self.past_context = past_context + activation: tp.Any = F.gelu if gelu else F.relu + + self.norm_in: nn.Module + if norm_in: + self.norm_in = nn.LayerNorm(dim) + else: + self.norm_in = nn.Identity() + + self.layers = nn.ModuleList() + for idx in range(num_layers): + self.layers.append( + StreamingTransformerEncoderLayer( + dim, + num_heads, + hidden_dim, + activation=activation, + batch_first=True, + dropout=dropout, + **kwargs)) + + def forward(self, + x: torch.Tensor, + states: tp.Optional[tp.List[torch.Tensor]]=None, + offset: tp.Union[int, torch.Tensor]=0): + B, T, C = x.shape + if states is None: + states = [ + torch.zeros_like(x[:, :1]) for _ in range(1 + len(self.layers)) + ] + + positions = torch.arange(T, device=x.device).view(1, -1, 1) + offset + pos_emb = create_sin_embedding(positions, C, max_period=self.max_period) + + new_state: tp.List[torch.Tensor] = [] + x = self.norm_in(x) + x = x + pos_emb + + for layer_state, layer in zip(states, self.layers): + x, new_layer_state = layer(x, layer_state, self.past_context) + new_layer_state = torch.cat([layer_state, new_layer_state], dim=1) + new_state.append(new_layer_state[:, -self.past_context:, :]) + return x, new_state, offset + T diff --git a/third_party/AcademiCodec/academicodec/quantization/__init__.py b/third_party/AcademiCodec/academicodec/quantization/__init__.py new file mode 100644 index 0000000..d0745cf --- /dev/null +++ b/third_party/AcademiCodec/academicodec/quantization/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# flake8: noqa +from .vq import QuantizedResult +from .vq import ResidualVectorQuantizer diff --git a/third_party/AcademiCodec/academicodec/quantization/ac.py b/third_party/AcademiCodec/academicodec/quantization/ac.py new file mode 100644 index 0000000..1e07592 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/quantization/ac.py @@ -0,0 +1,306 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Arithmetic coder.""" +import io +import math +import random +import typing as tp + +import torch + +from academicodec.binary import BitPacker +from academicodec.binary import BitUnpacker + + +def build_stable_quantized_cdf(pdf: torch.Tensor, + total_range_bits: int, + roundoff: float=1e-8, + min_range: int=2, + check: bool=True) -> torch.Tensor: + """Turn the given PDF into a quantized CDF that splits + [0, 2 ** self.total_range_bits - 1] into chunks of size roughly proportional + to the PDF. + + Args: + pdf (torch.Tensor): probability distribution, shape should be `[N]`. + total_range_bits (int): see `ArithmeticCoder`, the typical range we expect + during the coding process is `[0, 2 ** total_range_bits - 1]`. + roundoff (float): will round the pdf up to that level to remove difference coming + from e.g. evaluating the Language Model on different architectures. + min_range (int): minimum range width. 
Should always be at least 2 for numerical + stability. Use this to avoid pathological behavior is a value + that is expected to be rare actually happens in real life. + check (bool): if True, checks that nothing bad happened, can be deactivated for speed. + """ + pdf = pdf.detach() + if roundoff: + pdf = (pdf / roundoff).floor() * roundoff + # interpolate with uniform distribution to achieve desired minimum probability. + total_range = 2**total_range_bits + cardinality = len(pdf) + alpha = min_range * cardinality / total_range + assert alpha <= 1, "you must reduce min_range" + ranges = (((1 - alpha) * total_range) * pdf).floor().long() + ranges += min_range + quantized_cdf = torch.cumsum(ranges, dim=-1) + if min_range < 2: + raise ValueError("min_range must be at least 2.") + if check: + assert quantized_cdf[-1] <= 2**total_range_bits, quantized_cdf[-1] + if ((quantized_cdf[1:] - quantized_cdf[:-1]) < min_range + ).any() or quantized_cdf[0] < min_range: + raise ValueError("You must increase your total_range_bits.") + return quantized_cdf + + +class ArithmeticCoder: + """ArithmeticCoder, + Let us take a distribution `p` over `N` symbols, and assume we have a stream + of random variables `s_t` sampled from `p`. Let us assume that we have a budget + of `B` bits that we can afford to write on device. There are `2**B` possible numbers, + corresponding to the range `[0, 2 ** B - 1]`. We can map each of those number to a single + sequence `(s_t)` by doing the following: + + 1) Initialize the current range to` [0 ** 2 B - 1]`. + 2) For each time step t, split the current range into contiguous chunks, + one for each possible outcome, with size roughly proportional to `p`. + For instance, if `p = [0.75, 0.25]`, and the range is `[0, 3]`, the chunks + would be `{[0, 2], [3, 3]}`. + 3) Select the chunk corresponding to `s_t`, and replace the current range with this. + 4) When done encoding all the values, just select any value remaining in the range. + + You will notice that this procedure can fail: for instance if at any point in time + the range is smaller than `N`, then we can no longer assign a non-empty chunk to each + possible outcome. Intuitively, the more likely a value is, the less the range width + will reduce, and the longer we can go on encoding values. This makes sense: for any efficient + coding scheme, likely outcomes would take less bits, and more of them can be coded + with a fixed budget. + + In practice, we do not know `B` ahead of time, but we have a way to inject new bits + when the current range decreases below a given limit (given by `total_range_bits`), without + having to redo all the computations. If we encode mostly likely values, we will seldom + need to inject new bits, but a single rare value can deplete our stock of entropy! + + In this explanation, we assumed that the distribution `p` was constant. In fact, the present + code works for any sequence `(p_t)` possibly different for each timestep. + We also assume that `s_t ~ p_t`, but that doesn't need to be true, although the smaller + the KL between the true distribution and `p_t`, the most efficient the coding will be. + + Args: + fo (IO[bytes]): file-like object to which the bytes will be written to. + total_range_bits (int): the range `M` described above is `2 ** total_range_bits. + Any time the current range width fall under this limit, new bits will + be injected to rescale the initial range. 
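+
+    Example (a minimal encoding sketch, loosely following the `test()` function at
+    the bottom of this file; the pdf size and symbol values are arbitrary):
+
+        import io
+        import torch
+        fo = io.BytesIO()
+        coder = ArithmeticCoder(fo)
+        pdf = torch.softmax(torch.randn(16), dim=0)
+        cdf = build_stable_quantized_cdf(pdf, coder.total_range_bits)
+        for symbol in [3, 0, 15, 7]:
+            coder.push(symbol, cdf)
+        coder.flush()
+        # `fo` now holds the compressed bit stream.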
+ """ + + def __init__(self, fo: tp.IO[bytes], total_range_bits: int=24): + assert total_range_bits <= 30 + self.total_range_bits = total_range_bits + self.packer = BitPacker(bits=1, fo=fo) # we push single bits at a time. + self.low: int = 0 + self.high: int = 0 + self.max_bit: int = -1 + self._dbg: tp.List[tp.Any] = [] + self._dbg2: tp.List[tp.Any] = [] + + @property + def delta(self) -> int: + """Return the current range width.""" + return self.high - self.low + 1 + + def _flush_common_prefix(self): + # If self.low and self.high start with the sames bits, + # those won't change anymore as we always just increase the range + # by powers of 2, and we can flush them out to the bit stream. + assert self.high >= self.low, (self.low, self.high) + assert self.high < 2**(self.max_bit + 1) + while self.max_bit >= 0: + b1 = self.low >> self.max_bit + b2 = self.high >> self.max_bit + if b1 == b2: + self.low -= (b1 << self.max_bit) + self.high -= (b1 << self.max_bit) + assert self.high >= self.low, (self.high, self.low, + self.max_bit) + assert self.low >= 0 + self.max_bit -= 1 + self.packer.push(b1) + else: + break + + def push(self, symbol: int, quantized_cdf: torch.Tensor): + """Push the given symbol on the stream, flushing out bits + if possible. + + Args: + symbol (int): symbol to encode with the AC. + quantized_cdf (torch.Tensor): use `build_stable_quantized_cdf` + to build this from your pdf estimate. + """ + while self.delta < 2**self.total_range_bits: + self.low *= 2 + self.high = self.high * 2 + 1 + self.max_bit += 1 + + range_low = 0 if symbol == 0 else quantized_cdf[symbol - 1].item() + range_high = quantized_cdf[symbol].item() - 1 + effective_low = int( + math.ceil(range_low * (self.delta / (2**self.total_range_bits)))) + effective_high = int( + math.floor(range_high * (self.delta / (2**self.total_range_bits)))) + assert self.low <= self.high + self.high = self.low + effective_high + self.low = self.low + effective_low + assert self.low <= self.high, (effective_low, effective_high, range_low, + range_high) + self._dbg.append((self.low, self.high)) + self._dbg2.append((self.low, self.high)) + outs = self._flush_common_prefix() + assert self.low <= self.high + assert self.max_bit >= -1 + assert self.max_bit <= 61, self.max_bit + return outs + + def flush(self): + """Flush the remaining information to the stream. + """ + while self.max_bit >= 0: + b1 = (self.low >> self.max_bit) & 1 + self.packer.push(b1) + self.max_bit -= 1 + self.packer.flush() + + +class ArithmeticDecoder: + """ArithmeticDecoder, see `ArithmeticCoder` for a detailed explanation. + + Note that this must be called with **exactly** the same parameters and sequence + of quantized cdf as the arithmetic encoder or the wrong values will be decoded. + + If the AC encoder current range is [L, H], with `L` and `H` having the some common + prefix (i.e. the same most significant bits), then this prefix will be flushed to the stream. + For instances, having read 3 bits `b1 b2 b3`, we know that `[L, H]` is contained inside + `[b1 b2 b3 0 ... 0 b1 b3 b3 1 ... 1]`. Now this specific sub-range can only be obtained + for a specific sequence of symbols and a binary-search allows us to decode those symbols. + At some point, the prefix `b1 b2 b3` will no longer be sufficient to decode new symbols, + and we will need to read new bits from the stream and repeat the process. 
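+
+    Example (a minimal round-trip sketch; the decoder must pull exactly as many
+    symbols as were pushed, with the same quantized cdf at every step):
+
+        import io
+        import torch
+        pdf = torch.softmax(torch.randn(16), dim=0)
+        fo = io.BytesIO()
+        coder = ArithmeticCoder(fo)
+        cdf = build_stable_quantized_cdf(pdf, coder.total_range_bits)
+        symbols = [3, 0, 15, 7]
+        for s in symbols:
+            coder.push(s, cdf)
+        coder.flush()
+        fo.seek(0)
+        decoder = ArithmeticDecoder(fo)
+        decoded = [decoder.pull(cdf) for _ in symbols]
+        assert decoded == symbols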
+ + """ + + def __init__(self, fo: tp.IO[bytes], total_range_bits: int=24): + self.total_range_bits = total_range_bits + self.low: int = 0 + self.high: int = 0 + self.current: int = 0 + self.max_bit: int = -1 + self.unpacker = BitUnpacker( + bits=1, fo=fo) # we pull single bits at a time. + # Following is for debugging + self._dbg: tp.List[tp.Any] = [] + self._dbg2: tp.List[tp.Any] = [] + self._last: tp.Any = None + + @property + def delta(self) -> int: + return self.high - self.low + 1 + + def _flush_common_prefix(self): + # Given the current range [L, H], if both have a common prefix, + # we know we can remove it from our representation to avoid handling large numbers. + while self.max_bit >= 0: + b1 = self.low >> self.max_bit + b2 = self.high >> self.max_bit + if b1 == b2: + self.low -= (b1 << self.max_bit) + self.high -= (b1 << self.max_bit) + self.current -= (b1 << self.max_bit) + assert self.high >= self.low + assert self.low >= 0 + self.max_bit -= 1 + else: + break + + def pull(self, quantized_cdf: torch.Tensor) -> tp.Optional[int]: + """Pull a symbol, reading as many bits from the stream as required. + This returns `None` when the stream has been exhausted. + + Args: + quantized_cdf (torch.Tensor): use `build_stable_quantized_cdf` + to build this from your pdf estimate. This must be **exatly** + the same cdf as the one used at encoding time. + """ + while self.delta < 2**self.total_range_bits: + bit = self.unpacker.pull() + if bit is None: + return None + self.low *= 2 + self.high = self.high * 2 + 1 + self.current = self.current * 2 + bit + self.max_bit += 1 + + def bin_search(low_idx: int, high_idx: int): + # Binary search is not just for coding interviews :) + if high_idx < low_idx: + raise RuntimeError("Binary search failed") + mid = (low_idx + high_idx) // 2 + range_low = quantized_cdf[mid - 1].item() if mid > 0 else 0 + range_high = quantized_cdf[mid].item() - 1 + effective_low = int( + math.ceil(range_low * (self.delta / (2**self.total_range_bits) + ))) + effective_high = int( + math.floor(range_high * (self.delta / (2**self.total_range_bits) + ))) + low = effective_low + self.low + high = effective_high + self.low + if self.current >= low: + if self.current <= high: + return (mid, low, high, self.current) + else: + return bin_search(mid + 1, high_idx) + else: + return bin_search(low_idx, mid - 1) + + self._last = (self.low, self.high, self.current, self.max_bit) + sym, self.low, self.high, self.current = bin_search( + 0, len(quantized_cdf) - 1) + self._dbg.append((self.low, self.high, self.current)) + self._flush_common_prefix() + self._dbg2.append((self.low, self.high, self.current)) + + return sym + + +def test(): + torch.manual_seed(1234) + random.seed(1234) + for _ in range(4): + pdfs = [] + cardinality = random.randrange(4000) + steps = random.randrange(100, 500) + fo = io.BytesIO() + encoder = ArithmeticCoder(fo) + symbols = [] + for step in range(steps): + pdf = torch.softmax(torch.randn(cardinality), dim=0) + pdfs.append(pdf) + q_cdf = build_stable_quantized_cdf(pdf, encoder.total_range_bits) + symbol = torch.multinomial(pdf, 1).item() + symbols.append(symbol) + encoder.push(symbol, q_cdf) + encoder.flush() + + fo.seek(0) + decoder = ArithmeticDecoder(fo) + for idx, (pdf, symbol) in enumerate(zip(pdfs, symbols)): + q_cdf = build_stable_quantized_cdf(pdf, encoder.total_range_bits) + decoded_symbol = decoder.pull(q_cdf) + assert decoded_symbol == symbol, idx + assert decoder.pull(torch.zeros(1)) is None + + +if __name__ == "__main__": + test() diff --git 
a/third_party/AcademiCodec/academicodec/quantization/core_vq.py b/third_party/AcademiCodec/academicodec/quantization/core_vq.py new file mode 100644 index 0000000..121b398 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/quantization/core_vq.py @@ -0,0 +1,370 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +# This implementation is inspired from +# https://github.com/lucidrains/vector-quantize-pytorch +# which is released under MIT License. Hereafter, the original license: +# MIT License +# +# Copyright (c) 2020 Phil Wang +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. +"""Core vector quantization implementation.""" +import typing as tp + +import torch +import torch.nn.functional as F +from einops import rearrange +from einops import repeat +from torch import nn + +from academicodec.quantization.distrib import broadcast_tensors + + +def default(val: tp.Any, d: tp.Any) -> tp.Any: + return val if val is not None else d + + +def ema_inplace(moving_avg, new, decay: float): + moving_avg.data.mul_(decay).add_(new, alpha=(1 - decay)) + + +def laplace_smoothing(x, n_categories: int, epsilon: float=1e-5): + return (x + epsilon) / (x.sum() + n_categories * epsilon) + + +def uniform_init(*shape: int): + t = torch.empty(shape) + nn.init.kaiming_uniform_(t) + return t + + +def sample_vectors(samples, num: int): + num_samples, device = samples.shape[0], samples.device + + if num_samples >= num: + indices = torch.randperm(num_samples, device=device)[:num] + else: + indices = torch.randint(0, num_samples, (num, ), device=device) + + return samples[indices] + + +def kmeans(samples, num_clusters: int, num_iters: int=10): + dim, dtype = samples.shape[-1], samples.dtype + + means = sample_vectors(samples, num_clusters) + + for _ in range(num_iters): + diffs = rearrange(samples, "n d -> n () d") - rearrange(means, + "c d -> () c d") + dists = -(diffs**2).sum(dim=-1) + + buckets = dists.max(dim=-1).indices + bins = torch.bincount(buckets, minlength=num_clusters) + zero_mask = bins == 0 + bins_min_clamped = bins.masked_fill(zero_mask, 1) + + new_means = buckets.new_zeros(num_clusters, dim, dtype=dtype) + new_means.scatter_add_(0, repeat(buckets, "n -> n d", d=dim), samples) + new_means = new_means / bins_min_clamped[..., None] + + means = torch.where(zero_mask[..., None], means, new_means) + + return means, bins + + +class 
EuclideanCodebook(nn.Module): + """Codebook with Euclidean distance. + Args: + dim (int): Dimension. + codebook_size (int): Codebook size. + kmeans_init (bool): Whether to use k-means to initialize the codebooks. + If set to true, run the k-means algorithm on the first training batch and use + the learned centroids as initialization. + kmeans_iters (int): Number of iterations used for k-means algorithm at initialization. + decay (float): Decay for exponential moving average over the codebooks. + epsilon (float): Epsilon value for numerical stability. + threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes + that have an exponential moving average cluster size less than the specified threshold with + randomly selected vector from the current batch. + """ + + def __init__( + self, + dim: int, + codebook_size: int, + kmeans_init: int=False, + kmeans_iters: int=10, + decay: float=0.99, + epsilon: float=1e-5, + threshold_ema_dead_code: int=2, ): + super().__init__() + self.decay = decay + init_fn: tp.Union[ + tp.Callable[..., torch.Tensor], + tp.Any] = uniform_init if not kmeans_init else torch.zeros + embed = init_fn(codebook_size, dim) + + self.codebook_size = codebook_size + + self.kmeans_iters = kmeans_iters + self.epsilon = epsilon + self.threshold_ema_dead_code = threshold_ema_dead_code + + self.register_buffer("inited", torch.Tensor([not kmeans_init])) + self.register_buffer("cluster_size", torch.zeros(codebook_size)) + self.register_buffer("embed", embed) + self.register_buffer("embed_avg", embed.clone()) + + @torch.jit.ignore + def init_embed_(self, data): + if self.inited: + return + + embed, cluster_size = kmeans(data, self.codebook_size, + self.kmeans_iters) + self.embed.data.copy_(embed) + self.embed_avg.data.copy_(embed.clone()) + self.cluster_size.data.copy_(cluster_size) + self.inited.data.copy_(torch.Tensor([True])) + # Make sure all buffers across workers are in sync after initialization + broadcast_tensors(self.buffers()) + + def replace_(self, samples, mask): + modified_codebook = torch.where( + mask[..., None], + sample_vectors(samples, self.codebook_size), self.embed) + self.embed.data.copy_(modified_codebook) + + def expire_codes_(self, batch_samples): + if self.threshold_ema_dead_code == 0: + return + + expired_codes = self.cluster_size < self.threshold_ema_dead_code + if not torch.any(expired_codes): + return + + batch_samples = rearrange(batch_samples, "... d -> (...) d") + self.replace_(batch_samples, mask=expired_codes) + broadcast_tensors(self.buffers()) + + def preprocess(self, x): + x = rearrange(x, "... d -> (...) 
d") + return x + + def quantize(self, x): + embed = self.embed.t() + dist = -(x.pow(2).sum(1, keepdim=True) - 2 * x @ embed + + embed.pow(2).sum(0, keepdim=True)) + embed_ind = dist.max(dim=-1).indices + return embed_ind + + def postprocess_emb(self, embed_ind, shape): + return embed_ind.view(*shape[:-1]) + + def dequantize(self, embed_ind): + quantize = F.embedding(embed_ind, self.embed) + return quantize + + def encode(self, x): + shape = x.shape + # pre-process + x = self.preprocess(x) + # quantize + embed_ind = self.quantize(x) + # post-process + embed_ind = self.postprocess_emb(embed_ind, shape) + return embed_ind + + def decode(self, embed_ind): + quantize = self.dequantize(embed_ind) + return quantize + + def forward(self, x): + shape, dtype = x.shape, x.dtype + x = self.preprocess(x) + + self.init_embed_(x) + + embed_ind = self.quantize(x) + embed_onehot = F.one_hot(embed_ind, self.codebook_size).type(dtype) + embed_ind = self.postprocess_emb(embed_ind, shape) + quantize = self.dequantize(embed_ind) + + if self.training: + # We do the expiry of code at that point as buffers are in sync + # and all the workers will take the same decision. + self.expire_codes_(x) + ema_inplace(self.cluster_size, embed_onehot.sum(0), self.decay) + embed_sum = x.t() @ embed_onehot + ema_inplace(self.embed_avg, embed_sum.t(), self.decay) + cluster_size = ( + laplace_smoothing(self.cluster_size, self.codebook_size, + self.epsilon) * self.cluster_size.sum()) + embed_normalized = self.embed_avg / cluster_size.unsqueeze(1) + self.embed.data.copy_(embed_normalized) + + return quantize, embed_ind + + +class VectorQuantization(nn.Module): + """Vector quantization implementation. + Currently supports only euclidean distance. + Args: + dim (int): Dimension + codebook_size (int): Codebook size + codebook_dim (int): Codebook dimension. If not defined, uses the specified dimension in dim. + decay (float): Decay for exponential moving average over the codebooks. + epsilon (float): Epsilon value for numerical stability. + kmeans_init (bool): Whether to use kmeans to initialize the codebooks. + kmeans_iters (int): Number of iterations used for kmeans initialization. + threshold_ema_dead_code (int): Threshold for dead code expiration. Replace any codes + that have an exponential moving average cluster size less than the specified threshold with + randomly selected vector from the current batch. + commitment_weight (float): Weight for commitment loss. 
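+
+    Example (a minimal sketch with arbitrary sizes; inputs follow the
+    `[batch, dim, time]` layout expected by `forward`):
+
+        import torch
+        vq = VectorQuantization(dim=128, codebook_size=1024)
+        x = torch.randn(2, 128, 50)
+        quantized, codes, loss = vq(x)   # quantized: (2, 128, 50), codes: (2, 50)
+        codes = vq.encode(x)             # indices only, no loss term
+        recon = vq.decode(codes)         # back to (2, 128, 50)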
+ """ + + def __init__( + self, + dim: int, + codebook_size: int, + codebook_dim: tp.Optional[int]=None, + decay: float=0.99, + epsilon: float=1e-5, + kmeans_init: bool=True, + kmeans_iters: int=50, + threshold_ema_dead_code: int=2, + commitment_weight: float=1., ): + super().__init__() + _codebook_dim: int = default(codebook_dim, dim) + + requires_projection = _codebook_dim != dim + self.project_in = (nn.Linear(dim, _codebook_dim) + if requires_projection else nn.Identity()) + self.project_out = (nn.Linear(_codebook_dim, dim) + if requires_projection else nn.Identity()) + + self.epsilon = epsilon + self.commitment_weight = commitment_weight + + self._codebook = EuclideanCodebook( + dim=_codebook_dim, + codebook_size=codebook_size, + kmeans_init=kmeans_init, + kmeans_iters=kmeans_iters, + decay=decay, + epsilon=epsilon, + threshold_ema_dead_code=threshold_ema_dead_code) + self.codebook_size = codebook_size + + @property + def codebook(self): + return self._codebook.embed + + def encode(self, x): + x = rearrange(x, "b d n -> b n d") + x = self.project_in(x) + embed_in = self._codebook.encode(x) + return embed_in + + def decode(self, embed_ind): + quantize = self._codebook.decode(embed_ind) + quantize = self.project_out(quantize) + quantize = rearrange(quantize, "b n d -> b d n") + return quantize + + def forward(self, x): + device = x.device + x = rearrange(x, "b d n -> b n d") + x = self.project_in(x) + + quantize, embed_ind = self._codebook(x) + + if self.training: + quantize = x + (quantize - x).detach() + + loss = torch.tensor([0.0], device=device, requires_grad=self.training) + + if self.training: + if self.commitment_weight > 0: + commit_loss = F.mse_loss(quantize.detach(), x) + loss = loss + commit_loss * self.commitment_weight + + quantize = self.project_out(quantize) + quantize = rearrange(quantize, "b n d -> b d n") + return quantize, embed_ind, loss + + +class ResidualVectorQuantization(nn.Module): + """Residual vector quantization implementation. + Follows Algorithm 1. 
in https://arxiv.org/pdf/2107.03312.pdf + """ + + def __init__(self, *, num_quantizers, **kwargs): + super().__init__() + self.layers = nn.ModuleList( + [VectorQuantization(**kwargs) for _ in range(num_quantizers)]) + + def forward(self, x, n_q: tp.Optional[int]=None): + quantized_out = 0.0 + residual = x + + all_losses = [] + all_indices = [] + + n_q = n_q or len(self.layers) + + for layer in self.layers[:n_q]: + quantized, indices, loss = layer(residual) + residual = residual - quantized + quantized_out = quantized_out + quantized + + all_indices.append(indices) + all_losses.append(loss) + + out_losses, out_indices = map(torch.stack, (all_losses, all_indices)) + return quantized_out, out_indices, out_losses + + def encode(self, + x: torch.Tensor, + n_q: tp.Optional[int]=None, + st: tp.Optional[int]=None) -> torch.Tensor: + residual = x + all_indices = [] + n_q = n_q or len(self.layers) + st = st or 0 + for layer in self.layers[st:n_q]: # 设置解码的起止layer + indices = layer.encode(residual) + quantized = layer.decode(indices) + residual = residual - quantized + all_indices.append(indices) + out_indices = torch.stack(all_indices) + return out_indices + + def decode(self, q_indices: torch.Tensor) -> torch.Tensor: + quantized_out = torch.tensor(0.0, device=q_indices.device) + for i, indices in enumerate(q_indices): + layer = self.layers[i] + quantized = layer.decode(indices) + quantized_out = quantized_out + quantized + return quantized_out diff --git a/third_party/AcademiCodec/academicodec/quantization/distrib.py b/third_party/AcademiCodec/academicodec/quantization/distrib.py new file mode 100644 index 0000000..2ee5de0 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/quantization/distrib.py @@ -0,0 +1,130 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Torch distributed utilities.""" +import typing as tp + +import torch + + +def rank(): + if torch.distributed.is_initialized(): + return torch.distributed.get_rank() + else: + return 0 + + +def world_size(): + if torch.distributed.is_initialized(): + return torch.distributed.get_world_size() + else: + return 1 + + +def is_distributed(): + return world_size() > 1 + + +def all_reduce(tensor: torch.Tensor, op=torch.distributed.ReduceOp.SUM): + if is_distributed(): + return torch.distributed.all_reduce(tensor, op) + + +def _is_complex_or_float(tensor): + return torch.is_floating_point(tensor) or torch.is_complex(tensor) + + +def _check_number_of_params(params: tp.List[torch.Tensor]): + # utility function to check that the number of params in all workers is the same, + # and thus avoid a deadlock with distributed all reduce. + if not is_distributed() or not params: + return + #print('params[0].device ', params[0].device) + tensor = torch.tensor( + [len(params)], device=params[0].device, dtype=torch.long) + all_reduce(tensor) + if tensor.item() != len(params) * world_size(): + # If not all the workers have the same number, for at least one of them, + # this inequality will be verified. + raise RuntimeError( + f"Mismatch in number of params: ours is {len(params)}, " + "at least one worker has a different one.") + + +def broadcast_tensors(tensors: tp.Iterable[torch.Tensor], src: int=0): + """Broadcast the tensors from the given parameters to all workers. + This can be used to ensure that all workers have the same model to start with. 
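+
+    Example (a minimal sketch; `model` is a hypothetical module built identically
+    on every rank, and the call is a no-op when not running distributed):
+
+        import torch.nn as nn
+        model = nn.Linear(4, 4)
+        broadcast_tensors(model.parameters())  # sync weights from rank 0
+        broadcast_tensors(model.buffers())     # sync buffers (e.g. EMA codebook stats)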
+ """ + if not is_distributed(): + return + tensors = [tensor for tensor in tensors if _is_complex_or_float(tensor)] + _check_number_of_params(tensors) + handles = [] + for tensor in tensors: + # src = int(rank()) # added code + handle = torch.distributed.broadcast( + tensor.data, src=src, async_op=True) + handles.append(handle) + for handle in handles: + handle.wait() + + +def sync_buffer(buffers, average=True): + """ + Sync grad for buffers. If average is False, broadcast instead of averaging. + """ + if not is_distributed(): + return + handles = [] + for buffer in buffers: + if torch.is_floating_point(buffer.data): + if average: + handle = torch.distributed.all_reduce( + buffer.data, + op=torch.distributed.ReduceOp.SUM, + async_op=True) + else: + handle = torch.distributed.broadcast( + buffer.data, src=0, async_op=True) + handles.append((buffer, handle)) + for buffer, handle in handles: + handle.wait() + if average: + buffer.data /= world_size + + +def sync_grad(params): + """ + Simpler alternative to DistributedDataParallel, that doesn't rely + on any black magic. For simple models it can also be as fast. + Just call this on your model parameters after the call to backward! + """ + if not is_distributed(): + return + handles = [] + for p in params: + if p.grad is not None: + handle = torch.distributed.all_reduce( + p.grad.data, op=torch.distributed.ReduceOp.SUM, async_op=True) + handles.append((p, handle)) + for p, handle in handles: + handle.wait() + p.grad.data /= world_size() + + +def average_metrics(metrics: tp.Dict[str, float], count=1.): + """Average a dictionary of metrics across all workers, using the optional + `count` as unormalized weight. + """ + if not is_distributed(): + return metrics + keys, values = zip(*metrics.items()) + device = 'cuda' if torch.cuda.is_available() else 'cpu' + tensor = torch.tensor( + list(values) + [1], device=device, dtype=torch.float32) + tensor *= count + all_reduce(tensor) + averaged = (tensor[:-1] / tensor[-1]).cpu().tolist() + return dict(zip(keys, averaged)) diff --git a/third_party/AcademiCodec/academicodec/quantization/vq.py b/third_party/AcademiCodec/academicodec/quantization/vq.py new file mode 100644 index 0000000..dff877c --- /dev/null +++ b/third_party/AcademiCodec/academicodec/quantization/vq.py @@ -0,0 +1,121 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Residual vector quantizer implementation.""" +import math +import typing as tp +from dataclasses import dataclass +from dataclasses import field + +import torch +from torch import nn + +from academicodec.quantization.core_vq import ResidualVectorQuantization + + +@dataclass +class QuantizedResult: + quantized: torch.Tensor + codes: torch.Tensor + bandwidth: torch.Tensor # bandwidth in kb/s used, per batch item. + penalty: tp.Optional[torch.Tensor] = None + metrics: dict = field(default_factory=dict) + + +class ResidualVectorQuantizer(nn.Module): + """Residual Vector Quantizer. + Args: + dimension (int): Dimension of the codebooks. + n_q (int): Number of residual vector quantizers used. + bins (int): Codebook size. + decay (float): Decay for exponential moving average over the codebooks. + kmeans_init (bool): Whether to use kmeans to initialize the codebooks. + kmeans_iters (int): Number of iterations used for kmeans initialization. + threshold_ema_dead_code (int): Threshold for dead code expiration. 
Replace any codes + that have an exponential moving average cluster size less than the specified threshold with + randomly selected vector from the current batch. + """ + + def __init__( + self, + dimension: int=256, + n_q: int=8, + bins: int=1024, + decay: float=0.99, + kmeans_init: bool=True, + kmeans_iters: int=50, + threshold_ema_dead_code: int=2, ): + super().__init__() + self.n_q = n_q + self.dimension = dimension + self.bins = bins + self.decay = decay + self.kmeans_init = kmeans_init + self.kmeans_iters = kmeans_iters + self.threshold_ema_dead_code = threshold_ema_dead_code + self.vq = ResidualVectorQuantization( + dim=self.dimension, + codebook_size=self.bins, + num_quantizers=self.n_q, + decay=self.decay, + kmeans_init=self.kmeans_init, + kmeans_iters=self.kmeans_iters, + threshold_ema_dead_code=self.threshold_ema_dead_code, ) + + def forward(self, + x: torch.Tensor, + sample_rate: int, + bandwidth: tp.Optional[float]=None) -> QuantizedResult: + """Residual vector quantization on the given input tensor. + Args: + x (torch.Tensor): Input tensor. + sample_rate (int): Sample rate of the input tensor. + bandwidth (float): Target bandwidth. + Returns: + QuantizedResult: + The quantized (or approximately quantized) representation with + the associated bandwidth and any penalty term for the loss. + """ + bw_per_q = self.get_bandwidth_per_quantizer(sample_rate) + n_q = self.get_num_quantizers_for_bandwidth(sample_rate, bandwidth) + quantized, codes, commit_loss = self.vq(x, n_q=n_q) + bw = torch.tensor(n_q * bw_per_q).to(x) + return quantized, codes, bw, torch.mean(commit_loss) + #return QuantizedResult(quantized, codes, bw, penalty=torch.mean(commit_loss)) + + def get_num_quantizers_for_bandwidth( + self, sample_rate: int, bandwidth: tp.Optional[float]=None) -> int: + """Return n_q based on specified target bandwidth. + """ + bw_per_q = self.get_bandwidth_per_quantizer(sample_rate) + n_q = self.n_q + if bandwidth and bandwidth > 0.: + n_q = int(max(1, math.floor(bandwidth / bw_per_q))) + return n_q + + def get_bandwidth_per_quantizer(self, sample_rate: int): + """Return bandwidth per quantizer for a given input sample rate. + """ + return math.log2(self.bins) * sample_rate / 1000 + + def encode(self, + x: torch.Tensor, + sample_rate: int, + bandwidth: tp.Optional[float]=None, + st: tp.Optional[int]=None) -> torch.Tensor: + """Encode a given input tensor with the specified sample rate at the given bandwidth. + The RVQ encode method sets the appropriate number of quantizer to use + and returns indices for each quantizer. + """ + n_q = self.get_num_quantizers_for_bandwidth(sample_rate, bandwidth) + st = st or 0 + codes = self.vq.encode(x, n_q=n_q, st=st) + return codes + + def decode(self, codes: torch.Tensor) -> torch.Tensor: + """Decode the given codes to the quantized representation. 
+ """ + quantized = self.vq.decode(codes) + return quantized diff --git a/third_party/AcademiCodec/academicodec/utils.py b/third_party/AcademiCodec/academicodec/utils.py new file mode 100644 index 0000000..cdc7407 --- /dev/null +++ b/third_party/AcademiCodec/academicodec/utils.py @@ -0,0 +1,220 @@ +import glob +import json +import os +import random +import sys +import time +import warnings + +import matplotlib +import numpy as np +import torch +import yaml +from torch import distributed as dist +from torch.nn.utils import weight_norm +matplotlib.use("Agg") +import matplotlib.pylab as plt +import re +import pathlib + + +def seed_everything(seed, cudnn_deterministic=False): + """ + Function that sets seed for pseudo-random number generators in: + pytorch, numpy, python.random + + Args: + seed: the integer value seed for global random state + """ + if seed is not None: + # print(f"Global seed set to {seed}") + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + + # if cudnn_deterministic: + # torch.backends.cudnn.deterministic = True + # warnings.warn('You have chosen to seed training. ' + # 'This will turn on the CUDNN deterministic setting, ' + # 'which can slow down your training considerably! ' + # 'You may see unexpected behavior when restarting ' + # 'from checkpoints.') + + +def is_primary(): + return get_rank() == 0 + + +def get_rank(): + if not dist.is_available(): + return 0 + if not dist.is_initialized(): + return 0 + + return dist.get_rank() + + +def load_yaml_config(path): + with open(path) as f: + config = yaml.full_load(f) + return config + + +def save_config_to_yaml(config, path): + assert path.endswith('.yaml') + with open(path, 'w') as f: + f.write(yaml.dump(config)) + f.close() + + +def save_dict_to_json(d, path, indent=None): + json.dump(d, open(path, 'w'), indent=indent) + + +def load_dict_from_json(path): + return json.load(open(path, 'r')) + + +def write_args(args, path): + args_dict = dict((name, getattr(args, name)) for name in dir(args) + if not name.startswith('_')) + with open(path, 'a') as args_file: + args_file.write('==> torch version: {}\n'.format(torch.__version__)) + args_file.write( + '==> cudnn version: {}\n'.format(torch.backends.cudnn.version())) + args_file.write('==> Cmd:\n') + args_file.write(str(sys.argv)) + args_file.write('\n==> args:\n') + for k, v in sorted(args_dict.items()): + args_file.write(' %s: %s\n' % (str(k), str(v))) + args_file.close() + + +class Logger(object): + def __init__(self, args): + self.args = args + self.save_dir = args.save_dir + self.is_primary = is_primary() + + if self.is_primary: + os.makedirs(self.save_dir, exist_ok=True) + + # save the args and config + self.config_dir = os.path.join(self.save_dir, 'configs') + os.makedirs(self.config_dir, exist_ok=True) + file_name = os.path.join(self.config_dir, 'args.txt') + write_args(args, file_name) + + log_dir = os.path.join(self.save_dir, 'logs') + if not os.path.exists(log_dir): + os.makedirs(log_dir, exist_ok=True) + self.text_writer = open(os.path.join(log_dir, 'log.txt'), + 'a') # 'w') + if args.tensorboard: + self.log_info('using tensorboard') + self.tb_writer = torch.utils.tensorboard.SummaryWriter( + log_dir=log_dir + ) # tensorboard.SummaryWriter(log_dir=log_dir) + else: + self.tb_writer = None + + def save_config(self, config): + if self.is_primary: + save_config_to_yaml(config, + os.path.join(self.config_dir, 'config.yaml')) + + def log_info(self, info, check_primary=True): + if self.is_primary or (not check_primary): 
+ print(info) + if self.is_primary: + info = str(info) + time_str = time.strftime('%Y-%m-%d-%H-%M') + info = '{}: {}'.format(time_str, info) + if not info.endswith('\n'): + info += '\n' + self.text_writer.write(info) + self.text_writer.flush() + + def add_scalar(self, **kargs): + """Log a scalar variable.""" + if self.is_primary: + if self.tb_writer is not None: + self.tb_writer.add_scalar(**kargs) + + def add_scalars(self, **kargs): + """Log a scalar variable.""" + if self.is_primary: + if self.tb_writer is not None: + self.tb_writer.add_scalars(**kargs) + + def add_image(self, **kargs): + """Log a scalar variable.""" + if self.is_primary: + if self.tb_writer is not None: + self.tb_writer.add_image(**kargs) + + def add_images(self, **kargs): + """Log a scalar variable.""" + if self.is_primary: + if self.tb_writer is not None: + self.tb_writer.add_images(**kargs) + + def close(self): + if self.is_primary: + self.text_writer.close() + self.tb_writer.close() + + +def plot_spectrogram(spectrogram): + fig, ax = plt.subplots(figsize=(10, 2)) + im = ax.imshow( + spectrogram, aspect="auto", origin="lower", interpolation='none') + plt.colorbar(im, ax=ax) + + fig.canvas.draw() + plt.close() + + return fig + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print("Loading '{}'".format(filepath)) + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict + + +def save_checkpoint(filepath, obj, num_ckpt_keep=5): + name = re.match(r'(do|g)_\d+', pathlib.Path(filepath).name).group(1) + ckpts = sorted(pathlib.Path(filepath).parent.glob(f'{name}_*')) + if len(ckpts) > num_ckpt_keep: + [os.remove(c) for c in ckpts[:-num_ckpt_keep]] + print("Saving checkpoint to {}".format(filepath)) + torch.save(obj, filepath) + print("Complete.") + + +def scan_checkpoint(cp_dir, prefix): + pattern = os.path.join(cp_dir, prefix + '????????') + cp_list = glob.glob(pattern) + if len(cp_list) == 0: + return None + return sorted(cp_list)[-1] diff --git a/third_party/AcademiCodec/egs/Encodec_16k_320d/path.sh b/third_party/AcademiCodec/egs/Encodec_16k_320d/path.sh new file mode 100644 index 0000000..9ef6052 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_16k_320d/path.sh @@ -0,0 +1 @@ +../Encodec_24k_32d/path.sh \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/Encodec_16k_320d/readme.md b/third_party/AcademiCodec/egs/Encodec_16k_320d/readme.md new file mode 100644 index 0000000..177d6fb --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_16k_320d/readme.md @@ -0,0 +1,16 @@ +# The training code of Encodec + +### Note that, this part of code is based on Facebook's Encodec. We just provide the training process. The license is the same as Encodec. + +### For Training +set the right path to start.sh +`bash start.sh` + +### For Inference +if you want to use our checkpoint. Run the following
+```bash +mkdir checkpoint +cd checkpoint +wget https://huggingface.co/Dongchao/AcademiCodec/resolve/main/encodec_16khz_320d.pth +bash test.sh # set the root in test.sh, before runing it. +``` \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/Encodec_16k_320d/start.sh b/third_party/AcademiCodec/egs/Encodec_16k_320d/start.sh new file mode 100644 index 0000000..7c36e01 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_16k_320d/start.sh @@ -0,0 +1,18 @@ +#!/bin/bash +source path.sh +log_root=logs +# 16kHz *.wav in train_data_dir +train_data_dir=dump/train +valid_data_dir=dump/valid + +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +python3 -m torch.distributed.launch --nproc_per_node 8 ${BIN_DIR}/main_launch.py \ + --BATCH_SIZE 16 \ + --N_EPOCHS 300 \ + --save_dir ${log_root} \ + --PATH ${log_root} \ + --train_data_path ${train_data_dir} \ + --valid_data_path ${valid_data_dir} \ + --sr 16000 \ + --ratios 8 5 4 2 \ + --target_bandwidths 1 1.5 2 4 6 12 diff --git a/third_party/AcademiCodec/egs/Encodec_16k_320d/test.sh b/third_party/AcademiCodec/egs/Encodec_16k_320d/test.sh new file mode 100644 index 0000000..b864aa9 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_16k_320d/test.sh @@ -0,0 +1,12 @@ +#!/bin/bash +source path.sh + +python3 ${BIN_DIR}/test.py \ + --input=./test_wav \ + --output=./output \ + --resume_path=checkpoint/encodec_16k_320d.pth \ + --sr=16000 \ + --ratios 8 5 4 2 \ + --target_bandwidths 1 1.5 2 4 6 12 \ + --target_bw=12 \ + -r diff --git a/third_party/AcademiCodec/egs/Encodec_24k_240d/path.sh b/third_party/AcademiCodec/egs/Encodec_24k_240d/path.sh new file mode 100644 index 0000000..9ef6052 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_240d/path.sh @@ -0,0 +1 @@ +../Encodec_24k_32d/path.sh \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/Encodec_24k_240d/readme.md b/third_party/AcademiCodec/egs/Encodec_24k_240d/readme.md new file mode 100644 index 0000000..ddd45a1 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_240d/readme.md @@ -0,0 +1,28 @@ +# The training code of Encodec + +### Note that, this part of code is based on Facebook's Encodec. We just provide the training process. The license is the same as Encodec. + +### For Training +set the right path to statr/start.sh + +run: `bash start.sh` + +### For Finetune +If you want to finetune the model, you can use following instruct: +` +python3 main3_ddp.py --BATCH_SIZE 16 --N_EPOCHS 300 \ + --save_dir path_to_save_log \ + --PATH path_to_save_model \ + --train_data_path path_to_training_data \ + --valid_data_path path_to_val_data \ + --resume --resume_path the_model_path +` + +### For Inference +if you want to use our checkpoint. Run the following
+```bash +mkdir checkpoint +cd checkpoint +wget https://huggingface.co/Dongchao/AcademiCodec/resolve/main/encodec_24khz_240d.pth +bash test.sh # set the root in test.sh, before runing it. +``` \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/Encodec_24k_240d/start.sh b/third_party/AcademiCodec/egs/Encodec_24k_240d/start.sh new file mode 100644 index 0000000..8c6c165 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_240d/start.sh @@ -0,0 +1,18 @@ +#!/bin/bash +source path.sh +log_root=logs +# 24kHz *.wav in train_data_dir +train_data_dir=dump/train +valid_data_dir=dump/valid + +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +python3 -m torch.distributed.launch --nproc_per_node 8 ${BIN_DIR}/main_launch.py \ + --BATCH_SIZE 16 \ + --N_EPOCHS 300 \ + --save_dir ${log_root} \ + --PATH ${log_root} \ + --train_data_path ${train_data_dir} \ + --valid_data_path ${valid_data_dir} \ + --sr 24000 \ + --ratios 6 5 4 2 \ + --target_bandwidths 1 2 4 8 12 \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/Encodec_24k_240d/test.sh b/third_party/AcademiCodec/egs/Encodec_24k_240d/test.sh new file mode 100644 index 0000000..2bc83c7 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_240d/test.sh @@ -0,0 +1,13 @@ +#!/bin/bash +source path.sh + +python3 ${BIN_DIR}/test.py \ + --input=./test_wav \ + --output=./output \ + --resume_path=checkpoint/encodec_24khz_240d.pth \ + --sr=24000 \ + --ratios 6 5 4 2 \ + --target_bandwidths 1 2 4 8 12 \ + --target_bw=12 \ + -r + \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/Encodec_24k_32d/path.sh b/third_party/AcademiCodec/egs/Encodec_24k_32d/path.sh new file mode 100644 index 0000000..9d23a5d --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_32d/path.sh @@ -0,0 +1,6 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../` + +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} +MODEL=encodec +export BIN_DIR=${MAIN_ROOT}/academicodec/models/${MODEL} \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/Encodec_24k_32d/readme.md b/third_party/AcademiCodec/egs/Encodec_24k_32d/readme.md new file mode 100644 index 0000000..75d70f3 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_32d/readme.md @@ -0,0 +1,17 @@ +# The training code of Encodec + +### Note that, this part of code is based on Facebook's Encodec. We just provide the training process. The license is the same as Encodec. + +### For Training +set the right path to start.sh + +`bash start.sh` + +### For Inference +if you want to use our checkpoint. Run the following
+```bash +mkdir checkpoint +cd checkpoint` +wget https://huggingface.co/Dongchao/AcademiCodec/resolve/main/encodec_24khz_32d.pth +bash test.sh # set the root in test.sh, before runing it. +``` diff --git a/third_party/AcademiCodec/egs/Encodec_24k_32d/start.sh b/third_party/AcademiCodec/egs/Encodec_24k_32d/start.sh new file mode 100644 index 0000000..d2a2b55 --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_32d/start.sh @@ -0,0 +1,18 @@ +#!/bin/bash +source path.sh +log_root=logs +# 24kHz *.wav in train_data_dir +train_data_dir=dump/train +valid_data_dir=dump/valid + +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +python3 -m torch.distributed.launch --nproc_per_node 8 ${BIN_DIR}/main_launch.py \ + --BATCH_SIZE 16 \ + --N_EPOCHS 300 \ + --save_dir ${log_root} \ + --PATH ${log_root} \ + --train_data_path ${train_data_dir} \ + --valid_data_path ${valid_data_dir} \ + --sr 24000 \ + --ratios 2 2 2 4 \ + --target_bandwidths 7.5 15 diff --git a/third_party/AcademiCodec/egs/Encodec_24k_32d/test.sh b/third_party/AcademiCodec/egs/Encodec_24k_32d/test.sh new file mode 100644 index 0000000..d39ab3b --- /dev/null +++ b/third_party/AcademiCodec/egs/Encodec_24k_32d/test.sh @@ -0,0 +1,14 @@ +#!/bin/bash +source path.sh + +python3 ${BIN_DIR}/test.py \ + --input=./test_wav \ + --output=./output \ + --resume_path=checkpoint/Encodec_24khz_32d.pth \ + --sr=24000 \ + --ratios 2 2 2 4 \ + --target_bandwidths 7.5 15 \ + --target_bw=7.5 \ + -r + + \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/config_16k_320d.json b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/config_16k_320d.json new file mode 100644 index 0000000..c8054c9 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/config_16k_320d.json @@ -0,0 +1,42 @@ +{ + "resblock": "1", + "num_gpus": 8, + "batch_size": 64, + "learning_rate": 0.0002, + "adam_b1": 0.5, + "adam_b2": 0.9, + "lr_decay": 0.98, + "seed": 1234, + + "upsample_rates": [8,5,4,2], + "upsample_kernel_sizes": [16,11,8,4], + "upsample_initial_channel": 512, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "segment_size": 16000, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 200, + "win_size": 800, + + "sampling_rate": 16000, + + "n_code_groups": 2, + "n_codes": 1024, + "codebook_loss_lambda": 1.0, + "commitment_loss_lambda": 0.25, + + "fmin": 0, + "fmax": 8000, + "fmax_for_loss": null, + + "num_workers": 12, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/path.sh b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/path.sh new file mode 100644 index 0000000..e406044 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/path.sh @@ -0,0 +1 @@ +../HiFi-Codec-24k-240d/path.sh \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/readme.md b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/readme.md new file mode 100644 index 0000000..84ab8fd --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/readme.md @@ -0,0 +1,13 @@ +## How to train your model +Firstly, set the related path in start.sh file, then
+```bash +bash start.sh +``` + +## How to Inference +```bash +mkdir checkpoint +cd checkpoint +wget https://huggingface.co/Dongchao/AcademiCodec/resolve/main/HiFi-Codec-16k-320d +bash test.sh +``` diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/start.sh b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/start.sh new file mode 100644 index 0000000..cee2b50 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/start.sh @@ -0,0 +1,40 @@ + +#!/bin/bash +source path.sh +set -e + +log_root="logs" +# .lst save the wav path. +input_training_file="train.lst" +input_validation_file="valid.lst" + +#mode=debug +mode=train + +if [ "${mode}" == "debug" ]; then + ## debug + echo "Debug" + log_root=${log_root}_debug + export CUDA_VISIBLE_DEVICES=0 + python ${BIN_DIR}/train.py \ + --config config_16k_320d.json \ + --checkpoint_path ${log_root} \ + --input_training_file ${input_training_file} \ + --input_validation_file ${input_validation_file} \ + --checkpoint_interval 100 \ + --summary_interval 10 \ + --validation_interval 100 \ + +elif [ "$mode" == "train" ]; then + ## train + echo "Train model..." + export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 + python ${BIN_DIR}/train.py \ + --config config_16k_320d.json \ + --checkpoint_path ${log_root} \ + --input_training_file ${input_training_file} \ + --input_validation_file ${input_validation_file} \ + --checkpoint_interval 5000 \ + --summary_interval 100 \ + --validation_interval 5000 +fi diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/test.sh b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/test.sh new file mode 100644 index 0000000..dcdb570 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-16k-320d/test.sh @@ -0,0 +1,19 @@ +#!/bin/bash +source path.sh + +ckpt=checkpoint/HiFi-Codec-16k-320d +echo checkpoint path: ${ckpt} + +# the path of test wave +wav_dir=test_wav + +outputdir=output +mkdir -p ${outputdir} + +python3 ${BIN_DIR}/vqvae_copy_syn.py \ + --model_path=${ckpt} \ + --config_path=config_16k_320d.json \ + --input_wavdir=${wav_dir} \ + --outputdir=${outputdir} \ + --num_gens=10000 \ + --sample_rate=16000 diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/config_24k_240d.json b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/config_24k_240d.json new file mode 100644 index 0000000..0c7cf0f --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/config_24k_240d.json @@ -0,0 +1,42 @@ +{ + "resblock": "1", + "num_gpus": 8, + "batch_size": 32, + "learning_rate": 0.0002, + "adam_b1": 0.5, + "adam_b2": 0.9, + "lr_decay": 0.98, + "seed": 1234, + + "upsample_rates": [8,5,3,2], + "upsample_kernel_sizes": [16,11,7,4], + "upsample_initial_channel": 512, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "segment_size": 12000, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 240, + "win_size": 1024, + + "sampling_rate": 24000, + + "n_code_groups": 2, + "n_codes": 1024, + "codebook_loss_lambda": 1.0, + "commitment_loss_lambda": 0.25, + + "fmin": 0, + "fmax": 8000, + "fmax_for_loss": null, + + "num_workers": 12, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/path.sh b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/path.sh new file mode 100644 index 0000000..24e247e --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/path.sh @@ -0,0 +1,6 @@ +#!/bin/bash +export MAIN_ROOT=`realpath 
${PWD}/../../` + +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} +MODEL=hificodec +export BIN_DIR=${MAIN_ROOT}/academicodec/models/${MODEL} diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/readme.md b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/readme.md new file mode 100644 index 0000000..9b0184d --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/readme.md @@ -0,0 +1,13 @@ +## How to train your model +First, set the relevant paths in start.sh, then run:
+```bash +bash start.sh +``` + +## How to Inference +```bash +mkdir checkpoint +cd checkpoint +wget https://huggingface.co/Dongchao/AcademiCodec/resolve/main/HiFi-Codec-24k-240d +bash test.sh +``` diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/start.sh b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/start.sh new file mode 100644 index 0000000..0d03643 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/start.sh @@ -0,0 +1,40 @@ + +#!/bin/bash +source path.sh +set -e + +log_root="logs" +# .lst save the wav path. +input_training_file="train.lst" +input_validation_file="valid.lst" + +#mode=debug +mode=train + +if [ "${mode}" == "debug" ]; then + ## debug + echo "Debug" + log_root=${log_root}_debug + export CUDA_VISIBLE_DEVICES=0 + python ${BIN_DIR}/train.py \ + --config config_24k_240d.json \ + --checkpoint_path ${log_root} \ + --input_training_file ${input_training_file} \ + --input_validation_file ${input_validation_file} \ + --checkpoint_interval 100 \ + --summary_interval 10 \ + --validation_interval 100 \ + +elif [ "$mode" == "train" ]; then + ## train + echo "Train model..." + export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 + python ${BIN_DIR}/train.py \ + --config config_24k_240d.json \ + --checkpoint_path ${log_root} \ + --input_training_file ${input_training_file} \ + --input_validation_file ${input_validation_file} \ + --checkpoint_interval 5000 \ + --summary_interval 100 \ + --validation_interval 5000 +fi diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/test.sh b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/test.sh new file mode 100644 index 0000000..e17d186 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-240d/test.sh @@ -0,0 +1,18 @@ +#!/bin/bash +source path.sh + +ckpt=checkpoint/HiFi-Codec-24k-240d +echo checkpoint path: ${ckpt} + +# the path of test wave +wav_dir=test_wav + +outputdir=output +mkdir -p ${outputdir} + +python3 ${BIN_DIR}/vqvae_copy_syn.py \ + --model_path=${ckpt} \ + --config_path=config_24k_240d.json \ + --input_wavdir=${wav_dir} \ + --outputdir=${outputdir} \ + --num_gens=10000 diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/config_24k_320d.json b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/config_24k_320d.json new file mode 100644 index 0000000..7757d5e --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/config_24k_320d.json @@ -0,0 +1,42 @@ +{ + "resblock": "1", + "num_gpus": 8, + "batch_size": 80, + "learning_rate": 0.0002, + "adam_b1": 0.5, + "adam_b2": 0.9, + "lr_decay": 0.98, + "seed": 1234, + + "upsample_rates": [8,5,4,2], + "upsample_kernel_sizes": [16,11,8,4], + "upsample_initial_channel": 512, + "resblock_kernel_sizes": [3,7,11], + "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]], + + "segment_size": 16000, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 240, + "win_size": 1024, + + "sampling_rate": 24000, + + "n_code_groups": 2, + "n_codes": 1024, + "codebook_loss_lambda": 1.0, + "commitment_loss_lambda": 0.25, + + "fmin": 0, + "fmax": 8000, + "fmax_for_loss": null, + + "num_workers": 12, + + "dist_config": { + "dist_backend": "nccl", + "dist_url": "tcp://localhost:54321", + "world_size": 1 + } +} diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/infer.ipynb b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/infer.ipynb new file mode 100644 index 0000000..171ef1d --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/infer.ipynb @@ -0,0 +1,138 @@ +{ + "cells": [ + { + "cell_type": "code", + 
"execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import sys\n", + "\n", + "sys.path.append('../../')" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Init model and load weights\n", + "Model ready\n", + "Globbed 12 wav files.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████| 1/1 [00:00<00:00, 11.08it/s]" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "wav.shape: (97681,)\n", + "acoustic_token: tensor([[[ 11, 591, 281, 629],\n", + " [733, 591, 401, 139],\n", + " [500, 591, 733, 600],\n", + " ...,\n", + " [733, 591, 451, 346],\n", + " [733, 591, 401, 139],\n", + " [386, 591, 281, 461]]], device='cuda:0')\n", + "acoustic_token.shape: torch.Size([1, 305, 4])\n", + "acoustic_token.dtype: torch.int64\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\n" + ] + } + ], + "source": [ + "import glob\n", + "import json\n", + "import os\n", + "from pathlib import Path\n", + "\n", + "import librosa\n", + "import torch\n", + "from academicodec.models.hificodec.vqvae import VQVAE\n", + "from librosa.util import normalize\n", + "from tqdm import tqdm\n", + "\n", + "ckpt_path = './checkpoint/HiFi-Codec-24k-320d'\n", + "config_path = './config_24k_320d.json'\n", + "with open(config_path, 'r') as f:\n", + " config = json.load(f)\n", + " sample_rate = config['sampling_rate']\n", + "\n", + "outputdir = './output'\n", + "inputdir = './test_wav'\n", + "num = 1024\n", + "\n", + "if __name__ == '__main__':\n", + " Path(outputdir).mkdir(parents=True, exist_ok=True)\n", + " print(\"Init model and load weights\")\n", + " # make sure you downloaded the weights from https://huggingface.co/Dongchao/AcademiCodec/blob/main/HiFi-Codec-24k-320d \n", + " # and put it in ./checkpoint/\n", + " model = VQVAE(\n", + " config_path,\n", + " ckpt_path,\n", + " with_encoder=True)\n", + " model.cuda()\n", + " model.eval()\n", + " print(\"Model ready\")\n", + "\n", + " wav_paths = glob.glob(f\"{inputdir}/*.wav\")[:num]\n", + " print(f\"Globbed {len(wav_paths)} wav files.\")\n", + " fid_to_acoustic_token = {}\n", + " for wav_path in tqdm(wav_paths[:1]):\n", + " wav, sr = librosa.load(wav_path, sr=sample_rate)\n", + " print(\"wav.shape:\",wav.shape)\n", + " assert sr == sample_rate\n", + " fid = os.path.basename(wav_path)[:-4]\n", + " wav = normalize(wav) * 0.95\n", + " wav = torch.FloatTensor(wav).unsqueeze(0)\n", + " wav = wav.to(torch.device('cuda'))\n", + " acoustic_token = model.encode(wav)\n", + " print(\"acoustic_token:\",acoustic_token)\n", + " print(\"acoustic_token.shape:\",acoustic_token.shape)\n", + " print(\"acoustic_token.dtype:\",acoustic_token.dtype)\n", + " fid = os.path.basename(wav_path)[:-4]\n", + " fid_to_acoustic_token[fid] = acoustic_token\n", + "\n", + " torch.save(fid_to_acoustic_token,\n", + " os.path.join(outputdir, 'fid_to_acoustic_token.pth'))\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/path.sh 
b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/path.sh new file mode 100644 index 0000000..e406044 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/path.sh @@ -0,0 +1 @@
+../HiFi-Codec-24k-240d/path.sh \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/readme.md b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/readme.md new file mode 100644 index 0000000..76c4672 --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/readme.md @@ -0,0 +1,13 @@
+## How to train your model
+First, set the relevant paths in the start.sh file, then run:
+```bash +bash start.sh +``` + +## How to Inference +```bash +mkdir checkpoint +cd checkpoint +wget https://huggingface.co/Dongchao/AcademiCodec/resolve/main/HiFi-Codec-24k-320d +bash test.sh +``` diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/start.sh b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/start.sh new file mode 100644 index 0000000..6b6bc2d --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/start.sh @@ -0,0 +1,39 @@ +#!/bin/bash +source path.sh +set -e + +log_root="logs" +# .lst save the wav path. +input_training_file="train.lst" +input_validation_file="valid.lst" + +#mode=debug +mode=train + +if [ "${mode}" == "debug" ]; then + ## debug + echo "Debug" + log_root=${log_root}_debug + export CUDA_VISIBLE_DEVICES=0 + python ${BIN_DIR}/train.py \ + --config config_24k_320d.json \ + --checkpoint_path ${log_root} \ + --input_training_file ${input_training_file} \ + --input_validation_file ${input_validation_file} \ + --checkpoint_interval 100 \ + --summary_interval 10 \ + --validation_interval 100 \ + +elif [ "$mode" == "train" ]; then + ## train + echo "Train model..." + export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 + python ${BIN_DIR}/train.py \ + --config config_24k_320d.json \ + --checkpoint_path ${log_root} \ + --input_training_file ${input_training_file} \ + --input_validation_file ${input_validation_file} \ + --checkpoint_interval 5000 \ + --summary_interval 100 \ + --validation_interval 5000 +fi diff --git a/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/test.sh b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/test.sh new file mode 100644 index 0000000..f7390ed --- /dev/null +++ b/third_party/AcademiCodec/egs/HiFi-Codec-24k-320d/test.sh @@ -0,0 +1,18 @@ +#!/bin/bash +source path.sh + +ckpt=checkpoint/HiFi-Codec-24k-320d +echo checkpoint path: ${ckpt} + +# the path of test wave +wav_dir=test_wav + +outputdir=output +mkdir -p ${outputdir} + +python3 ${BIN_DIR}/vqvae_copy_syn.py \ + --model_path=${ckpt} \ + --config_path=config_24k_320d.json \ + --input_wavdir=${wav_dir} \ + --outputdir=${outputdir} \ + --num_gens=10000 diff --git a/third_party/AcademiCodec/egs/SoundStream_24k_240d/main3_ddp.py b/third_party/AcademiCodec/egs/SoundStream_24k_240d/main3_ddp.py new file mode 100644 index 0000000..fbc0ac6 --- /dev/null +++ b/third_party/AcademiCodec/egs/SoundStream_24k_240d/main3_ddp.py @@ -0,0 +1,488 @@ +# 与 Encodec_24k_240d main3_ddp.py 相比只有鉴别器不同 +import argparse +import itertools +import os +import time + +import torch +import torch.distributed as dist +from academicodec.models.encodec.distributed.launch import launch +from academicodec.models.encodec.msstftd import MultiScaleSTFTDiscriminator +from academicodec.models.encodec.net3 import SoundStream +from academicodec.models.soundstream.dataset import NSynthDataset +from academicodec.models.soundstream.loss import criterion_d +from academicodec.models.soundstream.loss import criterion_g +from academicodec.models.soundstream.loss import loss_dis +from academicodec.models.soundstream.loss import loss_g +from academicodec.models.soundstream.models import MultiPeriodDiscriminator +from academicodec.models.soundstream.models import MultiScaleDiscriminator +from academicodec.utils import Logger +from academicodec.utils import seed_everything +from torch.nn.parallel import DistributedDataParallel as DDP +from tqdm import tqdm +NODE_RANK = os.environ['INDEX'] if 'INDEX' in os.environ else 0 +NODE_RANK = int(NODE_RANK) +MASTER_ADDR, MASTER_PORT = (os.environ['CHIEF_IP'], + 22275) if 'CHIEF_IP' in 
os.environ else ( + "127.0.0.1", 29500) +MASTER_PORT = int(MASTER_PORT) +DIST_URL = 'tcp://%s:%s' % (MASTER_ADDR, MASTER_PORT) +NUM_NODE = os.environ['HOST_NUM'] if 'HOST_NUM' in os.environ else 1 + + +def getModelSize(model): + param_size = 0 + param_sum = 0 + for param in model.parameters(): + param_size += param.nelement() * param.element_size() + param_sum += param.nelement() + buffer_size = 0 + buffer_sum = 0 + for buffer in model.buffers(): + buffer_size += buffer.nelement() * buffer.element_size() + buffer_sum += buffer.nelement() + all_size = (param_size + buffer_size) / 1024 / 1024 + print('模型总大小为:{:.3f}MB'.format(all_size)) + return (param_size, param_sum, buffer_size, buffer_sum, all_size) + + +def get_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--num_node', + type=int, + default=NUM_NODE, + help='number of nodes for distributed training') + parser.add_argument( + '--ngpus_per_node', + type=int, + default=8, + help='number of gpu on one node') + parser.add_argument( + '--node_rank', + type=int, + default=NODE_RANK, + help='node rank for distributed training') + parser.add_argument( + '--dist_url', + type=str, + default=DIST_URL, + help='url used to set up distributed training') + parser.add_argument( + '--gpu', + type=int, + default=None, + help='GPU id to use. If given, only the specific gpu will be' + ' used, and ddp will be disabled') + parser.add_argument( + '--local_rank', + default=-1, + type=int, + help='node rank for distributed training') + # args for random + parser.add_argument( + '--seed', + type=int, + default=None, + help='seed for initializing training. ') + parser.add_argument( + '--cudnn_deterministic', + action='store_true', + help='set cudnn.deterministic True') + parser.add_argument( + '--tensorboard', + action='store_true', + help='use tensorboard for logging') + # args for training + parser.add_argument( + '--LAMBDA_ADV', + type=float, + default=1, + help='hyper-parameter for adver loss') + parser.add_argument( + '--LAMBDA_FEAT', + type=float, + default=1, + help='hyper-parameter for feat loss') + parser.add_argument( + '--LAMBDA_REC', + type=float, + default=1, + help='hyper-parameter for rec loss') + parser.add_argument( + '--LAMBDA_COM', + type=float, + default=1000, + help='hyper-parameter for commit loss') + parser.add_argument( + '--N_EPOCHS', type=int, default=100, help='Total training epoch') + parser.add_argument( + '--st_epoch', type=int, default=0, help='start training epoch') + parser.add_argument( + '--global_step', type=int, default=0, help='record the global step') + parser.add_argument('--discriminator_iter_start', type=int, default=500) + parser.add_argument('--BATCH_SIZE', type=int, default=2, help='batch size') + parser.add_argument( + '--PATH', + type=str, + default='model_path/', + help='The path to save the model') + parser.add_argument('--sr', type=int, default=24000, help='sample rate') + parser.add_argument( + '--print_freq', type=int, default=10, help='the print number') + parser.add_argument( + '--save_dir', type=str, default='log', help='log save path') + parser.add_argument( + '--train_data_path', + type=str, + default='path_to_wavs', + help='training data') + parser.add_argument( + '--valid_data_path', + type=str, + default='path_to_val_wavs', + help='training data') + parser.add_argument( + '--resume', action='store_true', help='whether re-train model') + parser.add_argument( + '--resume_path', type=str, default='path_to_resume', help='resume_path') + parser.add_argument( + '--ratios', + type=int, + 
nargs='+', + # probs(ratios) = hop_size + default=[8, 5, 4, 2], + help='ratios of SoundStream, shoud be set for different hop_size (32d, 320, 240d, ...)' + ) + parser.add_argument( + '--target_bandwidths', + type=float, + nargs='+', + # default for 16k_320d + default=[1, 1.5, 2, 4, 6, 12], + help='target_bandwidths of net3.py') + args = parser.parse_args() + time_str = time.strftime('%Y-%m-%d-%H-%M') + if args.resume: + args.PATH = args.resume_path # direcly use the old model path + else: + args.PATH = os.path.join(args.PATH, time_str) + args.save_dir = os.path.join(args.save_dir, time_str) + os.makedirs(args.PATH, exist_ok=True) + return args + + +def get_input(x): + x = x.to(memory_format=torch.contiguous_format) + return x.float() + + +def main(): + args = get_args() + if args.seed is not None or args.cudnn_deterministic: + seed_everything(args.seed, args.cudnn_deterministic) + if args.num_node == 1: + args.dist_url == "auto" + else: + assert args.num_node > 1 + args.ngpus_per_node = torch.cuda.device_count() + args.world_size = args.ngpus_per_node * args.num_node # + launch( + main_worker, + args.ngpus_per_node, + args.num_node, + args.node_rank, + args.dist_url, + args=(args, )) + + +def main_worker(local_rank, args): + args.local_rank = local_rank + args.global_rank = args.local_rank + args.node_rank * args.ngpus_per_node + args.distributed = args.world_size > 1 + #CUDA_VISIBLE_DEVICES = int(args.local_rank) + logger = Logger(args) + # 240倍下采 + soundstream = SoundStream(n_filters=32, D=512, ratios=args.ratios) + msd = MultiScaleDiscriminator() + mpd = MultiPeriodDiscriminator() + #print('soundstream ', soundstream) + # assert 1==2 + stft_disc = MultiScaleSTFTDiscriminator(filters=32) + getModelSize(soundstream) + getModelSize(msd) + getModelSize(mpd) + getModelSize(stft_disc) + if args.distributed: + soundstream = torch.nn.SyncBatchNorm.convert_sync_batchnorm(soundstream) + stft_disc = torch.nn.SyncBatchNorm.convert_sync_batchnorm(stft_disc) + msd = torch.nn.SyncBatchNorm.convert_sync_batchnorm(msd) + mpd = torch.nn.SyncBatchNorm.convert_sync_batchnorm(mpd) + # torch.distributed.barrier() + args.device = torch.device('cuda', args.local_rank) + soundstream.to(args.device) + stft_disc.to(args.device) + msd.to(args.device) + mpd.to(args.device) + if args.distributed: + soundstream = DDP( + soundstream, + device_ids=[args.local_rank], + find_unused_parameters=True + ) # device_ids=[args.local_rank], output_device=args.local_rank + stft_disc = DDP(stft_disc, + device_ids=[args.local_rank], + find_unused_parameters=True) + msd = DDP(msd, + device_ids=[args.local_rank], + find_unused_parameters=True) + mpd = DDP(mpd, + device_ids=[args.local_rank], + find_unused_parameters=True) + + train_dataset = NSynthDataset(audio_dir=args.train_data_path) + valid_dataset = NSynthDataset(audio_dir=args.valid_data_path) + args.sr = train_dataset.sr + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler( + train_dataset, drop_last=True, shuffle=True) + valid_sampler = torch.utils.data.distributed.DistributedSampler( + valid_dataset) + else: + train_sampler = None + valid_sampler = None + train_loader = torch.utils.data.DataLoader( + train_dataset, + batch_size=args.BATCH_SIZE, + num_workers=8, + sampler=train_sampler) + valid_loader = torch.utils.data.DataLoader( + valid_dataset, + batch_size=args.BATCH_SIZE, + num_workers=8, + sampler=valid_sampler) + optimizer_g = torch.optim.AdamW( + soundstream.parameters(), lr=3e-4, betas=(0.5, 0.9)) + lr_scheduler_g = 
torch.optim.lr_scheduler.ExponentialLR( + optimizer_g, gamma=0.999) + optimizer_d = torch.optim.AdamW( + itertools.chain(stft_disc.parameters(), + msd.parameters(), mpd.parameters()), + lr=3e-4, + betas=(0.5, 0.9)) + lr_scheduler_d = torch.optim.lr_scheduler.ExponentialLR( + optimizer_d, gamma=0.999) + if args.resume: + latest_info = torch.load(args.resume_path + '/latest.pth') + args.st_epoch = latest_info['epoch'] + soundstream.load_state_dict(latest_info['soundstream']) + stft_disc.load_state_dict(latest_info['stft_disc']) + mpd.load_state_dict(latest_info['mpd']) + msd.load_state_dict(latest_info['msd']) + optimizer_g.load_state_dict(latest_info['optimizer_g']) + lr_scheduler_g.load_state_dict(latest_info['lr_scheduler_g']) + optimizer_d.load_state_dict(latest_info['optimizer_d']) + lr_scheduler_d.load_state_dict(latest_info['lr_scheduler_d']) + train(args, soundstream, stft_disc, msd, mpd, train_loader, valid_loader, + optimizer_g, optimizer_d, lr_scheduler_g, lr_scheduler_d, logger) + + +def train(args, soundstream, stft_disc, msd, mpd, train_loader, valid_loader, + optimizer_g, optimizer_d, lr_scheduler_g, lr_scheduler_d, logger): + print('args ', args.global_rank) + best_val_loss = float("inf") + best_val_epoch = -1 + global_step = 0 + for epoch in range(args.st_epoch, args.N_EPOCHS + 1): + soundstream.train() + stft_disc.train() + msd.train() + mpd.train() + train_loss_d = 0.0 + train_adv_g_loss = 0.0 + train_feat_loss = 0.0 + train_rec_loss = 0.0 + train_loss_g = 0.0 + train_commit_loss = 0.0 + k_iter = 0 + if args.distributed: + train_loader.sampler.set_epoch(epoch) + for x in tqdm(train_loader): + x = x.to(args.device) + k_iter += 1 + global_step += 1 # record the global step + for optimizer_idx in [0, 1]: # we have two optimizer + x_wav = get_input(x) + G_x, commit_loss, last_layer = soundstream(x_wav) + if optimizer_idx == 0: + # update generator + y_disc_r, fmap_r = stft_disc(x_wav.contiguous()) + y_disc_gen, fmap_gen = stft_disc(G_x.contiguous()) + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x_wav.contiguous(), G_x.contiguous()) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x_wav.contiguous(), G_x.contiguous()) + total_loss_g, rec_loss, adv_g_loss, feat_loss, d_weight = loss_g( + commit_loss, + x_wav, + G_x, + fmap_r, + fmap_gen, + y_disc_r, + y_disc_gen, + global_step, + y_df_hat_r, + y_df_hat_g, + y_ds_hat_r, + y_ds_hat_g, + fmap_f_r, + fmap_f_g, + fmap_s_r, + fmap_s_g, + last_layer=last_layer, + is_training=True, + args=args) + train_commit_loss += commit_loss + train_loss_g += total_loss_g.item() + train_adv_g_loss += adv_g_loss.item() + train_feat_loss += feat_loss.item() + train_rec_loss += rec_loss.item() + optimizer_g.zero_grad() + total_loss_g.backward() + optimizer_g.step() + else: + # update discriminator + y_disc_r_det, fmap_r_det = stft_disc(x.detach()) + y_disc_gen_det, fmap_gen_det = stft_disc(G_x.detach()) + + # MPD + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x.detach(), G_x.detach()) + #MSD + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x.detach(), G_x.detach()) + + loss_d = loss_dis( + y_disc_r_det, y_disc_gen_det, fmap_r_det, fmap_gen_det, + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g, y_ds_hat_r, + y_ds_hat_g, fmap_s_r, fmap_s_g, global_step, args) + train_loss_d += loss_d.item() + optimizer_d.zero_grad() + loss_d.backward() + optimizer_d.step() + message = ''.format( + epoch, k_iter, + total_loss_g.item(), + adv_g_loss.item(), + feat_loss.item(), + rec_loss.item(), + commit_loss.item(), loss_d.item(), d_weight.item()) + 
if k_iter % args.print_freq == 0: + logger.log_info(message) + lr_scheduler_g.step() + lr_scheduler_d.step() + message = ''.format( + epoch, train_loss_g / len(train_loader), train_rec_loss / + len(train_loader), train_adv_g_loss / len(train_loader), + train_feat_loss / len(train_loader), + train_commit_loss / len(train_loader)) + logger.log_info(message) + with torch.no_grad(): + soundstream.eval() + stft_disc.eval() + mpd.eval() + msd.eval() + valid_loss_d = 0.0 + valid_loss_g = 0.0 + valid_commit_loss = 0.0 + valid_adv_g_loss = 0.0 + valid_feat_loss = 0.0 + valid_rec_loss = 0.0 + if args.distributed: + valid_loader.sampler.set_epoch(epoch) + for x in tqdm(valid_loader): + x = x.to(args.device) + for optimizer_idx in [0, 1]: + x_wav = get_input(x) + G_x, commit_loss, _ = soundstream(x_wav) + if optimizer_idx == 0: + valid_commit_loss += commit_loss + y_disc_r, fmap_r = stft_disc(x_wav.contiguous()) + y_disc_gen, fmap_gen = stft_disc(G_x.contiguous()) + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x_wav.contiguous(), G_x.contiguous()) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x_wav.contiguous(), G_x.contiguous()) + + total_loss_g, adv_g_loss, feat_loss, rec_loss = criterion_g( + commit_loss, + x_wav, + G_x, + fmap_r, + fmap_gen, + y_disc_r, + y_disc_gen, + y_df_hat_r, + y_df_hat_g, + fmap_f_r, + fmap_f_g, + y_ds_hat_r, + y_ds_hat_g, + fmap_s_r, + fmap_s_g, + args=args) + valid_loss_g += total_loss_g.item() + valid_adv_g_loss += adv_g_loss.item() + valid_feat_loss += feat_loss.item() + valid_rec_loss += rec_loss.item() + else: + y_disc_r_det, fmap_r_det = stft_disc( + x_wav.contiguous().detach()) + y_disc_gen_det, fmap_gen_det = stft_disc( + G_x.contiguous().detach()) + y_df_hat_r, y_df_hat_g, fmap_f_r, fmap_f_g = mpd( + x_wav.contiguous().detach(), + G_x.contiguous().detach()) + y_ds_hat_r, y_ds_hat_g, fmap_s_r, fmap_s_g = msd( + x_wav.contiguous().detach(), + G_x.contiguous().detach()) + loss_d = criterion_d(y_disc_r_det, y_disc_gen_det, + fmap_r_det, fmap_gen_det, + y_df_hat_r, y_df_hat_g, fmap_f_r, + fmap_f_g, y_ds_hat_r, y_ds_hat_g, + fmap_s_r, fmap_s_g) + valid_loss_d += loss_d.item() + if dist.get_rank() == 0: + best_model = soundstream.state_dict().copy() + latest_model_soundstream = soundstream.state_dict().copy() + latest_model_dis = stft_disc.state_dict().copy() + latest_mpd = mpd.state_dict().copy() + latest_msd = msd.state_dict().copy() + if valid_rec_loss < best_val_loss: + best_val_loss = valid_rec_loss + best_val_epoch = epoch + torch.save(best_model, + args.PATH + '/best_' + str(epoch) + '.pth') + latest_save = {} + latest_save['soundstream'] = latest_model_soundstream + latest_save['stft_disc'] = latest_model_dis + latest_save['mpd'] = latest_mpd + latest_save['msd'] = latest_msd + latest_save['epoch'] = epoch + latest_save['optimizer_g'] = optimizer_g.state_dict() + latest_save['optimizer_d'] = optimizer_d.state_dict() + latest_save['lr_scheduler_g'] = lr_scheduler_g.state_dict() + latest_save['lr_scheduler_d'] = lr_scheduler_d.state_dict() + torch.save(latest_save, args.PATH + '/latest.pth') + + message = ''.format( + epoch, valid_loss_g / len(valid_loader), valid_rec_loss / + len(valid_loader), valid_adv_g_loss / len(valid_loader), + valid_feat_loss / len(valid_loader), + valid_commit_loss / len(valid_loader), + valid_loss_d / len(valid_loader), best_val_epoch) + logger.log_info(message) + + +if __name__ == '__main__': + main() diff --git a/third_party/AcademiCodec/egs/SoundStream_24k_240d/path.sh 
b/third_party/AcademiCodec/egs/SoundStream_24k_240d/path.sh new file mode 100644 index 0000000..9d23a5d --- /dev/null +++ b/third_party/AcademiCodec/egs/SoundStream_24k_240d/path.sh @@ -0,0 +1,6 @@ +#!/bin/bash +export MAIN_ROOT=`realpath ${PWD}/../../` + +export PYTHONPATH=${MAIN_ROOT}:${PYTHONPATH} +MODEL=encodec +export BIN_DIR=${MAIN_ROOT}/academicodec/models/${MODEL} \ No newline at end of file diff --git a/third_party/AcademiCodec/egs/SoundStream_24k_240d/readme.md b/third_party/AcademiCodec/egs/SoundStream_24k_240d/readme.md new file mode 100644 index 0000000..1991079 --- /dev/null +++ b/third_party/AcademiCodec/egs/SoundStream_24k_240d/readme.md @@ -0,0 +1,12 @@ +# The training code of SoundStream + + +### For Training +set the right path to start.sh + +run: `bash start.sh` + +### For Inference + + +模型不开源,这个目录暂未整理 diff --git a/third_party/AcademiCodec/egs/SoundStream_24k_240d/start.sh b/third_party/AcademiCodec/egs/SoundStream_24k_240d/start.sh new file mode 100644 index 0000000..2837950 --- /dev/null +++ b/third_party/AcademiCodec/egs/SoundStream_24k_240d/start.sh @@ -0,0 +1,13 @@ +#!/bin/bash +source path.sh + +python3 main3_ddp.py \ + --BATCH_SIZE 16 \ + --N_EPOCHS 300 \ + --save_dir path_to_save_log \ + --PATH path_to_save_model \ + --train_data_path path_to_training_data \ + --valid_data_path path_to_val_data \ + --sr 24000 \ + --ratios 6 5 4 2 \ + --target_bandwidths 1 2 4 8 12 diff --git a/third_party/AcademiCodec/egs/SoundStream_24k_240d/test.sh b/third_party/AcademiCodec/egs/SoundStream_24k_240d/test.sh new file mode 100644 index 0000000..4c45d91 --- /dev/null +++ b/third_party/AcademiCodec/egs/SoundStream_24k_240d/test.sh @@ -0,0 +1,10 @@ +#!/bin/bash +source path.sh +python3 ${BIN_DIR}/test.py \ + --input=./test_wav \ + --output=./output \ + --resume_path=checkpoint/soundstream.pth \ + --sr=24000 \ + --ratios 6 5 4 2 \ + --target_bandwidths 1 2 4 8 12 \ + --target_bw=12 diff --git a/third_party/AcademiCodec/egs/util/wavlstgen.py b/third_party/AcademiCodec/egs/util/wavlstgen.py new file mode 100644 index 0000000..fe7eb80 --- /dev/null +++ b/third_party/AcademiCodec/egs/util/wavlstgen.py @@ -0,0 +1,49 @@ +# -*- encoding: utf-8 -*- +# 2022-2023 by zhaomingwork@qq.com +# can be used for generating train.lst or valid.lst only given a root dir +# example: +# python wavlstgen.py --wavdir /data/asr_data/aishell/ --outfile train.lst +import os +import time + +import argparse +import json +import traceback + + +import logging + +logging.basicConfig(level=logging.ERROR) + +parser = argparse.ArgumentParser() +parser.add_argument("--wavdir", + type=str, + default="./", + required=True, + help="root dir of wav") + + +parser.add_argument("--outfile", + type=str, + default="./wav.lst", + required=False, + help="output list file name") + +args = parser.parse_args() + +print(args) + +def genwavlist(rootdir): + outlist = open(args.outfile, 'w+') + + for dirpath, dirnames, filenames in os.walk(rootdir): + for filename in filenames: + #print(os.path.join(dirpath, filename)) + if filename.endswith(".wav"): + outlist.write(os.path.join(dirpath, filename)+"\n") + outlist.close() + + +if __name__ == '__main__': + + genwavlist(args.wavdir) diff --git a/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/compute_metrics.sh b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/compute_metrics.sh new file mode 100644 index 0000000..c5971b1 --- /dev/null +++ b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/compute_metrics.sh @@ -0,0 +1,33 @@ 
+#!/usr/bin/env bash + +# pip install pesq +# pip install pystoi +# pip install pyworld +# pip install pysptk +# pip install -U numpy +stage=1 +stop_stage=2 + +#ref_dir=$1 +#gen_dir=$2 + +ref_dir='your test folder' +gen_dir='the genereated samples' +echo ${ref_dir} +echo ${gen_dir} + + + +if [ $stage -le 1 ] && [ "${stop_stage}" -ge 2 ];then + echo "Compute PESQ" + python metrics/compute_pesq.py \ + -r ${ref_dir} \ + -d ${gen_dir} +fi + +if [ $stage -le 2 ] && [ "${stop_stage}" -ge 3 ];then + echo "Compute STOI" + python metrics/compute_stoi.py \ + -r ${ref_dir} \ + -d ${gen_dir} +fi diff --git a/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/compute_pesq.py b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/compute_pesq.py new file mode 100644 index 0000000..12706e9 --- /dev/null +++ b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/compute_pesq.py @@ -0,0 +1,48 @@ +import argparse +import glob +import os + +import scipy.signal as signal +from pesq import pesq +from scipy.io import wavfile +from tqdm import tqdm + + +def cal_pesq(ref_dir, deg_dir): + input_files = glob.glob(f"{deg_dir}/*.wav") + + nb_pesq_scores = 0.0 + wb_pesq_scores = 0.0 + for deg_wav in tqdm(input_files): + ref_wav = os.path.join(ref_dir, os.path.basename(deg_wav)) + ref_rate, ref = wavfile.read(ref_wav) + deg_rate, deg = wavfile.read(deg_wav) + if ref_rate != 16000: + ref = signal.resample(ref, 16000) + if deg_rate != 16000: + deg = signal.resample(deg, 16000) + + min_len = min(len(ref), len(deg)) + ref = ref[:min_len] + deg = deg[:min_len] + + nb_pesq_scores += pesq(16000, ref, deg, 'nb') + wb_pesq_scores += pesq(16000, ref, deg, 'wb') + + return nb_pesq_scores / len(input_files), wb_pesq_scores / len(input_files) + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description="Compute PESQ measure.") + + parser.add_argument( + '-r', '--ref_dir', required=True, help="Reference wave folder.") + parser.add_argument( + '-d', '--deg_dir', required=True, help="Degraded wave folder.") + + args = parser.parse_args() + + nb_score, wb_score = cal_pesq(args.ref_dir, args.deg_dir) + print(f"NB PESQ: {nb_score}") + print(f"WB PESQ: {wb_score}") diff --git a/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/compute_stoi.py b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/compute_stoi.py new file mode 100644 index 0000000..b12cf4f --- /dev/null +++ b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/compute_stoi.py @@ -0,0 +1,42 @@ +import argparse +import glob +import os + +import numpy as np +from pystoi import stoi +from scipy.io import wavfile +from tqdm import tqdm + + +def calculate_stoi(ref_dir, deg_dir): + input_files = glob.glob(f"{deg_dir}/*.wav") + if len(input_files) < 1: + raise RuntimeError(f"Found no wavs in {ref_dir}") + + stoi_scores = [] + for deg_wav in tqdm(input_files): + ref_wav = os.path.join(ref_dir, os.path.basename(deg_wav)) + rate, ref = wavfile.read(ref_wav) + rate, deg = wavfile.read(deg_wav) + min_len = min(len(ref), len(deg)) + ref = ref[:min_len] + deg = deg[:min_len] + cur_stoi = stoi(ref, deg, rate, extended=False) + stoi_scores.append(cur_stoi) + + return np.mean(stoi_scores) + + +if __name__ == '__main__': + + parser = argparse.ArgumentParser(description="Compute STOI measure") + + parser.add_argument( + '-r', '--ref_dir', required=True, help="Reference wave folder.") + parser.add_argument( + '-d', 
'--deg_dir', required=True, help="Degraded wave folder.") + + args = parser.parse_args() + + stoi_score = calculate_stoi(args.ref_dir, args.deg_dir) + print(f"STOI: {stoi_score}") diff --git a/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/utils.py b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/utils.py new file mode 100644 index 0000000..327544e --- /dev/null +++ b/third_party/AcademiCodec/evaluation_metric/calculate_voc_obj_metrics/metrics/utils.py @@ -0,0 +1,102 @@ +import os +import random +from os.path import join as opj + +import numpy as np +import torch +import torch.nn as nn + + +def count_parameters(model): + return sum(p.numel() for p in model.parameters() if p.requires_grad) + + +def get_params(args): + + params = {} + args_ref = vars(args) + args_keys = vars(args).keys() + + for key in args_keys: + if '__' in key: + continue + else: + temp_params = args_ref[key] + if type(temp_params) == dict: + params.update(temp_params) + else: + params[key] = temp_params + + return params + + +def rescale_module(module, reference): + for sub in module.modules(): + if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)): + rescale_conv(sub, reference) + + +def rescale_conv(conv, reference): + std = conv.weight.std().detach() + scale = (std / reference)**0.5 + conv.weight.data /= scale + if conv.bias is not None: + conv.bias.data /= scale + + +def write_result(estimate, noise, file, args): + if not os.path.exists(args.enhanced_path): + os.makedirs(args.enhanced_path) + file_name = opj(args.enhanced_path, + file[0].rsplit('.', 1)[0].replace('\\', '/').split('/')[-1]) + noise_path = file_name + '_noise.wav' + enhanced_path = file_name + '_enhanced.wav' + + torchaudio.save(noise_path, noise.squeeze(1), args.sample_rate) + torchaudio.save(enhanced_path, estimate.squeeze(1), args.sample_rate) + + +def seed_init(seed=100): + + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + torch.backends.cudnn.deterministic = True + torch.backends.cudnn.benchmark = False + os.environ['PYTHONHASHSEED'] = str(seed) + + +def args_dict(args): + """ + Get your arguments and make dictionary. + If you add some arguments in the model, you should edit here also. + """ + args.dataset = { + 'train': args.train, + 'val': args.val, + 'test': args.test, + 'matching': args.matching + } + args.setting = { + 'sample_rate': args.sample_rate, + 'segment': args.segment, + 'pad': args.pad, + 'stride': args.set_stride + } + args.manner = { + 'in_channels': args.in_channels, + 'out_channels': args.out_channels, + 'hidden': args.hidden, + 'depth': args.depth, + 'kernel_size': args.kernel_size, + 'stride': args.stride, + 'growth': args.growth, + 'head': args.head, + 'segment_len': args.segment_len + } + + args.ex_name = os.getcwd().replace('\\', '/').split('/')[-1] + + return args diff --git a/third_party/AcademiCodec/readme.md b/third_party/AcademiCodec/readme.md new file mode 100644 index 0000000..288ccaa --- /dev/null +++ b/third_party/AcademiCodec/readme.md @@ -0,0 +1,89 @@ +# AcademiCodec: An Open Source Audio Codec Model for Academic Research + +This repo is organized as follows: + +```text +AcademiCodec +├── academicodec +│   ├── utils.py # common parts of various models +│   ├── modules # common parts of various models +│   ├── ... 
+│   ├── quantization # common parts of various models
+│   └── models # parts that are not shared by various models
+│     ├── hificodec
+│     ├── encodec
+│     ├── soundstream
+│     └── ...
+├── evaluation_metric
+├── egs
+│ ├── SoundStream*
+│ ├── EnCodec*
+│ └── HiFi-Codec*
+│      ├── start.sh
+│      ├── ...
+│     └── test.sh
+└── README.md
+```
+
+### Ongoing
+This project is ongoing. You can find the paper at https://arxiv.org/pdf/2305.02765.pdf
+Furthermore, this project was launched from a university, and we expect more researchers to become contributors.
+
+#### Abstract
+Audio codec models are widely used in audio communication as a crucial technique for compressing audio into discrete representations. Nowadays, audio codec models are increasingly utilized in generation fields as intermediate representations. For instance, AudioLM is an audio generation model that uses the discrete representation of SoundStream as a training target, while VALL-E employs the Encodec model as an intermediate feature to aid TTS tasks. Despite their usefulness, two challenges persist: (1) training these audio codec models can be difficult due to the lack of publicly available training processes and the need for large-scale data and GPUs; (2) achieving good reconstruction performance requires many codebooks, which increases the burden on generation models. In this study, we propose a group-residual vector quantization (GRVQ) technique and use it to develop a novel \textbf{Hi}gh \textbf{Fi}delity Audio Codec model, HiFi-Codec, which only requires 4 codebooks. We train all the models using publicly available TTS data such as LibriTTS, VCTK, AISHELL, and more, with a total duration of over 1000 hours, using 8 GPUs. Our experimental results show that HiFi-Codec outperforms Encodec in terms of reconstruction performance despite requiring only 4 codebooks. To facilitate research in audio codec and generation, we introduce AcademiCodec, the first open-source audio codec toolkit that offers training codes and pre-trained models for Encodec, SoundStream, and HiFi-Codec.
+
+## 🔥 News
+#### AcademiCodec
+- 2023.4.16: We first release the training code and pre-trained models for Encodec and SoundStream, including 24 kHz and 16 kHz versions.
+- 2023.5.5: We release the code of HiFi-Codec.
+- 2023.6.2: Add `HiFi-Codec-24k-320d/infer.ipynb`, which can be used to extract acoustic tokens for later training of VALL-E, SoundStorm, etc.
+- 2023.06.13: Refactor the code structure.
+### Dependencies
+* [PyTorch](http://pytorch.org/) version >= 1.13.0
+* Python version >= 3.8
+
+# Train your own model
+Please refer to the folder for the specific version.
+
+## Data preparation
+Just prepare your audio data in one folder. Make sure the sample rate is correct.
+
+## Training or Inference
+Refer to the specific folders, e.g. Encodec_24k_240d denotes the Encodec model with a 24 kHz sample rate and a downsampling rate of 240. If you want to use our pre-trained models, please refer to https://huggingface.co/Dongchao/AcademiCodec/tree/main
+
+## Version Description
+* Encodec_16k_320: trained on 16 kHz audio with a downsampling rate of 320; can be used to train SpearTTS.
+* Encodec_24k_240d: trained on 24 kHz audio with a downsampling rate of 240; can be used for InstructTTS.
+* Encodec_24k_32d: trained on 24 kHz audio with a downsampling rate of only 32, so a single codebook suffices, e.g. for AudioGen.
+* SoundStream_24k_240d: the same configuration as Encodec_24k_240d.
+## What is the difference between SoundStream, Encodec and HiFi-Codec?
+In our view, the main difference between SoundStream and Encodec is the choice of discriminators. Encodec only uses an STFT discriminator, which pushes the STFT spectrogram to be more realistic. SoundStream uses two types of discriminators: one pushes the waveform level to be more realistic, and the other pushes the spectrogram level to be more realistic. In our code, we adopt the waveform-level discriminators from HiFi-GAN and the spectrogram-level discriminator from Encodec. In theory, we expect SoundStream to achieve better performance.
Indeed, Google's official SoundStream supports this: it can reconstruct high-quality audio with only 3 codebooks. Although our implementation can also achieve good performance with 3 codebooks, we admit that our version is not yet comparable to Google's.
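+
+As a concrete illustration, the sketch below shows how this recipe wires the three discriminators together. The instantiation and call signatures follow `egs/SoundStream_24k_240d/main3_ddp.py`; the waveform shape `[B, 1, T]` and the random tensors are only placeholders for illustration, not the real data pipeline.
+
+```python
+# Minimal sketch: combining the Encodec STFT discriminator with the
+# HiFi-GAN style multi-period / multi-scale discriminators, as in main3_ddp.py.
+import torch
+
+from academicodec.models.encodec.msstftd import MultiScaleSTFTDiscriminator
+from academicodec.models.soundstream.models import MultiPeriodDiscriminator
+from academicodec.models.soundstream.models import MultiScaleDiscriminator
+
+stft_disc = MultiScaleSTFTDiscriminator(filters=32)  # spectrogram level (from Encodec)
+mpd = MultiPeriodDiscriminator()                     # waveform level (from HiFi-GAN)
+msd = MultiScaleDiscriminator()                      # waveform level (from HiFi-GAN)
+
+# Placeholder real / reconstructed waveforms (1 second at 24 kHz, assumed shape [B, 1, T]).
+x_wav = torch.randn(1, 1, 24000)
+G_x = torch.randn(1, 1, 24000)
+
+# Each discriminator returns its logits plus feature maps, which feed the
+# adversarial and feature-matching terms of the generator/discriminator losses.
+y_disc_r, fmap_r = stft_disc(x_wav.contiguous())
+y_disc_g, fmap_g = stft_disc(G_x.contiguous())
+y_df_r, y_df_g, fmap_f_r, fmap_f_g = mpd(x_wav.contiguous(), G_x.contiguous())
+y_ds_r, y_ds_g, fmap_s_r, fmap_s_g = msd(x_wav.contiguous(), G_x.contiguous())
+```
+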
+HiFi-Codec is our proposed novel method, which aims to support generation tasks such as VALL-E, AudioLM, MusicLM, SpearTTS, InstructTTS and so on. HiFi-Codec needs only 4 codebooks, which significantly reduces the number of tokens. Some researchers have used our HiFi-Codec to implement VALL-E, which shows that it can achieve better audio quality.
+
+## Acknowledgements
+This implementation uses parts of the code from the following GitHub repos:
+https://github.com/facebookresearch/encodec
+https://github.com/yangdongchao/Text-to-sound-Synthesis
+https://github.com/b04901014/MQTTS +## Citations ## +If you find this code useful in your research, please cite our work: +```bib +@article{yang2023instructtts, + title={InstructTTS: Modelling Expressive TTS in Discrete Latent Space with Natural Language Style Prompt}, + author={Yang, Dongchao and Liu, Songxiang and Huang, Rongjie and Lei, Guangzhi and Weng, Chao and Meng, Helen and Yu, Dong}, + journal={arXiv preprint arXiv:2301.13662}, + year={2023} +} +``` +```bibtex +@article{yang2023hifi, + title={HiFi-Codec: Group-residual Vector quantization for High Fidelity Audio Codec}, + author={Yang, Dongchao and Liu, Songxiang and Huang, Rongjie and Tian, Jinchuan and Weng, Chao and Zou, Yuexian}, + journal={arXiv preprint arXiv:2305.02765}, + year={2023} +} +``` + +## Disclaimer ## +MIT license + diff --git a/third_party/AcademiCodec/requirements.txt b/third_party/AcademiCodec/requirements.txt new file mode 100644 index 0000000..a6846c4 --- /dev/null +++ b/third_party/AcademiCodec/requirements.txt @@ -0,0 +1,6 @@ +torchaudio +tensorboard +einops +matplotlib +pyyaml +tqdm \ No newline at end of file diff --git a/third_party/Matcha-TTS/.env.example b/third_party/Matcha-TTS/.env.example new file mode 100644 index 0000000..a790e32 --- /dev/null +++ b/third_party/Matcha-TTS/.env.example @@ -0,0 +1,6 @@ +# example of file for storing private and user specific environment variables, like keys or system paths +# rename it to ".env" (excluded from version control by default) +# .env is loaded by train.py automatically +# hydra allows you to reference variables in .yaml configs with special syntax: ${oc.env:MY_VAR} + +MY_VAR="/home/user/my/system/path" diff --git a/third_party/Matcha-TTS/.github/PULL_REQUEST_TEMPLATE.md b/third_party/Matcha-TTS/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..410bcd8 --- /dev/null +++ b/third_party/Matcha-TTS/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,22 @@ +## What does this PR do? + + + +Fixes #\ + +## Before submitting + +- [ ] Did you make sure **title is self-explanatory** and **the description concisely explains the PR**? +- [ ] Did you make sure your **PR does only one thing**, instead of bundling different changes together? +- [ ] Did you list all the **breaking changes** introduced by this pull request? +- [ ] Did you **test your PR locally** with `pytest` command? +- [ ] Did you **run pre-commit hooks** with `pre-commit run -a` command? + +## Did you have fun? + +Make sure you had fun coding 🙃 diff --git a/third_party/Matcha-TTS/.github/codecov.yml b/third_party/Matcha-TTS/.github/codecov.yml new file mode 100644 index 0000000..c66853c --- /dev/null +++ b/third_party/Matcha-TTS/.github/codecov.yml @@ -0,0 +1,15 @@ +coverage: + status: + # measures overall project coverage + project: + default: + threshold: 100% # how much decrease in coverage is needed to not consider success + + # measures PR or single commit coverage + patch: + default: + threshold: 100% # how much decrease in coverage is needed to not consider success + + + # project: off + # patch: off diff --git a/third_party/Matcha-TTS/.github/dependabot.yml b/third_party/Matcha-TTS/.github/dependabot.yml new file mode 100644 index 0000000..b19ccab --- /dev/null +++ b/third_party/Matcha-TTS/.github/dependabot.yml @@ -0,0 +1,17 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. 
+# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "pip" # See documentation for possible values + directory: "/" # Location of package manifests + target-branch: "dev" + schedule: + interval: "daily" + ignore: + - dependency-name: "pytorch-lightning" + update-types: ["version-update:semver-patch"] + - dependency-name: "torchmetrics" + update-types: ["version-update:semver-patch"] diff --git a/third_party/Matcha-TTS/.github/release-drafter.yml b/third_party/Matcha-TTS/.github/release-drafter.yml new file mode 100644 index 0000000..59af159 --- /dev/null +++ b/third_party/Matcha-TTS/.github/release-drafter.yml @@ -0,0 +1,44 @@ +name-template: "v$RESOLVED_VERSION" +tag-template: "v$RESOLVED_VERSION" + +categories: + - title: "🚀 Features" + labels: + - "feature" + - "enhancement" + - title: "🐛 Bug Fixes" + labels: + - "fix" + - "bugfix" + - "bug" + - title: "🧹 Maintenance" + labels: + - "maintenance" + - "dependencies" + - "refactoring" + - "cosmetic" + - "chore" + - title: "📝️ Documentation" + labels: + - "documentation" + - "docs" + +change-template: "- $TITLE @$AUTHOR (#$NUMBER)" +change-title-escapes: '\<*_&' # You can add # and @ to disable mentions + +version-resolver: + major: + labels: + - "major" + minor: + labels: + - "minor" + patch: + labels: + - "patch" + default: patch + +template: | + ## Changes + + $CHANGES diff --git a/third_party/Matcha-TTS/.gitignore b/third_party/Matcha-TTS/.gitignore new file mode 100644 index 0000000..cbec8b4 --- /dev/null +++ b/third_party/Matcha-TTS/.gitignore @@ -0,0 +1,163 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +### VisualStudioCode +.vscode/* +!.vscode/settings.json +!.vscode/tasks.json +!.vscode/launch.json +!.vscode/extensions.json +*.code-workspace +**/.vscode + +# JetBrains +.idea/ + +# Data & Models +*.h5 +*.tar +*.tar.gz + +# Lightning-Hydra-Template +configs/local/default.yaml +/data/ +/logs/ +.env + +# Aim logging +.aim + +# Cython complied files +matcha/utils/monotonic_align/core.c + +# Ignoring hifigan checkpoint +generator_v1 +g_02500000 +gradio_cached_examples/ +synth_output/ diff --git a/third_party/Matcha-TTS/.pre-commit-config.yaml b/third_party/Matcha-TTS/.pre-commit-config.yaml new file mode 100644 index 0000000..e695f11 --- /dev/null +++ b/third_party/Matcha-TTS/.pre-commit-config.yaml @@ -0,0 +1,59 @@ +default_language_version: + python: python3.10 + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.5.0 + hooks: + # list of supported hooks: https://pre-commit.com/hooks.html + - id: trailing-whitespace + - id: end-of-file-fixer + # - id: check-docstring-first + - id: check-yaml + - id: debug-statements + - id: detect-private-key + - id: check-toml + - id: check-case-conflict + - id: check-added-large-files + + # python code formatting + - repo: https://github.com/psf/black + rev: 23.12.1 + hooks: + - id: black + args: [--line-length, "120"] + + # python import sorting + - repo: https://github.com/PyCQA/isort + rev: 5.13.2 + hooks: + - id: isort + args: ["--profile", "black", "--filter-files"] + + # python upgrading syntax to newer version + - repo: https://github.com/asottile/pyupgrade + rev: v3.15.0 + hooks: + - id: pyupgrade + args: [--py38-plus] + + # python check (PEP8), programming errors and code complexity + - repo: https://github.com/PyCQA/flake8 + rev: 7.0.0 + hooks: + - id: flake8 + args: + [ + "--max-line-length", "120", + "--extend-ignore", + "E203,E402,E501,F401,F841,RST2,RST301", + "--exclude", + "logs/*,data/*,matcha/hifigan/*", + ] + additional_dependencies: [flake8-rst-docstrings==0.3.0] + + # pylint + - repo: https://github.com/pycqa/pylint + rev: v3.0.3 + hooks: + - id: pylint diff --git a/third_party/Matcha-TTS/.project-root b/third_party/Matcha-TTS/.project-root new file mode 100644 index 0000000..63eab77 --- /dev/null +++ b/third_party/Matcha-TTS/.project-root @@ -0,0 +1,2 @@ +# this file is required for inferring the project root directory +# do not delete diff --git a/third_party/Matcha-TTS/.pylintrc b/third_party/Matcha-TTS/.pylintrc new file mode 100644 index 0000000..9628641 --- /dev/null +++ b/third_party/Matcha-TTS/.pylintrc @@ -0,0 +1,525 @@ +[MASTER] + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-whitelist= + +# Add files or directories to the blacklist. They should be base names, not +# paths. +ignore=CVS + +# Add files or directories matching the regex patterns to the blacklist. The +# regex matches against base names, not paths. +ignore-patterns= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). 
+#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python modules names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Specify a configuration file. +#rcfile= + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. +confidence= + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then reenable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=missing-docstring, + too-many-public-methods, + too-many-lines, + bare-except, + ## for avoiding weird p3.6 CI linter error + ## TODO: see later if we can remove this + assigning-non-slot, + unsupported-assignment-operation, + ## end + line-too-long, + fixme, + wrong-import-order, + ungrouped-imports, + wrong-import-position, + import-error, + invalid-name, + too-many-instance-attributes, + arguments-differ, + arguments-renamed, + no-name-in-module, + no-member, + unsubscriptable-object, + raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + useless-object-inheritance, + too-few-public-methods, + too-many-branches, + too-many-arguments, + too-many-locals, + too-many-statements, + duplicate-code, + not-callable, + import-outside-toplevel, + logging-fstring-interpolation, + logging-not-lazy, + unused-argument, + no-else-return, + chained-comparison, + redefined-outer-name + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[REPORTS] + +# Python expression which should return a note less than 10 (10 is the highest +# note). You have access to the variables errors warning, statement which +# respectively contain the number of errors / warnings messages and the total +# number of statements analyzed. This is used by the global evaluation report +# (RP0004). 
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +#msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +output-format=text + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit + + +[LOGGING] + +# Format style used to check logging format string. `old` means using % +# formatting, while `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it working +# install python-enchant package.. +spelling-dict= + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to indicated private dictionary in +# --spelling-private-dict-file option instead of raising a message. +spelling-store-unknown-words=no + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members=numpy.*,torch.* + +# Tells whether missing members accessed in mixin class should be ignored. A +# mixin class is detected if its name ends with "mixin" (case insensitive). +ignore-mixin-members=yes + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. 
+ignored-classes=optparse.Values,thread._local,_thread._local + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis. It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. Default to name +# with leading underscore. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )??$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=120 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[SIMILARITIES] + +# Ignore comments when computing similarities. +ignore-comments=yes + +# Ignore docstrings when computing similarities. +ignore-docstrings=yes + +# Ignore imports when computing similarities. +ignore-imports=no + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. +argument-rgx=[a-z_][a-z0-9_]{0,30}$ + +# Naming style matching correct attribute names. +attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. +#attr-rgx= + +# Bad variable names which should always be refused, separated by a comma. 
+bad-names= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. +#class-attribute-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + x, + ex, + Run, + _ + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. +variable-rgx=[a-z_][a-z0-9_]{0,30}$ + + +[STRING] + +# This flag controls whether the implicit-str-concat-in-sequence should +# generate a warning on implicit string concatenation in sequences defined over +# several lines. +check-str-concat-over-line-jumps=no + + +[IMPORTS] + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules=optparse,tkinter.tix + +# Create a graph of external dependencies in the given file (report RP0402 must +# not be disabled). +ext-import-graph= + +# Create a graph of every (i.e. internal and external) dependencies in the +# given file (report RP0402 must not be disabled). +import-graph= + +# Create a graph of internal dependencies in the given file (report RP0402 must +# not be disabled). 
+int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries. +known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + + +[CLASSES] + +# List of method names used to declare (i.e. assign) instance attributes. +defining-attr-methods=__init__, + __new__, + setUp + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=cls + + +[DESIGN] + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement. +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=15 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=20 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when being caught. Defaults to +# "BaseException, Exception". +overgeneral-exceptions=builtins.BaseException, + builtins.Exception diff --git a/third_party/Matcha-TTS/LICENSE b/third_party/Matcha-TTS/LICENSE new file mode 100644 index 0000000..858018e --- /dev/null +++ b/third_party/Matcha-TTS/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2023 Shivam Mehta + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
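For reference, the `evaluation` option near the top of the .pylintrc added above weights each error five times as heavily as a warning, refactor, or convention message, and normalises by the number of analysed statements. A minimal sketch of how that score works out, using made-up message counts (all values below are hypothetical and not taken from this diff; only the formula itself comes from the config):

```python
# Hypothetical counts from a pylint run over ~400 statements.
error, warning, refactor, convention = 2, 5, 3, 10
statement = 400  # number of analysed statements

# Same expression as the `evaluation` option in the .pylintrc above.
score = 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
print(f"pylint score: {score:.2f}/10")  # -> pylint score: 9.30/10
```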
diff --git a/third_party/Matcha-TTS/MANIFEST.in b/third_party/Matcha-TTS/MANIFEST.in new file mode 100644 index 0000000..c013140 --- /dev/null +++ b/third_party/Matcha-TTS/MANIFEST.in @@ -0,0 +1,14 @@ +include README.md +include LICENSE.txt +include requirements.*.txt +include *.cff +include requirements.txt +include matcha/VERSION +recursive-include matcha *.json +recursive-include matcha *.html +recursive-include matcha *.png +recursive-include matcha *.md +recursive-include matcha *.py +recursive-include matcha *.pyx +recursive-exclude tests * +prune tests* diff --git a/third_party/Matcha-TTS/Makefile b/third_party/Matcha-TTS/Makefile new file mode 100644 index 0000000..4b523dd --- /dev/null +++ b/third_party/Matcha-TTS/Makefile @@ -0,0 +1,42 @@ + +help: ## Show help + @grep -E '^[.a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' + +clean: ## Clean autogenerated files + rm -rf dist + find . -type f -name "*.DS_Store" -ls -delete + find . | grep -E "(__pycache__|\.pyc|\.pyo)" | xargs rm -rf + find . | grep -E ".pytest_cache" | xargs rm -rf + find . | grep -E ".ipynb_checkpoints" | xargs rm -rf + rm -f .coverage + +clean-logs: ## Clean logs + rm -rf logs/** + +create-package: ## Create wheel and tar gz + rm -rf dist/ + python setup.py bdist_wheel --plat-name=manylinux1_x86_64 + python setup.py sdist + python -m twine upload dist/* --verbose --skip-existing + +format: ## Run pre-commit hooks + pre-commit run -a + +sync: ## Merge changes from main branch to your current branch + git pull + git pull origin main + +test: ## Run not slow tests + pytest -k "not slow" + +test-full: ## Run all tests + pytest + +train-ljspeech: ## Train the model + python matcha/train.py experiment=ljspeech + +train-ljspeech-min: ## Train the model with minimum memory + python matcha/train.py experiment=ljspeech_min_memory + +start_app: ## Start the app + python matcha/app.py diff --git a/third_party/Matcha-TTS/README.md b/third_party/Matcha-TTS/README.md new file mode 100644 index 0000000..ebc6b7c --- /dev/null +++ b/third_party/Matcha-TTS/README.md @@ -0,0 +1,278 @@ +
+ +# 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching + +### [Shivam Mehta](https://www.kth.se/profile/smehta), [Ruibo Tu](https://www.kth.se/profile/ruibo), [Jonas Beskow](https://www.kth.se/profile/beskow), [Éva Székely](https://www.kth.se/profile/szekely), and [Gustav Eje Henter](https://people.kth.se/~ghe/) + +[![python](https://img.shields.io/badge/-Python_3.10-blue?logo=python&logoColor=white)](https://www.python.org/downloads/release/python-3100/) +[![pytorch](https://img.shields.io/badge/PyTorch_2.0+-ee4c2c?logo=pytorch&logoColor=white)](https://pytorch.org/get-started/locally/) +[![lightning](https://img.shields.io/badge/-Lightning_2.0+-792ee5?logo=pytorchlightning&logoColor=white)](https://pytorchlightning.ai/) +[![hydra](https://img.shields.io/badge/Config-Hydra_1.3-89b8cd)](https://hydra.cc/) +[![black](https://img.shields.io/badge/Code%20Style-Black-black.svg?labelColor=gray)](https://black.readthedocs.io/en/stable/) +[![isort](https://img.shields.io/badge/%20imports-isort-%231674b1?style=flat&labelColor=ef8336)](https://pycqa.github.io/isort/) + +

+ +> This is the official code implementation of 🍵 Matcha-TTS [ICASSP 2024]. + +We propose 🍵 Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses [conditional flow matching](https://arxiv.org/abs/2210.02747) (similar to [rectified flows](https://arxiv.org/abs/2209.03003)) to speed up ODE-based speech synthesis. Our method: + +- Is probabilistic +- Has compact memory footprint +- Sounds highly natural +- Is very fast to synthesise from + +Check out our [demo page](https://shivammehta25.github.io/Matcha-TTS) and read [our ICASSP 2024 paper](https://arxiv.org/abs/2309.03199) for more details. + +[Pre-trained models](https://drive.google.com/drive/folders/17C_gYgEHOxI5ZypcfE_k1piKCtyR0isJ?usp=sharing) will be automatically downloaded with the CLI or gradio interface. + +You can also [try 🍵 Matcha-TTS in your browser on HuggingFace 🤗 spaces](https://huggingface.co/spaces/shivammehta25/Matcha-TTS). + +## Teaser video + +[![Watch the video](https://img.youtube.com/vi/xmvJkz3bqw0/hqdefault.jpg)](https://youtu.be/xmvJkz3bqw0) + +## Installation + +1. Create an environment (suggested but optional) + +``` +conda create -n matcha-tts python=3.10 -y +conda activate matcha-tts +``` + +2. Install Matcha TTS using pip or from source + +```bash +pip install matcha-tts +``` + +from source + +```bash +pip install git+https://github.com/shivammehta25/Matcha-TTS.git +cd Matcha-TTS +pip install -e . +``` + +3. Run CLI / gradio app / jupyter notebook + +```bash +# This will download the required models +matcha-tts --text "" +``` + +or + +```bash +matcha-tts-app +``` + +or open `synthesis.ipynb` on jupyter notebook + +### CLI Arguments + +- To synthesise from given text, run: + +```bash +matcha-tts --text "" +``` + +- To synthesise from a file, run: + +```bash +matcha-tts --file +``` + +- To batch synthesise from a file, run: + +```bash +matcha-tts --file --batched +``` + +Additional arguments + +- Speaking rate + +```bash +matcha-tts --text "" --speaking_rate 1.0 +``` + +- Sampling temperature + +```bash +matcha-tts --text "" --temperature 0.667 +``` + +- Euler ODE solver steps + +```bash +matcha-tts --text "" --steps 10 +``` + +## Train with your own dataset + +Let's assume we are training with LJ Speech + +1. Download the dataset from [here](https://keithito.com/LJ-Speech-Dataset/), extract it to `data/LJSpeech-1.1`, and prepare the file lists to point to the extracted data like for [item 5 in the setup of the NVIDIA Tacotron 2 repo](https://github.com/NVIDIA/tacotron2#setup). + +2. Clone and enter the Matcha-TTS repository + +```bash +git clone https://github.com/shivammehta25/Matcha-TTS.git +cd Matcha-TTS +``` + +3. Install the package from source + +```bash +pip install -e . +``` + +4. Go to `configs/data/ljspeech.yaml` and change + +```yaml +train_filelist_path: data/filelists/ljs_audio_text_train_filelist.txt +valid_filelist_path: data/filelists/ljs_audio_text_val_filelist.txt +``` + +5. Generate normalisation statistics with the yaml file of dataset configuration + +```bash +matcha-data-stats -i ljspeech.yaml +# Output: +#{'mel_mean': -5.53662231756592, 'mel_std': 2.1161014277038574} +``` + +Update these values in `configs/data/ljspeech.yaml` under `data_statistics` key. + +```bash +data_statistics: # Computed for ljspeech dataset + mel_mean: -5.536622 + mel_std: 2.116101 +``` + +to the paths of your train and validation filelists. + +6. 
Run the training script + +```bash +make train-ljspeech +``` + +or + +```bash +python matcha/train.py experiment=ljspeech +``` + +- for a minimum memory run + +```bash +python matcha/train.py experiment=ljspeech_min_memory +``` + +- for multi-gpu training, run + +```bash +python matcha/train.py experiment=ljspeech trainer.devices=[0,1] +``` + +7. Synthesise from the custom trained model + +```bash +matcha-tts --text "" --checkpoint_path +``` + +## ONNX support + +> Special thanks to [@mush42](https://github.com/mush42) for implementing ONNX export and inference support. + +It is possible to export Matcha checkpoints to [ONNX](https://onnx.ai/), and run inference on the exported ONNX graph. + +### ONNX export + +To export a checkpoint to ONNX, first install ONNX with + +```bash +pip install onnx +``` + +then run the following: + +```bash +python3 -m matcha.onnx.export matcha.ckpt model.onnx --n-timesteps 5 +``` + +Optionally, the ONNX exporter accepts **vocoder-name** and **vocoder-checkpoint** arguments. This enables you to embed the vocoder in the exported graph and generate waveforms in a single run (similar to end-to-end TTS systems). + +**Note** that `n_timesteps` is treated as a hyper-parameter rather than a model input. This means you should specify it during export (not during inference). If not specified, `n_timesteps` is set to **5**. + +**Important**: for now, torch>=2.1.0 is needed for export since the `scaled_product_attention` operator is not exportable in older versions. Until the final version is released, those who want to export their models must install torch>=2.1.0 manually as a pre-release. + +### ONNX Inference + +To run inference on the exported model, first install `onnxruntime` using + +```bash +pip install onnxruntime +pip install onnxruntime-gpu # for GPU inference +``` + +then use the following: + +```bash +python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs +``` + +You can also control synthesis parameters: + +```bash +python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs --temperature 0.4 --speaking_rate 0.9 --spk 0 +``` + +To run inference on **GPU**, make sure to install **onnxruntime-gpu** package, and then pass `--gpu` to the inference command: + +```bash +python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs --gpu +``` + +If you exported only Matcha to ONNX, this will write mel-spectrogram as graphs and `numpy` arrays to the output directory. +If you embedded the vocoder in the exported graph, this will write `.wav` audio files to the output directory. + +If you exported only Matcha to ONNX, and you want to run a full TTS pipeline, you can pass a path to a vocoder model in `ONNX` format: + +```bash +python3 -m matcha.onnx.infer model.onnx --text "hey" --output-dir ./outputs --vocoder hifigan.small.onnx +``` + +This will write `.wav` audio files to the output directory. + +## Citation information + +If you use our code or otherwise find this work useful, please cite our paper: + +```text +@inproceedings{mehta2024matcha, + title={Matcha-{TTS}: A fast {TTS} architecture with conditional flow matching}, + author={Mehta, Shivam and Tu, Ruibo and Beskow, Jonas and Sz{\'e}kely, {\'E}va and Henter, Gustav Eje}, + booktitle={Proc. ICASSP}, + year={2024} +} +``` + +## Acknowledgements + +Since this code uses [Lightning-Hydra-Template](https://github.com/ashleve/lightning-hydra-template), you have all the powers that come with it. 
+ +Other source code we would like to acknowledge: + +- [Coqui-TTS](https://github.com/coqui-ai/TTS/tree/dev): For helping me figure out how to make cython binaries pip installable and encouragement +- [Hugging Face Diffusers](https://huggingface.co/): For their awesome diffusers library and its components +- [Grad-TTS](https://github.com/huawei-noah/Speech-Backbones/tree/main/Grad-TTS): For the monotonic alignment search source code +- [torchdyn](https://github.com/DiffEqML/torchdyn): Useful for trying other ODE solvers during research and development +- [labml.ai](https://nn.labml.ai/transformers/rope/index.html): For the RoPE implementation diff --git a/third_party/Matcha-TTS/configs/__init__.py b/third_party/Matcha-TTS/configs/__init__.py new file mode 100644 index 0000000..56bf7f4 --- /dev/null +++ b/third_party/Matcha-TTS/configs/__init__.py @@ -0,0 +1 @@ +# this file is needed here to include configs when building project as a package diff --git a/third_party/Matcha-TTS/configs/callbacks/default.yaml b/third_party/Matcha-TTS/configs/callbacks/default.yaml new file mode 100644 index 0000000..ebaa3ed --- /dev/null +++ b/third_party/Matcha-TTS/configs/callbacks/default.yaml @@ -0,0 +1,5 @@ +defaults: + - model_checkpoint.yaml + - model_summary.yaml + - rich_progress_bar.yaml + - _self_ diff --git a/third_party/Matcha-TTS/configs/callbacks/model_checkpoint.yaml b/third_party/Matcha-TTS/configs/callbacks/model_checkpoint.yaml new file mode 100644 index 0000000..3d085c7 --- /dev/null +++ b/third_party/Matcha-TTS/configs/callbacks/model_checkpoint.yaml @@ -0,0 +1,17 @@ +# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.ModelCheckpoint.html + +model_checkpoint: + _target_: lightning.pytorch.callbacks.ModelCheckpoint + dirpath: ${paths.output_dir}/checkpoints # directory to save the model file + filename: checkpoint_{epoch:03d} # checkpoint filename + monitor: epoch # name of the logged metric which determines when model is improving + verbose: False # verbosity mode + save_last: true # additionally always save an exact copy of the last checkpoint to a file last.ckpt + save_top_k: 10 # save k best models (determined by above metric) + mode: "max" # "max" means higher metric value is better, can be also "min" + auto_insert_metric_name: True # when True, the checkpoints filenames will contain the metric name + save_weights_only: False # if True, then only the model’s weights will be saved + every_n_train_steps: null # number of training steps between checkpoints + train_time_interval: null # checkpoints are monitored at the specified time interval + every_n_epochs: 100 # number of epochs between checkpoints + save_on_train_epoch_end: null # whether to run checkpointing at the end of the training epoch or the end of validation diff --git a/third_party/Matcha-TTS/configs/callbacks/model_summary.yaml b/third_party/Matcha-TTS/configs/callbacks/model_summary.yaml new file mode 100644 index 0000000..6e5368d --- /dev/null +++ b/third_party/Matcha-TTS/configs/callbacks/model_summary.yaml @@ -0,0 +1,5 @@ +# https://lightning.ai/docs/pytorch/stable/api/lightning.pytorch.callbacks.RichModelSummary.html + +model_summary: + _target_: lightning.pytorch.callbacks.RichModelSummary + max_depth: 3 # the maximum depth of layer nesting that the summary will include diff --git a/third_party/Matcha-TTS/configs/callbacks/none.yaml b/third_party/Matcha-TTS/configs/callbacks/none.yaml new file mode 100644 index 0000000..e69de29 diff --git 
a/third_party/Matcha-TTS/configs/callbacks/rich_progress_bar.yaml b/third_party/Matcha-TTS/configs/callbacks/rich_progress_bar.yaml new file mode 100644 index 0000000..de6f1cc --- /dev/null +++ b/third_party/Matcha-TTS/configs/callbacks/rich_progress_bar.yaml @@ -0,0 +1,4 @@ +# https://lightning.ai/docs/pytorch/latest/api/lightning.pytorch.callbacks.RichProgressBar.html + +rich_progress_bar: + _target_: lightning.pytorch.callbacks.RichProgressBar diff --git a/third_party/Matcha-TTS/configs/debug/default.yaml b/third_party/Matcha-TTS/configs/debug/default.yaml new file mode 100644 index 0000000..e3932c8 --- /dev/null +++ b/third_party/Matcha-TTS/configs/debug/default.yaml @@ -0,0 +1,35 @@ +# @package _global_ + +# default debugging setup, runs 1 full epoch +# other debugging configs can inherit from this one + +# overwrite task name so debugging logs are stored in separate folder +task_name: "debug" + +# disable callbacks and loggers during debugging +# callbacks: null +# logger: null + +extras: + ignore_warnings: False + enforce_tags: False + +# sets level of all command line loggers to 'DEBUG' +# https://hydra.cc/docs/tutorials/basic/running_your_app/logging/ +hydra: + job_logging: + root: + level: DEBUG + + # use this to also set hydra loggers to 'DEBUG' + # verbose: True + +trainer: + max_epochs: 1 + accelerator: cpu # debuggers don't like gpus + devices: 1 # debuggers don't like multiprocessing + detect_anomaly: true # raise exception if NaN or +/-inf is detected in any tensor + +data: + num_workers: 0 # debuggers don't like multiprocessing + pin_memory: False # disable gpu memory pin diff --git a/third_party/Matcha-TTS/configs/debug/fdr.yaml b/third_party/Matcha-TTS/configs/debug/fdr.yaml new file mode 100644 index 0000000..7f2d34f --- /dev/null +++ b/third_party/Matcha-TTS/configs/debug/fdr.yaml @@ -0,0 +1,9 @@ +# @package _global_ + +# runs 1 train, 1 validation and 1 test step + +defaults: + - default + +trainer: + fast_dev_run: true diff --git a/third_party/Matcha-TTS/configs/debug/limit.yaml b/third_party/Matcha-TTS/configs/debug/limit.yaml new file mode 100644 index 0000000..514d77f --- /dev/null +++ b/third_party/Matcha-TTS/configs/debug/limit.yaml @@ -0,0 +1,12 @@ +# @package _global_ + +# uses only 1% of the training data and 5% of validation/test data + +defaults: + - default + +trainer: + max_epochs: 3 + limit_train_batches: 0.01 + limit_val_batches: 0.05 + limit_test_batches: 0.05 diff --git a/third_party/Matcha-TTS/configs/debug/overfit.yaml b/third_party/Matcha-TTS/configs/debug/overfit.yaml new file mode 100644 index 0000000..9906586 --- /dev/null +++ b/third_party/Matcha-TTS/configs/debug/overfit.yaml @@ -0,0 +1,13 @@ +# @package _global_ + +# overfits to 3 batches + +defaults: + - default + +trainer: + max_epochs: 20 + overfit_batches: 3 + +# model ckpt and early stopping need to be disabled during overfitting +callbacks: null diff --git a/third_party/Matcha-TTS/configs/debug/profiler.yaml b/third_party/Matcha-TTS/configs/debug/profiler.yaml new file mode 100644 index 0000000..266295f --- /dev/null +++ b/third_party/Matcha-TTS/configs/debug/profiler.yaml @@ -0,0 +1,15 @@ +# @package _global_ + +# runs with execution time profiling + +defaults: + - default + +trainer: + max_epochs: 1 + # profiler: "simple" + profiler: "advanced" + # profiler: "pytorch" + accelerator: gpu + + limit_train_batches: 0.02 diff --git a/third_party/Matcha-TTS/configs/eval.yaml b/third_party/Matcha-TTS/configs/eval.yaml new file mode 100644 index 0000000..be31299 --- /dev/null +++ 
b/third_party/Matcha-TTS/configs/eval.yaml @@ -0,0 +1,18 @@ +# @package _global_ + +defaults: + - _self_ + - data: mnist # choose datamodule with `test_dataloader()` for evaluation + - model: mnist + - logger: null + - trainer: default + - paths: default + - extras: default + - hydra: default + +task_name: "eval" + +tags: ["dev"] + +# passing checkpoint path is necessary for evaluation +ckpt_path: ??? diff --git a/third_party/Matcha-TTS/configs/experiment/hifi_dataset_piper_phonemizer.yaml b/third_party/Matcha-TTS/configs/experiment/hifi_dataset_piper_phonemizer.yaml new file mode 100644 index 0000000..7e6c57a --- /dev/null +++ b/third_party/Matcha-TTS/configs/experiment/hifi_dataset_piper_phonemizer.yaml @@ -0,0 +1,14 @@ +# @package _global_ + +# to execute this experiment run: +# python train.py experiment=multispeaker + +defaults: + - override /data: hi-fi_en-US_female.yaml + +# all parameters below will be merged with parameters from default configurations set above +# this allows you to overwrite only specified parameters + +tags: ["hi-fi", "single_speaker", "piper_phonemizer", "en_US", "female"] + +run_name: hi-fi_en-US_female_piper_phonemizer diff --git a/third_party/Matcha-TTS/configs/experiment/ljspeech.yaml b/third_party/Matcha-TTS/configs/experiment/ljspeech.yaml new file mode 100644 index 0000000..d5723f4 --- /dev/null +++ b/third_party/Matcha-TTS/configs/experiment/ljspeech.yaml @@ -0,0 +1,14 @@ +# @package _global_ + +# to execute this experiment run: +# python train.py experiment=multispeaker + +defaults: + - override /data: ljspeech.yaml + +# all parameters below will be merged with parameters from default configurations set above +# this allows you to overwrite only specified parameters + +tags: ["ljspeech"] + +run_name: ljspeech diff --git a/third_party/Matcha-TTS/configs/experiment/ljspeech_min_memory.yaml b/third_party/Matcha-TTS/configs/experiment/ljspeech_min_memory.yaml new file mode 100644 index 0000000..ef554dc --- /dev/null +++ b/third_party/Matcha-TTS/configs/experiment/ljspeech_min_memory.yaml @@ -0,0 +1,18 @@ +# @package _global_ + +# to execute this experiment run: +# python train.py experiment=multispeaker + +defaults: + - override /data: ljspeech.yaml + +# all parameters below will be merged with parameters from default configurations set above +# this allows you to overwrite only specified parameters + +tags: ["ljspeech"] + +run_name: ljspeech_min + + +model: + out_size: 172 diff --git a/third_party/Matcha-TTS/configs/experiment/multispeaker.yaml b/third_party/Matcha-TTS/configs/experiment/multispeaker.yaml new file mode 100644 index 0000000..553842f --- /dev/null +++ b/third_party/Matcha-TTS/configs/experiment/multispeaker.yaml @@ -0,0 +1,14 @@ +# @package _global_ + +# to execute this experiment run: +# python train.py experiment=multispeaker + +defaults: + - override /data: vctk.yaml + +# all parameters below will be merged with parameters from default configurations set above +# this allows you to overwrite only specified parameters + +tags: ["multispeaker"] + +run_name: multispeaker diff --git a/third_party/Matcha-TTS/configs/extras/default.yaml b/third_party/Matcha-TTS/configs/extras/default.yaml new file mode 100644 index 0000000..b9c6b62 --- /dev/null +++ b/third_party/Matcha-TTS/configs/extras/default.yaml @@ -0,0 +1,8 @@ +# disable python warnings if they annoy you +ignore_warnings: False + +# ask user for tags if none are provided in the config +enforce_tags: True + +# pretty print config tree at the start of the run using Rich library 
+print_config: True diff --git a/third_party/Matcha-TTS/configs/hparams_search/mnist_optuna.yaml b/third_party/Matcha-TTS/configs/hparams_search/mnist_optuna.yaml new file mode 100644 index 0000000..1391183 --- /dev/null +++ b/third_party/Matcha-TTS/configs/hparams_search/mnist_optuna.yaml @@ -0,0 +1,52 @@ +# @package _global_ + +# example hyperparameter optimization of some experiment with Optuna: +# python train.py -m hparams_search=mnist_optuna experiment=example + +defaults: + - override /hydra/sweeper: optuna + +# choose metric which will be optimized by Optuna +# make sure this is the correct name of some metric logged in lightning module! +optimized_metric: "val/acc_best" + +# here we define Optuna hyperparameter search +# it optimizes for value returned from function with @hydra.main decorator +# docs: https://hydra.cc/docs/next/plugins/optuna_sweeper +hydra: + mode: "MULTIRUN" # set hydra to multirun by default if this config is attached + + sweeper: + _target_: hydra_plugins.hydra_optuna_sweeper.optuna_sweeper.OptunaSweeper + + # storage URL to persist optimization results + # for example, you can use SQLite if you set 'sqlite:///example.db' + storage: null + + # name of the study to persist optimization results + study_name: null + + # number of parallel workers + n_jobs: 1 + + # 'minimize' or 'maximize' the objective + direction: maximize + + # total number of runs that will be executed + n_trials: 20 + + # choose Optuna hyperparameter sampler + # you can choose bayesian sampler (tpe), random search (without optimization), grid sampler, and others + # docs: https://optuna.readthedocs.io/en/stable/reference/samplers.html + sampler: + _target_: optuna.samplers.TPESampler + seed: 1234 + n_startup_trials: 10 # number of random sampling runs before optimization starts + + # define hyperparameter search space + params: + model.optimizer.lr: interval(0.0001, 0.1) + data.batch_size: choice(32, 64, 128, 256) + model.net.lin1_size: choice(64, 128, 256) + model.net.lin2_size: choice(64, 128, 256) + model.net.lin3_size: choice(32, 64, 128, 256) diff --git a/third_party/Matcha-TTS/configs/hydra/default.yaml b/third_party/Matcha-TTS/configs/hydra/default.yaml new file mode 100644 index 0000000..1533136 --- /dev/null +++ b/third_party/Matcha-TTS/configs/hydra/default.yaml @@ -0,0 +1,19 @@ +# https://hydra.cc/docs/configure_hydra/intro/ + +# enable color logging +defaults: + - override hydra_logging: colorlog + - override job_logging: colorlog + +# output directory, generated dynamically on each run +run: + dir: ${paths.log_dir}/${task_name}/${run_name}/runs/${now:%Y-%m-%d}_${now:%H-%M-%S} +sweep: + dir: ${paths.log_dir}/${task_name}/${run_name}/multiruns/${now:%Y-%m-%d}_${now:%H-%M-%S} + subdir: ${hydra.job.num} + +job_logging: + handlers: + file: + # Incorporates fix from https://github.com/facebookresearch/hydra/pull/2242 + filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log diff --git a/third_party/Matcha-TTS/configs/local/.gitkeep b/third_party/Matcha-TTS/configs/local/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/third_party/Matcha-TTS/configs/logger/aim.yaml b/third_party/Matcha-TTS/configs/logger/aim.yaml new file mode 100644 index 0000000..8f9f6ad --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/aim.yaml @@ -0,0 +1,28 @@ +# https://aimstack.io/ + +# example usage in lightning module: +# https://github.com/aimhubio/aim/blob/main/examples/pytorch_lightning_track.py + +# open the Aim UI with the following command (run in the folder containing the 
`.aim` folder): +# `aim up` + +aim: + _target_: aim.pytorch_lightning.AimLogger + repo: ${paths.root_dir} # .aim folder will be created here + # repo: "aim://ip_address:port" # can instead provide IP address pointing to Aim remote tracking server which manages the repo, see https://aimstack.readthedocs.io/en/latest/using/remote_tracking.html# + + # aim allows to group runs under experiment name + experiment: null # any string, set to "default" if not specified + + train_metric_prefix: "train/" + val_metric_prefix: "val/" + test_metric_prefix: "test/" + + # sets the tracking interval in seconds for system usage metrics (CPU, GPU, memory, etc.) + system_tracking_interval: 10 # set to null to disable system metrics tracking + + # enable/disable logging of system params such as installed packages, git info, env vars, etc. + log_system_params: true + + # enable/disable tracking console logs (default value is true) + capture_terminal_logs: false # set to false to avoid infinite console log loop issue https://github.com/aimhubio/aim/issues/2550 diff --git a/third_party/Matcha-TTS/configs/logger/comet.yaml b/third_party/Matcha-TTS/configs/logger/comet.yaml new file mode 100644 index 0000000..e078927 --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/comet.yaml @@ -0,0 +1,12 @@ +# https://www.comet.ml + +comet: + _target_: lightning.pytorch.loggers.comet.CometLogger + api_key: ${oc.env:COMET_API_TOKEN} # api key is loaded from environment variable + save_dir: "${paths.output_dir}" + project_name: "lightning-hydra-template" + rest_api_key: null + # experiment_name: "" + experiment_key: null # set to resume experiment + offline: False + prefix: "" diff --git a/third_party/Matcha-TTS/configs/logger/csv.yaml b/third_party/Matcha-TTS/configs/logger/csv.yaml new file mode 100644 index 0000000..fa028e9 --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/csv.yaml @@ -0,0 +1,7 @@ +# csv logger built in lightning + +csv: + _target_: lightning.pytorch.loggers.csv_logs.CSVLogger + save_dir: "${paths.output_dir}" + name: "csv/" + prefix: "" diff --git a/third_party/Matcha-TTS/configs/logger/many_loggers.yaml b/third_party/Matcha-TTS/configs/logger/many_loggers.yaml new file mode 100644 index 0000000..dd58680 --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/many_loggers.yaml @@ -0,0 +1,9 @@ +# train with many loggers at once + +defaults: + # - comet + - csv + # - mlflow + # - neptune + - tensorboard + - wandb diff --git a/third_party/Matcha-TTS/configs/logger/mlflow.yaml b/third_party/Matcha-TTS/configs/logger/mlflow.yaml new file mode 100644 index 0000000..f8fb7e6 --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/mlflow.yaml @@ -0,0 +1,12 @@ +# https://mlflow.org + +mlflow: + _target_: lightning.pytorch.loggers.mlflow.MLFlowLogger + # experiment_name: "" + # run_name: "" + tracking_uri: ${paths.log_dir}/mlflow/mlruns # run `mlflow ui` command inside the `logs/mlflow/` dir to open the UI + tags: null + # save_dir: "./mlruns" + prefix: "" + artifact_location: null + # run_id: "" diff --git a/third_party/Matcha-TTS/configs/logger/neptune.yaml b/third_party/Matcha-TTS/configs/logger/neptune.yaml new file mode 100644 index 0000000..8233c14 --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/neptune.yaml @@ -0,0 +1,9 @@ +# https://neptune.ai + +neptune: + _target_: lightning.pytorch.loggers.neptune.NeptuneLogger + api_key: ${oc.env:NEPTUNE_API_TOKEN} # api key is loaded from environment variable + project: username/lightning-hydra-template + # name: "" + log_model_checkpoints: True 
+ prefix: "" diff --git a/third_party/Matcha-TTS/configs/logger/tensorboard.yaml b/third_party/Matcha-TTS/configs/logger/tensorboard.yaml new file mode 100644 index 0000000..2bd31f6 --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/tensorboard.yaml @@ -0,0 +1,10 @@ +# https://www.tensorflow.org/tensorboard/ + +tensorboard: + _target_: lightning.pytorch.loggers.tensorboard.TensorBoardLogger + save_dir: "${paths.output_dir}/tensorboard/" + name: null + log_graph: False + default_hp_metric: True + prefix: "" + # version: "" diff --git a/third_party/Matcha-TTS/configs/logger/wandb.yaml b/third_party/Matcha-TTS/configs/logger/wandb.yaml new file mode 100644 index 0000000..ece1658 --- /dev/null +++ b/third_party/Matcha-TTS/configs/logger/wandb.yaml @@ -0,0 +1,16 @@ +# https://wandb.ai + +wandb: + _target_: lightning.pytorch.loggers.wandb.WandbLogger + # name: "" # name of the run (normally generated by wandb) + save_dir: "${paths.output_dir}" + offline: False + id: null # pass correct id to resume experiment! + anonymous: null # enable anonymous logging + project: "lightning-hydra-template" + log_model: False # upload lightning ckpts + prefix: "" # a string to put at the beginning of metric keys + # entity: "" # set to name of your wandb team + group: "" + tags: [] + job_type: "" diff --git a/third_party/Matcha-TTS/configs/model/cfm/default.yaml b/third_party/Matcha-TTS/configs/model/cfm/default.yaml new file mode 100644 index 0000000..0d1d960 --- /dev/null +++ b/third_party/Matcha-TTS/configs/model/cfm/default.yaml @@ -0,0 +1,3 @@ +name: CFM +solver: euler +sigma_min: 1e-4 diff --git a/third_party/Matcha-TTS/configs/model/decoder/default.yaml b/third_party/Matcha-TTS/configs/model/decoder/default.yaml new file mode 100644 index 0000000..aaa00e6 --- /dev/null +++ b/third_party/Matcha-TTS/configs/model/decoder/default.yaml @@ -0,0 +1,7 @@ +channels: [256, 256] +dropout: 0.05 +attention_head_dim: 64 +n_blocks: 1 +num_mid_blocks: 2 +num_heads: 2 +act_fn: snakebeta diff --git a/third_party/Matcha-TTS/configs/model/encoder/default.yaml b/third_party/Matcha-TTS/configs/model/encoder/default.yaml new file mode 100644 index 0000000..d4d5e5a --- /dev/null +++ b/third_party/Matcha-TTS/configs/model/encoder/default.yaml @@ -0,0 +1,18 @@ +encoder_type: RoPE Encoder +encoder_params: + n_feats: ${model.n_feats} + n_channels: 192 + filter_channels: 768 + filter_channels_dp: 256 + n_heads: 2 + n_layers: 6 + kernel_size: 3 + p_dropout: 0.1 + spk_emb_dim: 64 + n_spks: 1 + prenet: true + +duration_predictor_params: + filter_channels_dp: ${model.encoder.encoder_params.filter_channels_dp} + kernel_size: 3 + p_dropout: ${model.encoder.encoder_params.p_dropout} diff --git a/third_party/Matcha-TTS/configs/model/matcha.yaml b/third_party/Matcha-TTS/configs/model/matcha.yaml new file mode 100644 index 0000000..36f6eaf --- /dev/null +++ b/third_party/Matcha-TTS/configs/model/matcha.yaml @@ -0,0 +1,15 @@ +defaults: + - _self_ + - encoder: default.yaml + - decoder: default.yaml + - cfm: default.yaml + - optimizer: adam.yaml + +_target_: matcha.models.matcha_tts.MatchaTTS +n_vocab: 178 +n_spks: ${data.n_spks} +spk_emb_dim: 64 +n_feats: 80 +data_statistics: ${data.data_statistics} +out_size: null # Must be divisible by 4 +prior_loss: true diff --git a/third_party/Matcha-TTS/configs/model/optimizer/adam.yaml b/third_party/Matcha-TTS/configs/model/optimizer/adam.yaml new file mode 100644 index 0000000..4279557 --- /dev/null +++ b/third_party/Matcha-TTS/configs/model/optimizer/adam.yaml @@ -0,0 +1,4 @@ +_target_: 
torch.optim.Adam +_partial_: true +lr: 1e-4 +weight_decay: 0.0 diff --git a/third_party/Matcha-TTS/configs/paths/default.yaml b/third_party/Matcha-TTS/configs/paths/default.yaml new file mode 100644 index 0000000..ec81db2 --- /dev/null +++ b/third_party/Matcha-TTS/configs/paths/default.yaml @@ -0,0 +1,18 @@ +# path to root directory +# this requires PROJECT_ROOT environment variable to exist +# you can replace it with "." if you want the root to be the current working directory +root_dir: ${oc.env:PROJECT_ROOT} + +# path to data directory +data_dir: ${paths.root_dir}/data/ + +# path to logging directory +log_dir: ${paths.root_dir}/logs/ + +# path to output directory, created dynamically by hydra +# path generation pattern is specified in `configs/hydra/default.yaml` +# use it to store all files generated during the run, like ckpts and metrics +output_dir: ${hydra:runtime.output_dir} + +# path to working directory +work_dir: ${hydra:runtime.cwd} diff --git a/third_party/Matcha-TTS/configs/train.yaml b/third_party/Matcha-TTS/configs/train.yaml new file mode 100644 index 0000000..e6f5c2e --- /dev/null +++ b/third_party/Matcha-TTS/configs/train.yaml @@ -0,0 +1,51 @@ +# @package _global_ + +# specify here default configuration +# order of defaults determines the order in which configs override each other +defaults: + - _self_ + - data: ljspeech + - model: matcha + - callbacks: default + - logger: tensorboard # set logger here or use command line (e.g. `python train.py logger=tensorboard`) + - trainer: default + - paths: default + - extras: default + - hydra: default + + # experiment configs allow for version control of specific hyperparameters + # e.g. best hyperparameters for given model and datamodule + - experiment: null + + # config for hyperparameter optimization + - hparams_search: null + + # optional local config for machine/user specific settings + # it's optional since it doesn't need to exist and is excluded from version control + - optional local: default + + # debugging config (enable through command line, e.g. `python train.py debug=default) + - debug: null + +# task name, determines output directory path +task_name: "train" + +run_name: ??? 
+ +# tags to help you identify your experiments +# you can overwrite this in experiment configs +# overwrite from command line with `python train.py tags="[first_tag, second_tag]"` +tags: ["dev"] + +# set False to skip model training +train: True + +# evaluate on test set, using best model weights achieved during training +# lightning chooses best weights based on the metric specified in checkpoint callback +test: True + +# simply provide checkpoint path to resume training +ckpt_path: null + +# seed for random number generators in pytorch, numpy and python.random +seed: 1234 diff --git a/third_party/Matcha-TTS/configs/trainer/cpu.yaml b/third_party/Matcha-TTS/configs/trainer/cpu.yaml new file mode 100644 index 0000000..b7d6767 --- /dev/null +++ b/third_party/Matcha-TTS/configs/trainer/cpu.yaml @@ -0,0 +1,5 @@ +defaults: + - default + +accelerator: cpu +devices: 1 diff --git a/third_party/Matcha-TTS/configs/trainer/ddp.yaml b/third_party/Matcha-TTS/configs/trainer/ddp.yaml new file mode 100644 index 0000000..94b43e2 --- /dev/null +++ b/third_party/Matcha-TTS/configs/trainer/ddp.yaml @@ -0,0 +1,9 @@ +defaults: + - default + +strategy: ddp + +accelerator: gpu +devices: [0,1] +num_nodes: 1 +sync_batchnorm: True diff --git a/third_party/Matcha-TTS/configs/trainer/ddp_sim.yaml b/third_party/Matcha-TTS/configs/trainer/ddp_sim.yaml new file mode 100644 index 0000000..8404419 --- /dev/null +++ b/third_party/Matcha-TTS/configs/trainer/ddp_sim.yaml @@ -0,0 +1,7 @@ +defaults: + - default + +# simulate DDP on CPU, useful for debugging +accelerator: cpu +devices: 2 +strategy: ddp_spawn diff --git a/third_party/Matcha-TTS/configs/trainer/default.yaml b/third_party/Matcha-TTS/configs/trainer/default.yaml new file mode 100644 index 0000000..ee3d370 --- /dev/null +++ b/third_party/Matcha-TTS/configs/trainer/default.yaml @@ -0,0 +1,20 @@ +_target_: lightning.pytorch.trainer.Trainer + +default_root_dir: ${paths.output_dir} + +max_epochs: -1 + +accelerator: gpu +devices: [0] + +# mixed precision for extra speed-up +precision: 16-mixed + +# perform a validation loop every N training epochs +check_val_every_n_epoch: 1 + +# set True to to ensure deterministic results +# makes training slower but gives more reproducibility than just setting seeds +deterministic: False + +gradient_clip_val: 5.0 diff --git a/third_party/Matcha-TTS/configs/trainer/gpu.yaml b/third_party/Matcha-TTS/configs/trainer/gpu.yaml new file mode 100644 index 0000000..b238951 --- /dev/null +++ b/third_party/Matcha-TTS/configs/trainer/gpu.yaml @@ -0,0 +1,5 @@ +defaults: + - default + +accelerator: gpu +devices: 1 diff --git a/third_party/Matcha-TTS/configs/trainer/mps.yaml b/third_party/Matcha-TTS/configs/trainer/mps.yaml new file mode 100644 index 0000000..1ecf6d5 --- /dev/null +++ b/third_party/Matcha-TTS/configs/trainer/mps.yaml @@ -0,0 +1,5 @@ +defaults: + - default + +accelerator: mps +devices: 1 diff --git a/third_party/Matcha-TTS/matcha/VERSION b/third_party/Matcha-TTS/matcha/VERSION new file mode 100644 index 0000000..442b113 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/VERSION @@ -0,0 +1 @@ +0.0.5.1 diff --git a/third_party/Matcha-TTS/matcha/__init__.py b/third_party/Matcha-TTS/matcha/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/Matcha-TTS/matcha/app.py b/third_party/Matcha-TTS/matcha/app.py new file mode 100644 index 0000000..d68fbaa --- /dev/null +++ b/third_party/Matcha-TTS/matcha/app.py @@ -0,0 +1,357 @@ +import tempfile +from argparse import Namespace +from pathlib import Path + +import 
gradio as gr +import soundfile as sf +import torch + +from matcha.cli import ( + MATCHA_URLS, + VOCODER_URLS, + assert_model_downloaded, + get_device, + load_matcha, + load_vocoder, + process_text, + to_waveform, +) +from matcha.utils.utils import get_user_data_dir, plot_tensor + +LOCATION = Path(get_user_data_dir()) + +args = Namespace( + cpu=False, + model="matcha_vctk", + vocoder="hifigan_univ_v1", + spk=0, +) + +CURRENTLY_LOADED_MODEL = args.model + + +def MATCHA_TTS_LOC(x): + return LOCATION / f"{x}.ckpt" + + +def VOCODER_LOC(x): + return LOCATION / f"{x}" + + +LOGO_URL = "https://shivammehta25.github.io/Matcha-TTS/images/logo.png" +RADIO_OPTIONS = { + "Multi Speaker (VCTK)": { + "model": "matcha_vctk", + "vocoder": "hifigan_univ_v1", + }, + "Single Speaker (LJ Speech)": { + "model": "matcha_ljspeech", + "vocoder": "hifigan_T2_v1", + }, +} + +# Ensure all the required models are downloaded +assert_model_downloaded(MATCHA_TTS_LOC("matcha_ljspeech"), MATCHA_URLS["matcha_ljspeech"]) +assert_model_downloaded(VOCODER_LOC("hifigan_T2_v1"), VOCODER_URLS["hifigan_T2_v1"]) +assert_model_downloaded(MATCHA_TTS_LOC("matcha_vctk"), MATCHA_URLS["matcha_vctk"]) +assert_model_downloaded(VOCODER_LOC("hifigan_univ_v1"), VOCODER_URLS["hifigan_univ_v1"]) + +device = get_device(args) + +# Load default model +model = load_matcha(args.model, MATCHA_TTS_LOC(args.model), device) +vocoder, denoiser = load_vocoder(args.vocoder, VOCODER_LOC(args.vocoder), device) + + +def load_model(model_name, vocoder_name): + model = load_matcha(model_name, MATCHA_TTS_LOC(model_name), device) + vocoder, denoiser = load_vocoder(vocoder_name, VOCODER_LOC(vocoder_name), device) + return model, vocoder, denoiser + + +def load_model_ui(model_type, textbox): + model_name, vocoder_name = RADIO_OPTIONS[model_type]["model"], RADIO_OPTIONS[model_type]["vocoder"] + + global model, vocoder, denoiser, CURRENTLY_LOADED_MODEL # pylint: disable=global-statement + if CURRENTLY_LOADED_MODEL != model_name: + model, vocoder, denoiser = load_model(model_name, vocoder_name) + CURRENTLY_LOADED_MODEL = model_name + + if model_name == "matcha_ljspeech": + spk_slider = gr.update(visible=False, value=-1) + single_speaker_examples = gr.update(visible=True) + multi_speaker_examples = gr.update(visible=False) + length_scale = gr.update(value=0.95) + else: + spk_slider = gr.update(visible=True, value=0) + single_speaker_examples = gr.update(visible=False) + multi_speaker_examples = gr.update(visible=True) + length_scale = gr.update(value=0.85) + + return ( + textbox, + gr.update(interactive=True), + spk_slider, + single_speaker_examples, + multi_speaker_examples, + length_scale, + ) + + +@torch.inference_mode() +def process_text_gradio(text): + output = process_text(1, text, device) + return output["x_phones"][1::2], output["x"], output["x_lengths"] + + +@torch.inference_mode() +def synthesise_mel(text, text_length, n_timesteps, temperature, length_scale, spk): + spk = torch.tensor([spk], device=device, dtype=torch.long) if spk >= 0 else None + output = model.synthesise( + text, + text_length, + n_timesteps=n_timesteps, + temperature=temperature, + spks=spk, + length_scale=length_scale, + ) + output["waveform"] = to_waveform(output["mel"], vocoder, denoiser) + with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp: + sf.write(fp.name, output["waveform"], 22050, "PCM_24") + + return fp.name, plot_tensor(output["mel"].squeeze().cpu().numpy()) + + +def multispeaker_example_cacher(text, n_timesteps, mel_temp, length_scale, spk): + global 
CURRENTLY_LOADED_MODEL # pylint: disable=global-statement + if CURRENTLY_LOADED_MODEL != "matcha_vctk": + global model, vocoder, denoiser # pylint: disable=global-statement + model, vocoder, denoiser = load_model("matcha_vctk", "hifigan_univ_v1") + CURRENTLY_LOADED_MODEL = "matcha_vctk" + + phones, text, text_lengths = process_text_gradio(text) + audio, mel_spectrogram = synthesise_mel(text, text_lengths, n_timesteps, mel_temp, length_scale, spk) + return phones, audio, mel_spectrogram + + +def ljspeech_example_cacher(text, n_timesteps, mel_temp, length_scale, spk=-1): + global CURRENTLY_LOADED_MODEL # pylint: disable=global-statement + if CURRENTLY_LOADED_MODEL != "matcha_ljspeech": + global model, vocoder, denoiser # pylint: disable=global-statement + model, vocoder, denoiser = load_model("matcha_ljspeech", "hifigan_T2_v1") + CURRENTLY_LOADED_MODEL = "matcha_ljspeech" + + phones, text, text_lengths = process_text_gradio(text) + audio, mel_spectrogram = synthesise_mel(text, text_lengths, n_timesteps, mel_temp, length_scale, spk) + return phones, audio, mel_spectrogram + + +def main(): + description = """# 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching + ### [Shivam Mehta](https://www.kth.se/profile/smehta), [Ruibo Tu](https://www.kth.se/profile/ruibo), [Jonas Beskow](https://www.kth.se/profile/beskow), [Éva Székely](https://www.kth.se/profile/szekely), and [Gustav Eje Henter](https://people.kth.se/~ghe/) + We propose 🍵 Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up ODE-based speech synthesis. Our method: + + + * Is probabilistic + * Has compact memory footprint + * Sounds highly natural + * Is very fast to synthesise from + + + Check out our [demo page](https://shivammehta25.github.io/Matcha-TTS). Read our [arXiv preprint for more details](https://arxiv.org/abs/2309.03199). + Code is available in our [GitHub repository](https://github.com/shivammehta25/Matcha-TTS), along with pre-trained models. + + Cached examples are available at the bottom of the page. + """ + + with gr.Blocks(title="🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching") as demo: + processed_text = gr.State(value=None) + processed_text_len = gr.State(value=None) + + with gr.Box(): + with gr.Row(): + gr.Markdown(description, scale=3) + with gr.Column(): + gr.Image(LOGO_URL, label="Matcha-TTS logo", height=50, width=50, scale=1, show_label=False) + html = '
' + gr.HTML(html) + + with gr.Box(): + radio_options = list(RADIO_OPTIONS.keys()) + model_type = gr.Radio( + radio_options, value=radio_options[0], label="Choose a Model", interactive=True, container=False + ) + + with gr.Row(): + gr.Markdown("# Text Input") + with gr.Row(): + text = gr.Textbox(value="", lines=2, label="Text to synthesise", scale=3) + spk_slider = gr.Slider( + minimum=0, maximum=107, step=1, value=args.spk, label="Speaker ID", interactive=True, scale=1 + ) + + with gr.Row(): + gr.Markdown("### Hyper parameters") + with gr.Row(): + n_timesteps = gr.Slider( + label="Number of ODE steps", + minimum=1, + maximum=100, + step=1, + value=10, + interactive=True, + ) + length_scale = gr.Slider( + label="Length scale (Speaking rate)", + minimum=0.5, + maximum=1.5, + step=0.05, + value=1.0, + interactive=True, + ) + mel_temp = gr.Slider( + label="Sampling temperature", + minimum=0.00, + maximum=2.001, + step=0.16675, + value=0.667, + interactive=True, + ) + + synth_btn = gr.Button("Synthesise") + + with gr.Box(): + with gr.Row(): + gr.Markdown("### Phonetised text") + phonetised_text = gr.Textbox(interactive=False, scale=10, label="Phonetised text") + + with gr.Box(): + with gr.Row(): + mel_spectrogram = gr.Image(interactive=False, label="mel spectrogram") + + # with gr.Row(): + audio = gr.Audio(interactive=False, label="Audio") + + with gr.Row(visible=False) as example_row_lj_speech: + examples = gr.Examples( # pylint: disable=unused-variable + examples=[ + [ + "We propose Matcha-TTS, a new approach to non-autoregressive neural TTS, that uses conditional flow matching (similar to rectified flows) to speed up O D E-based speech synthesis.", + 50, + 0.677, + 0.95, + ], + [ + "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", + 2, + 0.677, + 0.95, + ], + [ + "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", + 4, + 0.677, + 0.95, + ], + [ + "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", + 10, + 0.677, + 0.95, + ], + [ + "The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.", + 50, + 0.677, + 0.95, + ], + [ + "The narrative of these events is based largely on the recollections of the participants.", + 10, + 0.677, + 0.95, + ], + [ + "The jury did not believe him, and the verdict was for the defendants.", + 10, + 0.677, + 0.95, + ], + ], + fn=ljspeech_example_cacher, + inputs=[text, n_timesteps, mel_temp, length_scale], + outputs=[phonetised_text, audio, mel_spectrogram], + cache_examples=True, + ) + + with gr.Row() as example_row_multispeaker: + multi_speaker_examples = gr.Examples( # pylint: disable=unused-variable + examples=[ + [ + "Hello everyone! I am speaker 0 and I am here to tell you that Matcha-TTS is amazing!", + 10, + 0.677, + 0.85, + 0, + ], + [ + "Hello everyone! I am speaker 16 and I am here to tell you that Matcha-TTS is amazing!", + 10, + 0.677, + 0.85, + 16, + ], + [ + "Hello everyone! I am speaker 44 and I am here to tell you that Matcha-TTS is amazing!", + 50, + 0.677, + 0.85, + 44, + ], + [ + "Hello everyone! I am speaker 45 and I am here to tell you that Matcha-TTS is amazing!", + 50, + 0.677, + 0.85, + 45, + ], + [ + "Hello everyone! 
I am speaker 58 and I am here to tell you that Matcha-TTS is amazing!", + 4, + 0.677, + 0.85, + 58, + ], + ], + fn=multispeaker_example_cacher, + inputs=[text, n_timesteps, mel_temp, length_scale, spk_slider], + outputs=[phonetised_text, audio, mel_spectrogram], + cache_examples=True, + label="Multi Speaker Examples", + ) + + model_type.change(lambda x: gr.update(interactive=False), inputs=[synth_btn], outputs=[synth_btn]).then( + load_model_ui, + inputs=[model_type, text], + outputs=[text, synth_btn, spk_slider, example_row_lj_speech, example_row_multispeaker, length_scale], + ) + + synth_btn.click( + fn=process_text_gradio, + inputs=[ + text, + ], + outputs=[phonetised_text, processed_text, processed_text_len], + api_name="matcha_tts", + queue=True, + ).then( + fn=synthesise_mel, + inputs=[processed_text, processed_text_len, n_timesteps, mel_temp, length_scale, spk_slider], + outputs=[audio, mel_spectrogram], + ) + + demo.queue().launch(share=True) + + +if __name__ == "__main__": + main() diff --git a/third_party/Matcha-TTS/matcha/cli.py b/third_party/Matcha-TTS/matcha/cli.py new file mode 100644 index 0000000..579d7d6 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/cli.py @@ -0,0 +1,418 @@ +import argparse +import datetime as dt +import os +import warnings +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np +import soundfile as sf +import torch + +from matcha.hifigan.config import v1 +from matcha.hifigan.denoiser import Denoiser +from matcha.hifigan.env import AttrDict +from matcha.hifigan.models import Generator as HiFiGAN +from matcha.models.matcha_tts import MatchaTTS +from matcha.text import sequence_to_text, text_to_sequence +from matcha.utils.utils import assert_model_downloaded, get_user_data_dir, intersperse + +MATCHA_URLS = { + "matcha_ljspeech": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/matcha_ljspeech.ckpt", + "matcha_vctk": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/matcha_vctk.ckpt", +} + +VOCODER_URLS = { + "hifigan_T2_v1": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/generator_v1", # Old url: https://drive.google.com/file/d/14NENd4equCBLyyCSke114Mv6YR_j_uFs/view?usp=drive_link + "hifigan_univ_v1": "https://github.com/shivammehta25/Matcha-TTS-checkpoints/releases/download/v1.0/g_02500000", # Old url: https://drive.google.com/file/d/1qpgI41wNXFcH-iKq1Y42JlBC9j0je8PW/view?usp=drive_link +} + +MULTISPEAKER_MODEL = { + "matcha_vctk": {"vocoder": "hifigan_univ_v1", "speaking_rate": 0.85, "spk": 0, "spk_range": (0, 107)} +} + +SINGLESPEAKER_MODEL = {"matcha_ljspeech": {"vocoder": "hifigan_T2_v1", "speaking_rate": 0.95, "spk": None}} + + +def plot_spectrogram_to_numpy(spectrogram, filename): + fig, ax = plt.subplots(figsize=(12, 3)) + im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") + plt.colorbar(im, ax=ax) + plt.xlabel("Frames") + plt.ylabel("Channels") + plt.title("Synthesised Mel-Spectrogram") + fig.canvas.draw() + plt.savefig(filename) + + +def process_text(i: int, text: str, device: torch.device): + print(f"[{i}] - Input text: {text}") + x = torch.tensor( + intersperse(text_to_sequence(text, ["english_cleaners2"]), 0), + dtype=torch.long, + device=device, + )[None] + x_lengths = torch.tensor([x.shape[-1]], dtype=torch.long, device=device) + x_phones = sequence_to_text(x.squeeze(0).tolist()) + print(f"[{i}] - Phonetised text: {x_phones[1::2]}") + + return {"x_orig": text, "x": x, "x_lengths": x_lengths, 
"x_phones": x_phones} + + +def get_texts(args): + if args.text: + texts = [args.text] + else: + with open(args.file, encoding="utf-8") as f: + texts = f.readlines() + return texts + + +def assert_required_models_available(args): + save_dir = get_user_data_dir() + if not hasattr(args, "checkpoint_path") and args.checkpoint_path is None: + model_path = args.checkpoint_path + else: + model_path = save_dir / f"{args.model}.ckpt" + assert_model_downloaded(model_path, MATCHA_URLS[args.model]) + + vocoder_path = save_dir / f"{args.vocoder}" + assert_model_downloaded(vocoder_path, VOCODER_URLS[args.vocoder]) + return {"matcha": model_path, "vocoder": vocoder_path} + + +def load_hifigan(checkpoint_path, device): + h = AttrDict(v1) + hifigan = HiFiGAN(h).to(device) + hifigan.load_state_dict(torch.load(checkpoint_path, map_location=device)["generator"]) + _ = hifigan.eval() + hifigan.remove_weight_norm() + return hifigan + + +def load_vocoder(vocoder_name, checkpoint_path, device): + print(f"[!] Loading {vocoder_name}!") + vocoder = None + if vocoder_name in ("hifigan_T2_v1", "hifigan_univ_v1"): + vocoder = load_hifigan(checkpoint_path, device) + else: + raise NotImplementedError( + f"Vocoder {vocoder_name} not implemented! define a load_<> method for it" + ) + + denoiser = Denoiser(vocoder, mode="zeros") + print(f"[+] {vocoder_name} loaded!") + return vocoder, denoiser + + +def load_matcha(model_name, checkpoint_path, device): + print(f"[!] Loading {model_name}!") + model = MatchaTTS.load_from_checkpoint(checkpoint_path, map_location=device) + _ = model.eval() + + print(f"[+] {model_name} loaded!") + return model + + +def to_waveform(mel, vocoder, denoiser=None): + audio = vocoder(mel).clamp(-1, 1) + if denoiser is not None: + audio = denoiser(audio.squeeze(), strength=0.00025).cpu().squeeze() + + return audio.cpu().squeeze() + + +def save_to_folder(filename: str, output: dict, folder: str): + folder = Path(folder) + folder.mkdir(exist_ok=True, parents=True) + plot_spectrogram_to_numpy(np.array(output["mel"].squeeze().float().cpu()), f"{filename}.png") + np.save(folder / f"{filename}", output["mel"].cpu().numpy()) + sf.write(folder / f"{filename}.wav", output["waveform"], 22050, "PCM_24") + return folder.resolve() / f"{filename}.wav" + + +def validate_args(args): + assert ( + args.text or args.file + ), "Either text or file must be provided Matcha-T(ea)TTS need sometext to whisk the waveforms." + assert args.temperature >= 0, "Sampling temperature cannot be negative" + assert args.steps > 0, "Number of ODE steps must be greater than 0" + + if args.checkpoint_path is None: + # When using pretrained models + if args.model in SINGLESPEAKER_MODEL: + args = validate_args_for_single_speaker_model(args) + + if args.model in MULTISPEAKER_MODEL: + args = validate_args_for_multispeaker_model(args) + else: + # When using a custom model + if args.vocoder != "hifigan_univ_v1": + warn_ = "[-] Using custom model checkpoint! I would suggest passing --vocoder hifigan_univ_v1, unless the custom model is trained on LJ Speech." + warnings.warn(warn_, UserWarning) + if args.speaking_rate is None: + args.speaking_rate = 1.0 + + if args.batched: + assert args.batch_size > 0, "Batch size must be greater than 0" + assert args.speaking_rate > 0, "Speaking rate must be greater than 0" + + return args + + +def validate_args_for_multispeaker_model(args): + if args.vocoder is not None: + if args.vocoder != MULTISPEAKER_MODEL[args.model]["vocoder"]: + warn_ = f"[-] Using {args.model} model! 
I would suggest passing --vocoder {MULTISPEAKER_MODEL[args.model]['vocoder']}" + warnings.warn(warn_, UserWarning) + else: + args.vocoder = MULTISPEAKER_MODEL[args.model]["vocoder"] + + if args.speaking_rate is None: + args.speaking_rate = MULTISPEAKER_MODEL[args.model]["speaking_rate"] + + spk_range = MULTISPEAKER_MODEL[args.model]["spk_range"] + if args.spk is not None: + assert ( + args.spk >= spk_range[0] and args.spk <= spk_range[-1] + ), f"Speaker ID must be between {spk_range} for this model." + else: + available_spk_id = MULTISPEAKER_MODEL[args.model]["spk"] + warn_ = f"[!] Speaker ID not provided! Using speaker ID {available_spk_id}" + warnings.warn(warn_, UserWarning) + args.spk = available_spk_id + + return args + + +def validate_args_for_single_speaker_model(args): + if args.vocoder is not None: + if args.vocoder != SINGLESPEAKER_MODEL[args.model]["vocoder"]: + warn_ = f"[-] Using {args.model} model! I would suggest passing --vocoder {SINGLESPEAKER_MODEL[args.model]['vocoder']}" + warnings.warn(warn_, UserWarning) + else: + args.vocoder = SINGLESPEAKER_MODEL[args.model]["vocoder"] + + if args.speaking_rate is None: + args.speaking_rate = SINGLESPEAKER_MODEL[args.model]["speaking_rate"] + + if args.spk != SINGLESPEAKER_MODEL[args.model]["spk"]: + warn_ = f"[-] Ignoring speaker id {args.spk} for {args.model}" + warnings.warn(warn_, UserWarning) + args.spk = SINGLESPEAKER_MODEL[args.model]["spk"] + + return args + + +@torch.inference_mode() +def cli(): + parser = argparse.ArgumentParser( + description=" 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching" + ) + parser.add_argument( + "--model", + type=str, + default="matcha_ljspeech", + help="Model to use", + choices=MATCHA_URLS.keys(), + ) + + parser.add_argument( + "--checkpoint_path", + type=str, + default=None, + help="Path to the custom model checkpoint", + ) + + parser.add_argument( + "--vocoder", + type=str, + default=None, + help="Vocoder to use (default: will use the one suggested with the pretrained model))", + choices=VOCODER_URLS.keys(), + ) + parser.add_argument("--text", type=str, default=None, help="Text to synthesize") + parser.add_argument("--file", type=str, default=None, help="Text file to synthesize") + parser.add_argument("--spk", type=int, default=None, help="Speaker ID") + parser.add_argument( + "--temperature", + type=float, + default=0.667, + help="Variance of the x0 noise (default: 0.667)", + ) + parser.add_argument( + "--speaking_rate", + type=float, + default=None, + help="change the speaking rate, a higher value means slower speaking rate (default: 1.0)", + ) + parser.add_argument("--steps", type=int, default=10, help="Number of ODE steps (default: 10)") + parser.add_argument("--cpu", action="store_true", help="Use CPU for inference (default: use GPU if available)") + parser.add_argument( + "--denoiser_strength", + type=float, + default=0.00025, + help="Strength of the vocoder bias denoiser (default: 0.00025)", + ) + parser.add_argument( + "--output_folder", + type=str, + default=os.getcwd(), + help="Output folder to save results (default: current dir)", + ) + parser.add_argument("--batched", action="store_true", help="Batched inference (default: False)") + parser.add_argument( + "--batch_size", type=int, default=32, help="Batch size only useful when --batched (default: 32)" + ) + + args = parser.parse_args() + + args = validate_args(args) + device = get_device(args) + print_config(args) + paths = assert_required_models_available(args) + + if args.checkpoint_path is not None: + 
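+        # A user-supplied --checkpoint_path takes precedence over the bundled pretrained
+        # checkpoints; the vocoder is still chosen via --vocoder (see validate_args above).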
print(f"[🍵] Loading custom model from {args.checkpoint_path}") + paths["matcha"] = args.checkpoint_path + args.model = "custom_model" + + model = load_matcha(args.model, paths["matcha"], device) + vocoder, denoiser = load_vocoder(args.vocoder, paths["vocoder"], device) + + texts = get_texts(args) + + spk = torch.tensor([args.spk], device=device, dtype=torch.long) if args.spk is not None else None + if len(texts) == 1 or not args.batched: + unbatched_synthesis(args, device, model, vocoder, denoiser, texts, spk) + else: + batched_synthesis(args, device, model, vocoder, denoiser, texts, spk) + + +class BatchedSynthesisDataset(torch.utils.data.Dataset): + def __init__(self, processed_texts): + self.processed_texts = processed_texts + + def __len__(self): + return len(self.processed_texts) + + def __getitem__(self, idx): + return self.processed_texts[idx] + + +def batched_collate_fn(batch): + x = [] + x_lengths = [] + + for b in batch: + x.append(b["x"].squeeze(0)) + x_lengths.append(b["x_lengths"]) + + x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True) + x_lengths = torch.concat(x_lengths, dim=0) + return {"x": x, "x_lengths": x_lengths} + + +def batched_synthesis(args, device, model, vocoder, denoiser, texts, spk): + total_rtf = [] + total_rtf_w = [] + processed_text = [process_text(i, text, "cpu") for i, text in enumerate(texts)] + dataloader = torch.utils.data.DataLoader( + BatchedSynthesisDataset(processed_text), + batch_size=args.batch_size, + collate_fn=batched_collate_fn, + num_workers=8, + ) + for i, batch in enumerate(dataloader): + i = i + 1 + start_t = dt.datetime.now() + output = model.synthesise( + batch["x"].to(device), + batch["x_lengths"].to(device), + n_timesteps=args.steps, + temperature=args.temperature, + spks=spk, + length_scale=args.speaking_rate, + ) + + output["waveform"] = to_waveform(output["mel"], vocoder, denoiser) + t = (dt.datetime.now() - start_t).total_seconds() + rtf_w = t * 22050 / (output["waveform"].shape[-1]) + print(f"[🍵-Batch: {i}] Matcha-TTS RTF: {output['rtf']:.4f}") + print(f"[🍵-Batch: {i}] Matcha-TTS + VOCODER RTF: {rtf_w:.4f}") + total_rtf.append(output["rtf"]) + total_rtf_w.append(rtf_w) + for j in range(output["mel"].shape[0]): + base_name = f"utterance_{j:03d}_speaker_{args.spk:03d}" if args.spk is not None else f"utterance_{j:03d}" + length = output["mel_lengths"][j] + new_dict = {"mel": output["mel"][j][:, :length], "waveform": output["waveform"][j][: length * 256]} + location = save_to_folder(base_name, new_dict, args.output_folder) + print(f"[🍵-{j}] Waveform saved: {location}") + + print("".join(["="] * 100)) + print(f"[🍵] Average Matcha-TTS RTF: {np.mean(total_rtf):.4f} ± {np.std(total_rtf)}") + print(f"[🍵] Average Matcha-TTS + VOCODER RTF: {np.mean(total_rtf_w):.4f} ± {np.std(total_rtf_w)}") + print("[🍵] Enjoy the freshly whisked 🍵 Matcha-TTS!") + + +def unbatched_synthesis(args, device, model, vocoder, denoiser, texts, spk): + total_rtf = [] + total_rtf_w = [] + for i, text in enumerate(texts): + i = i + 1 + base_name = f"utterance_{i:03d}_speaker_{args.spk:03d}" if args.spk is not None else f"utterance_{i:03d}" + + print("".join(["="] * 100)) + text = text.strip() + text_processed = process_text(i, text, device) + + print(f"[🍵] Whisking Matcha-T(ea)TS for: {i}") + start_t = dt.datetime.now() + output = model.synthesise( + text_processed["x"], + text_processed["x_lengths"], + n_timesteps=args.steps, + temperature=args.temperature, + spks=spk, + length_scale=args.speaking_rate, + ) + output["waveform"] = to_waveform(output["mel"], 
vocoder, denoiser) + # RTF with HiFiGAN + t = (dt.datetime.now() - start_t).total_seconds() + rtf_w = t * 22050 / (output["waveform"].shape[-1]) + print(f"[🍵-{i}] Matcha-TTS RTF: {output['rtf']:.4f}") + print(f"[🍵-{i}] Matcha-TTS + VOCODER RTF: {rtf_w:.4f}") + total_rtf.append(output["rtf"]) + total_rtf_w.append(rtf_w) + + location = save_to_folder(base_name, output, args.output_folder) + print(f"[+] Waveform saved: {location}") + + print("".join(["="] * 100)) + print(f"[🍵] Average Matcha-TTS RTF: {np.mean(total_rtf):.4f} ± {np.std(total_rtf)}") + print(f"[🍵] Average Matcha-TTS + VOCODER RTF: {np.mean(total_rtf_w):.4f} ± {np.std(total_rtf_w)}") + print("[🍵] Enjoy the freshly whisked 🍵 Matcha-TTS!") + + +def print_config(args): + print("[!] Configurations: ") + print(f"\t- Model: {args.model}") + print(f"\t- Vocoder: {args.vocoder}") + print(f"\t- Temperature: {args.temperature}") + print(f"\t- Speaking rate: {args.speaking_rate}") + print(f"\t- Number of ODE steps: {args.steps}") + print(f"\t- Speaker: {args.spk}") + + +def get_device(args): + if torch.cuda.is_available() and not args.cpu: + print("[+] GPU Available! Using GPU") + device = torch.device("cuda") + else: + print("[-] GPU not available or forced CPU run! Using CPU") + device = torch.device("cpu") + return device + + +if __name__ == "__main__": + cli() diff --git a/third_party/Matcha-TTS/matcha/hifigan/LICENSE b/third_party/Matcha-TTS/matcha/hifigan/LICENSE new file mode 100644 index 0000000..91751da --- /dev/null +++ b/third_party/Matcha-TTS/matcha/hifigan/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jungil Kong + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/third_party/Matcha-TTS/matcha/hifigan/README.md b/third_party/Matcha-TTS/matcha/hifigan/README.md new file mode 100644 index 0000000..5db2585 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/hifigan/README.md @@ -0,0 +1,101 @@ +# HiFi-GAN: Generative Adversarial Networks for Efficient and High Fidelity Speech Synthesis + +### Jungil Kong, Jaehyeon Kim, Jaekyoung Bae + +In our [paper](https://arxiv.org/abs/2010.05646), +we proposed HiFi-GAN: a GAN-based model capable of generating high fidelity speech efficiently.
+We provide our implementation and pretrained models as open source in this repository.
+
+**Abstract:**
+Several recent works on speech synthesis have employed generative adversarial networks (GANs) to produce raw waveforms.
+Although such methods improve the sampling efficiency and memory usage,
+their sample quality has not yet reached that of autoregressive and flow-based generative models.
+In this work, we propose HiFi-GAN, which achieves both efficient and high-fidelity speech synthesis.
+As speech audio consists of sinusoidal signals with various periods,
+we demonstrate that modeling the periodic patterns of audio is crucial for enhancing sample quality.
+A subjective human evaluation (mean opinion score, MOS) of a single speaker dataset indicates that our proposed method
+demonstrates similarity to human quality while generating 22.05 kHz high-fidelity audio 167.9 times faster than
+real-time on a single V100 GPU. We further show the generality of HiFi-GAN to the mel-spectrogram inversion of unseen
+speakers and to end-to-end speech synthesis. Finally, a small footprint version of HiFi-GAN generates samples 13.4 times
+faster than real-time on CPU with comparable quality to an autoregressive counterpart.
+
+Visit our [demo website](https://jik876.github.io/hifi-gan-demo/) for audio samples.
+
+## Pre-requisites
+
+1. Python >= 3.6
+2. Clone this repository.
+3. Install the Python requirements. Please refer to [requirements.txt](requirements.txt).
+4. Download and extract the [LJ Speech dataset](https://keithito.com/LJ-Speech-Dataset/),
+   and move all wav files to `LJSpeech-1.1/wavs`.
+
+## Training
+
+```
+python train.py --config config_v1.json
+```
+
+To train the V2 or V3 generator, replace `config_v1.json` with `config_v2.json` or `config_v3.json`.
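+For example, a V2 training run would then look like:
+
+```
+python train.py --config config_v2.json
+```
+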
+Checkpoints and a copy of the configuration file are saved in the `cp_hifigan` directory by default.
+You can change the path by adding the `--checkpoint_path` option.
+
+Validation loss during training with the V1 generator:
+![validation loss](./validation_loss.png) + +## Pretrained Model + +You can also use pretrained models we provide.
+[Download pretrained models](https://drive.google.com/drive/folders/1-eEYTB5Av9jNql0WGBlRoi-WH2J7bp5Y?usp=sharing)
+Details of each folder are as follows:
+
+| Folder Name  | Generator | Dataset   | Fine-Tuned                                             |
+| ------------ | --------- | --------- | ------------------------------------------------------ |
+| LJ_V1        | V1        | LJSpeech  | No                                                     |
+| LJ_V2        | V2        | LJSpeech  | No                                                     |
+| LJ_V3        | V3        | LJSpeech  | No                                                     |
+| LJ_FT_T2_V1  | V1        | LJSpeech  | Yes ([Tacotron2](https://github.com/NVIDIA/tacotron2)) |
+| LJ_FT_T2_V2  | V2        | LJSpeech  | Yes ([Tacotron2](https://github.com/NVIDIA/tacotron2)) |
+| LJ_FT_T2_V3  | V3        | LJSpeech  | Yes ([Tacotron2](https://github.com/NVIDIA/tacotron2)) |
+| VCTK_V1      | V1        | VCTK      | No                                                     |
+| VCTK_V2      | V2        | VCTK      | No                                                     |
+| VCTK_V3      | V3        | VCTK      | No                                                     |
+| UNIVERSAL_V1 | V1        | Universal | No                                                     |
+
+We provide the universal model with discriminator weights that can be used as a base for transfer learning to other datasets.
+
+## Fine-Tuning
+
+1. Generate mel-spectrograms in numpy format using [Tacotron2](https://github.com/NVIDIA/tacotron2) with teacher-forcing.
+ The file name of the generated mel-spectrogram should match the audio file and the extension should be `.npy`.
+   Example:
+   `Audio File : LJ001-0001.wav
+   Mel-Spectrogram File : LJ001-0001.npy`
+2. Create a `ft_dataset` folder and copy the generated mel-spectrogram files into it.
+3. Run the following command.
+   ```
+   python train.py --fine_tuning True --config config_v1.json
+   ```
+   For other command line options, please refer to the training section.
+
+## Inference from wav file
+
+1. Make a `test_files` directory and copy wav files into the directory.
+2. Run the following command.
+   `python inference.py --checkpoint_file [generator checkpoint file path]`
+   Generated wav files are saved in `generated_files` by default.
+   You can change the path by adding the `--output_dir` option.
+
+## Inference for end-to-end speech synthesis
+
+1. Make a `test_mel_files` directory and copy generated mel-spectrogram files into the directory.
+   You can generate mel-spectrograms using [Tacotron2](https://github.com/NVIDIA/tacotron2),
+   [Glow-TTS](https://github.com/jaywalnut310/glow-tts), and so forth.
+2. Run the following command.
+   `python inference_e2e.py --checkpoint_file [generator checkpoint file path]`
+   Generated wav files are saved in `generated_files_from_mel` by default.
+ You can change the path by adding `--output_dir` option. + +## Acknowledgements + +We referred to [WaveGlow](https://github.com/NVIDIA/waveglow), [MelGAN](https://github.com/descriptinc/melgan-neurips) +and [Tacotron2](https://github.com/NVIDIA/tacotron2) to implement this. diff --git a/third_party/Matcha-TTS/matcha/hifigan/__init__.py b/third_party/Matcha-TTS/matcha/hifigan/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/Matcha-TTS/matcha/hifigan/config.py b/third_party/Matcha-TTS/matcha/hifigan/config.py new file mode 100644 index 0000000..b3abea9 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/hifigan/config.py @@ -0,0 +1,28 @@ +v1 = { + "resblock": "1", + "num_gpus": 0, + "batch_size": 16, + "learning_rate": 0.0004, + "adam_b1": 0.8, + "adam_b2": 0.99, + "lr_decay": 0.999, + "seed": 1234, + "upsample_rates": [8, 8, 2, 2], + "upsample_kernel_sizes": [16, 16, 4, 4], + "upsample_initial_channel": 512, + "resblock_kernel_sizes": [3, 7, 11], + "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]], + "resblock_initial_channel": 256, + "segment_size": 8192, + "num_mels": 80, + "num_freq": 1025, + "n_fft": 1024, + "hop_size": 256, + "win_size": 1024, + "sampling_rate": 22050, + "fmin": 0, + "fmax": 8000, + "fmax_loss": None, + "num_workers": 4, + "dist_config": {"dist_backend": "nccl", "dist_url": "tcp://localhost:54321", "world_size": 1}, +} diff --git a/third_party/Matcha-TTS/matcha/hifigan/denoiser.py b/third_party/Matcha-TTS/matcha/hifigan/denoiser.py new file mode 100644 index 0000000..9fd3331 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/hifigan/denoiser.py @@ -0,0 +1,64 @@ +# Code modified from Rafael Valle's implementation https://github.com/NVIDIA/waveglow/blob/5bc2a53e20b3b533362f974cfa1ea0267ae1c2b1/denoiser.py + +"""Waveglow style denoiser can be used to remove the artifacts from the HiFiGAN generated audio.""" +import torch + + +class Denoiser(torch.nn.Module): + """Removes model bias from audio produced with waveglow""" + + def __init__(self, vocoder, filter_length=1024, n_overlap=4, win_length=1024, mode="zeros"): + super().__init__() + self.filter_length = filter_length + self.hop_length = int(filter_length / n_overlap) + self.win_length = win_length + + dtype, device = next(vocoder.parameters()).dtype, next(vocoder.parameters()).device + self.device = device + if mode == "zeros": + mel_input = torch.zeros((1, 80, 88), dtype=dtype, device=device) + elif mode == "normal": + mel_input = torch.randn((1, 80, 88), dtype=dtype, device=device) + else: + raise Exception(f"Mode {mode} if not supported") + + def stft_fn(audio, n_fft, hop_length, win_length, window): + spec = torch.stft( + audio, + n_fft=n_fft, + hop_length=hop_length, + win_length=win_length, + window=window, + return_complex=True, + ) + spec = torch.view_as_real(spec) + return torch.sqrt(spec.pow(2).sum(-1)), torch.atan2(spec[..., -1], spec[..., 0]) + + self.stft = lambda x: stft_fn( + audio=x, + n_fft=self.filter_length, + hop_length=self.hop_length, + win_length=self.win_length, + window=torch.hann_window(self.win_length, device=device), + ) + self.istft = lambda x, y: torch.istft( + torch.complex(x * torch.cos(y), x * torch.sin(y)), + n_fft=self.filter_length, + hop_length=self.hop_length, + win_length=self.win_length, + window=torch.hann_window(self.win_length, device=device), + ) + + with torch.no_grad(): + bias_audio = vocoder(mel_input).float().squeeze(0) + bias_spec, _ = self.stft(bias_audio) + + self.register_buffer("bias_spec", bias_spec[:, :, 0][:, :, None]) + + 
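+    # forward() removes the vocoder's constant bias: it subtracts `strength` times the
+    # cached bias magnitude spectrum from the audio's magnitude spectrum, clamps the
+    # result at zero, and resynthesises the waveform with the original phase (inverse STFT).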
@torch.inference_mode() + def forward(self, audio, strength=0.0005): + audio_spec, audio_angles = self.stft(audio) + audio_spec_denoised = audio_spec - self.bias_spec.to(audio.device) * strength + audio_spec_denoised = torch.clamp(audio_spec_denoised, 0.0) + audio_denoised = self.istft(audio_spec_denoised, audio_angles) + return audio_denoised diff --git a/third_party/Matcha-TTS/matcha/hifigan/env.py b/third_party/Matcha-TTS/matcha/hifigan/env.py new file mode 100644 index 0000000..9ea4f94 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/hifigan/env.py @@ -0,0 +1,17 @@ +""" from https://github.com/jik876/hifi-gan """ + +import os +import shutil + + +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.__dict__ = self + + +def build_env(config, config_name, path): + t_path = os.path.join(path, config_name) + if config != t_path: + os.makedirs(path, exist_ok=True) + shutil.copyfile(config, os.path.join(path, config_name)) diff --git a/third_party/Matcha-TTS/matcha/hifigan/meldataset.py b/third_party/Matcha-TTS/matcha/hifigan/meldataset.py new file mode 100644 index 0000000..8b43ea7 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/hifigan/meldataset.py @@ -0,0 +1,217 @@ +""" from https://github.com/jik876/hifi-gan """ + +import math +import os +import random + +import numpy as np +import torch +import torch.utils.data +from librosa.filters import mel as librosa_mel_fn +from librosa.util import normalize +from scipy.io.wavfile import read + +MAX_WAV_VALUE = 32768.0 + + +def load_wav(full_path): + sampling_rate, data = read(full_path) + return data, sampling_rate + + +def dynamic_range_compression(x, C=1, clip_val=1e-5): + return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) + + +def dynamic_range_decompression(x, C=1): + return np.exp(x) / C + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): + if torch.min(y) < -1.0: + print("min value is ", torch.min(y)) + if torch.max(y) > 1.0: + print("max value is ", torch.max(y)) + + global mel_basis, hann_window # pylint: disable=global-statement + if fmax not in mel_basis: + mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) + mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device) + hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) + + y = torch.nn.functional.pad( + y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect" + ) + y = y.squeeze(1) + + spec = torch.view_as_real( + torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[str(y.device)], + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + ) + + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + + spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec) + spec = spectral_normalize_torch(spec) + + return spec + + +def get_dataset_filelist(a): + with open(a.input_training_file, encoding="utf-8") as fi: + training_files 
= [ + os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 + ] + + with open(a.input_validation_file, encoding="utf-8") as fi: + validation_files = [ + os.path.join(a.input_wavs_dir, x.split("|")[0] + ".wav") for x in fi.read().split("\n") if len(x) > 0 + ] + return training_files, validation_files + + +class MelDataset(torch.utils.data.Dataset): + def __init__( + self, + training_files, + segment_size, + n_fft, + num_mels, + hop_size, + win_size, + sampling_rate, + fmin, + fmax, + split=True, + shuffle=True, + n_cache_reuse=1, + device=None, + fmax_loss=None, + fine_tuning=False, + base_mels_path=None, + ): + self.audio_files = training_files + random.seed(1234) + if shuffle: + random.shuffle(self.audio_files) + self.segment_size = segment_size + self.sampling_rate = sampling_rate + self.split = split + self.n_fft = n_fft + self.num_mels = num_mels + self.hop_size = hop_size + self.win_size = win_size + self.fmin = fmin + self.fmax = fmax + self.fmax_loss = fmax_loss + self.cached_wav = None + self.n_cache_reuse = n_cache_reuse + self._cache_ref_count = 0 + self.device = device + self.fine_tuning = fine_tuning + self.base_mels_path = base_mels_path + + def __getitem__(self, index): + filename = self.audio_files[index] + if self._cache_ref_count == 0: + audio, sampling_rate = load_wav(filename) + audio = audio / MAX_WAV_VALUE + if not self.fine_tuning: + audio = normalize(audio) * 0.95 + self.cached_wav = audio + if sampling_rate != self.sampling_rate: + raise ValueError(f"{sampling_rate} SR doesn't match target {self.sampling_rate} SR") + self._cache_ref_count = self.n_cache_reuse + else: + audio = self.cached_wav + self._cache_ref_count -= 1 + + audio = torch.FloatTensor(audio) + audio = audio.unsqueeze(0) + + if not self.fine_tuning: + if self.split: + if audio.size(1) >= self.segment_size: + max_audio_start = audio.size(1) - self.segment_size + audio_start = random.randint(0, max_audio_start) + audio = audio[:, audio_start : audio_start + self.segment_size] + else: + audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant") + + mel = mel_spectrogram( + audio, + self.n_fft, + self.num_mels, + self.sampling_rate, + self.hop_size, + self.win_size, + self.fmin, + self.fmax, + center=False, + ) + else: + mel = np.load(os.path.join(self.base_mels_path, os.path.splitext(os.path.split(filename)[-1])[0] + ".npy")) + mel = torch.from_numpy(mel) + + if len(mel.shape) < 3: + mel = mel.unsqueeze(0) + + if self.split: + frames_per_seg = math.ceil(self.segment_size / self.hop_size) + + if audio.size(1) >= self.segment_size: + mel_start = random.randint(0, mel.size(2) - frames_per_seg - 1) + mel = mel[:, :, mel_start : mel_start + frames_per_seg] + audio = audio[:, mel_start * self.hop_size : (mel_start + frames_per_seg) * self.hop_size] + else: + mel = torch.nn.functional.pad(mel, (0, frames_per_seg - mel.size(2)), "constant") + audio = torch.nn.functional.pad(audio, (0, self.segment_size - audio.size(1)), "constant") + + mel_loss = mel_spectrogram( + audio, + self.n_fft, + self.num_mels, + self.sampling_rate, + self.hop_size, + self.win_size, + self.fmin, + self.fmax_loss, + center=False, + ) + + return (mel.squeeze(), audio.squeeze(0), filename, mel_loss.squeeze()) + + def __len__(self): + return len(self.audio_files) diff --git a/third_party/Matcha-TTS/matcha/hifigan/models.py b/third_party/Matcha-TTS/matcha/hifigan/models.py new file mode 100644 index 0000000..d209d9a --- /dev/null +++ 
b/third_party/Matcha-TTS/matcha/hifigan/models.py @@ -0,0 +1,368 @@ +""" from https://github.com/jik876/hifi-gan """ + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.nn import AvgPool1d, Conv1d, Conv2d, ConvTranspose1d +from torch.nn.utils import remove_weight_norm, spectral_norm, weight_norm + +from .xutils import get_padding, init_weights + +LRELU_SLOPE = 0.1 + + +class ResBlock1(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3, 5)): + super().__init__() + self.h = h + self.convs1 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[2], + padding=get_padding(kernel_size, dilation[2]), + ) + ), + ] + ) + self.convs1.apply(init_weights) + + self.convs2 = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=1, + padding=get_padding(kernel_size, 1), + ) + ), + ] + ) + self.convs2.apply(init_weights) + + def forward(self, x): + for c1, c2 in zip(self.convs1, self.convs2): + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c1(xt) + xt = F.leaky_relu(xt, LRELU_SLOPE) + xt = c2(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs1: + remove_weight_norm(l) + for l in self.convs2: + remove_weight_norm(l) + + +class ResBlock2(torch.nn.Module): + def __init__(self, h, channels, kernel_size=3, dilation=(1, 3)): + super().__init__() + self.h = h + self.convs = nn.ModuleList( + [ + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[0], + padding=get_padding(kernel_size, dilation[0]), + ) + ), + weight_norm( + Conv1d( + channels, + channels, + kernel_size, + 1, + dilation=dilation[1], + padding=get_padding(kernel_size, dilation[1]), + ) + ), + ] + ) + self.convs.apply(init_weights) + + def forward(self, x): + for c in self.convs: + xt = F.leaky_relu(x, LRELU_SLOPE) + xt = c(xt) + x = xt + x + return x + + def remove_weight_norm(self): + for l in self.convs: + remove_weight_norm(l) + + +class Generator(torch.nn.Module): + def __init__(self, h): + super().__init__() + self.h = h + self.num_kernels = len(h.resblock_kernel_sizes) + self.num_upsamples = len(h.upsample_rates) + self.conv_pre = weight_norm(Conv1d(80, h.upsample_initial_channel, 7, 1, padding=3)) + resblock = ResBlock1 if h.resblock == "1" else ResBlock2 + + self.ups = nn.ModuleList() + for i, (u, k) in enumerate(zip(h.upsample_rates, h.upsample_kernel_sizes)): + self.ups.append( + weight_norm( + ConvTranspose1d( + h.upsample_initial_channel // (2**i), + h.upsample_initial_channel // (2 ** (i + 1)), + k, + u, + padding=(k - u) // 2, + ) + ) + ) + + self.resblocks = nn.ModuleList() + for i in range(len(self.ups)): + ch = h.upsample_initial_channel // (2 ** (i + 1)) + for _, (k, d) in enumerate(zip(h.resblock_kernel_sizes, h.resblock_dilation_sizes)): + self.resblocks.append(resblock(h, ch, k, d)) + + self.conv_post = weight_norm(Conv1d(ch, 1, 7, 1, padding=3)) + 
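+        # init_weights (from xutils) re-initialises these convolutions with N(0, 0.01) weights.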
self.ups.apply(init_weights) + self.conv_post.apply(init_weights) + + def forward(self, x): + x = self.conv_pre(x) + for i in range(self.num_upsamples): + x = F.leaky_relu(x, LRELU_SLOPE) + x = self.ups[i](x) + xs = None + for j in range(self.num_kernels): + if xs is None: + xs = self.resblocks[i * self.num_kernels + j](x) + else: + xs += self.resblocks[i * self.num_kernels + j](x) + x = xs / self.num_kernels + x = F.leaky_relu(x) + x = self.conv_post(x) + x = torch.tanh(x) + + return x + + def remove_weight_norm(self): + print("Removing weight norm...") + for l in self.ups: + remove_weight_norm(l) + for l in self.resblocks: + l.remove_weight_norm() + remove_weight_norm(self.conv_pre) + remove_weight_norm(self.conv_post) + + +class DiscriminatorP(torch.nn.Module): + def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): + super().__init__() + self.period = period + norm_f = weight_norm if use_spectral_norm is False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(5, 1), 0))), + norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(2, 0))), + ] + ) + self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) + + def forward(self, x): + fmap = [] + + # 1d to 2d + b, c, t = x.shape + if t % self.period != 0: # pad first + n_pad = self.period - (t % self.period) + x = F.pad(x, (0, n_pad), "reflect") + t = t + n_pad + x = x.view(b, c, t // self.period, self.period) + + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiPeriodDiscriminator(torch.nn.Module): + def __init__(self): + super().__init__() + self.discriminators = nn.ModuleList( + [ + DiscriminatorP(2), + DiscriminatorP(3), + DiscriminatorP(5), + DiscriminatorP(7), + DiscriminatorP(11), + ] + ) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for _, d in enumerate(self.discriminators): + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +class DiscriminatorS(torch.nn.Module): + def __init__(self, use_spectral_norm=False): + super().__init__() + norm_f = weight_norm if use_spectral_norm is False else spectral_norm + self.convs = nn.ModuleList( + [ + norm_f(Conv1d(1, 128, 15, 1, padding=7)), + norm_f(Conv1d(128, 128, 41, 2, groups=4, padding=20)), + norm_f(Conv1d(128, 256, 41, 2, groups=16, padding=20)), + norm_f(Conv1d(256, 512, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(512, 1024, 41, 4, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 41, 1, groups=16, padding=20)), + norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), + ] + ) + self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) + + def forward(self, x): + fmap = [] + for l in self.convs: + x = l(x) + x = F.leaky_relu(x, LRELU_SLOPE) + fmap.append(x) + x = self.conv_post(x) + fmap.append(x) + x = torch.flatten(x, 1, -1) + + return x, fmap + + +class MultiScaleDiscriminator(torch.nn.Module): + def __init__(self): + super().__init__() + self.discriminators = nn.ModuleList( + [ + 
DiscriminatorS(use_spectral_norm=True), + DiscriminatorS(), + DiscriminatorS(), + ] + ) + self.meanpools = nn.ModuleList([AvgPool1d(4, 2, padding=2), AvgPool1d(4, 2, padding=2)]) + + def forward(self, y, y_hat): + y_d_rs = [] + y_d_gs = [] + fmap_rs = [] + fmap_gs = [] + for i, d in enumerate(self.discriminators): + if i != 0: + y = self.meanpools[i - 1](y) + y_hat = self.meanpools[i - 1](y_hat) + y_d_r, fmap_r = d(y) + y_d_g, fmap_g = d(y_hat) + y_d_rs.append(y_d_r) + fmap_rs.append(fmap_r) + y_d_gs.append(y_d_g) + fmap_gs.append(fmap_g) + + return y_d_rs, y_d_gs, fmap_rs, fmap_gs + + +def feature_loss(fmap_r, fmap_g): + loss = 0 + for dr, dg in zip(fmap_r, fmap_g): + for rl, gl in zip(dr, dg): + loss += torch.mean(torch.abs(rl - gl)) + + return loss * 2 + + +def discriminator_loss(disc_real_outputs, disc_generated_outputs): + loss = 0 + r_losses = [] + g_losses = [] + for dr, dg in zip(disc_real_outputs, disc_generated_outputs): + r_loss = torch.mean((1 - dr) ** 2) + g_loss = torch.mean(dg**2) + loss += r_loss + g_loss + r_losses.append(r_loss.item()) + g_losses.append(g_loss.item()) + + return loss, r_losses, g_losses + + +def generator_loss(disc_outputs): + loss = 0 + gen_losses = [] + for dg in disc_outputs: + l = torch.mean((1 - dg) ** 2) + gen_losses.append(l) + loss += l + + return loss, gen_losses diff --git a/third_party/Matcha-TTS/matcha/hifigan/xutils.py b/third_party/Matcha-TTS/matcha/hifigan/xutils.py new file mode 100644 index 0000000..eefadcb --- /dev/null +++ b/third_party/Matcha-TTS/matcha/hifigan/xutils.py @@ -0,0 +1,60 @@ +""" from https://github.com/jik876/hifi-gan """ + +import glob +import os + +import matplotlib +import torch +from torch.nn.utils import weight_norm + +matplotlib.use("Agg") +import matplotlib.pylab as plt + + +def plot_spectrogram(spectrogram): + fig, ax = plt.subplots(figsize=(10, 2)) + im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") + plt.colorbar(im, ax=ax) + + fig.canvas.draw() + plt.close() + + return fig + + +def init_weights(m, mean=0.0, std=0.01): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + m.weight.data.normal_(mean, std) + + +def apply_weight_norm(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + weight_norm(m) + + +def get_padding(kernel_size, dilation=1): + return int((kernel_size * dilation - dilation) / 2) + + +def load_checkpoint(filepath, device): + assert os.path.isfile(filepath) + print(f"Loading '{filepath}'") + checkpoint_dict = torch.load(filepath, map_location=device) + print("Complete.") + return checkpoint_dict + + +def save_checkpoint(filepath, obj): + print(f"Saving checkpoint to {filepath}") + torch.save(obj, filepath) + print("Complete.") + + +def scan_checkpoint(cp_dir, prefix): + pattern = os.path.join(cp_dir, prefix + "????????") + cp_list = glob.glob(pattern) + if len(cp_list) == 0: + return None + return sorted(cp_list)[-1] diff --git a/third_party/Matcha-TTS/matcha/models/__init__.py b/third_party/Matcha-TTS/matcha/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/Matcha-TTS/matcha/models/baselightningmodule.py b/third_party/Matcha-TTS/matcha/models/baselightningmodule.py new file mode 100644 index 0000000..3724888 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/models/baselightningmodule.py @@ -0,0 +1,209 @@ +""" +This is a base lightning module that can be used to train a model. 
+The benefit of this abstraction is that all the logic outside of model definition can be reused for different models. +""" +import inspect +from abc import ABC +from typing import Any, Dict + +import torch +from lightning import LightningModule +from lightning.pytorch.utilities import grad_norm + +from matcha import utils +from matcha.utils.utils import plot_tensor + +log = utils.get_pylogger(__name__) + + +class BaseLightningClass(LightningModule, ABC): + def update_data_statistics(self, data_statistics): + if data_statistics is None: + data_statistics = { + "mel_mean": 0.0, + "mel_std": 1.0, + } + + self.register_buffer("mel_mean", torch.tensor(data_statistics["mel_mean"])) + self.register_buffer("mel_std", torch.tensor(data_statistics["mel_std"])) + + def configure_optimizers(self) -> Any: + optimizer = self.hparams.optimizer(params=self.parameters()) + if self.hparams.scheduler not in (None, {}): + scheduler_args = {} + # Manage last epoch for exponential schedulers + if "last_epoch" in inspect.signature(self.hparams.scheduler.scheduler).parameters: + if hasattr(self, "ckpt_loaded_epoch"): + current_epoch = self.ckpt_loaded_epoch - 1 + else: + current_epoch = -1 + + scheduler_args.update({"optimizer": optimizer}) + scheduler = self.hparams.scheduler.scheduler(**scheduler_args) + scheduler.last_epoch = current_epoch + return { + "optimizer": optimizer, + "lr_scheduler": { + "scheduler": scheduler, + "interval": self.hparams.scheduler.lightning_args.interval, + "frequency": self.hparams.scheduler.lightning_args.frequency, + "name": "learning_rate", + }, + } + + return {"optimizer": optimizer} + + def get_losses(self, batch): + x, x_lengths = batch["x"], batch["x_lengths"] + y, y_lengths = batch["y"], batch["y_lengths"] + spks = batch["spks"] + + dur_loss, prior_loss, diff_loss = self( + x=x, + x_lengths=x_lengths, + y=y, + y_lengths=y_lengths, + spks=spks, + out_size=self.out_size, + ) + return { + "dur_loss": dur_loss, + "prior_loss": prior_loss, + "diff_loss": diff_loss, + } + + def on_load_checkpoint(self, checkpoint: Dict[str, Any]) -> None: + self.ckpt_loaded_epoch = checkpoint["epoch"] # pylint: disable=attribute-defined-outside-init + + def training_step(self, batch: Any, batch_idx: int): + loss_dict = self.get_losses(batch) + self.log( + "step", + float(self.global_step), + on_step=True, + prog_bar=True, + logger=True, + sync_dist=True, + ) + + self.log( + "sub_loss/train_dur_loss", + loss_dict["dur_loss"], + on_step=True, + on_epoch=True, + logger=True, + sync_dist=True, + ) + self.log( + "sub_loss/train_prior_loss", + loss_dict["prior_loss"], + on_step=True, + on_epoch=True, + logger=True, + sync_dist=True, + ) + self.log( + "sub_loss/train_diff_loss", + loss_dict["diff_loss"], + on_step=True, + on_epoch=True, + logger=True, + sync_dist=True, + ) + + total_loss = sum(loss_dict.values()) + self.log( + "loss/train", + total_loss, + on_step=True, + on_epoch=True, + logger=True, + prog_bar=True, + sync_dist=True, + ) + + return {"loss": total_loss, "log": loss_dict} + + def validation_step(self, batch: Any, batch_idx: int): + loss_dict = self.get_losses(batch) + self.log( + "sub_loss/val_dur_loss", + loss_dict["dur_loss"], + on_step=True, + on_epoch=True, + logger=True, + sync_dist=True, + ) + self.log( + "sub_loss/val_prior_loss", + loss_dict["prior_loss"], + on_step=True, + on_epoch=True, + logger=True, + sync_dist=True, + ) + self.log( + "sub_loss/val_diff_loss", + loss_dict["diff_loss"], + on_step=True, + on_epoch=True, + logger=True, + sync_dist=True, + ) + + total_loss = 
sum(loss_dict.values()) + self.log( + "loss/val", + total_loss, + on_step=True, + on_epoch=True, + logger=True, + prog_bar=True, + sync_dist=True, + ) + + return total_loss + + def on_validation_end(self) -> None: + if self.trainer.is_global_zero: + one_batch = next(iter(self.trainer.val_dataloaders)) + if self.current_epoch == 0: + log.debug("Plotting original samples") + for i in range(2): + y = one_batch["y"][i].unsqueeze(0).to(self.device) + self.logger.experiment.add_image( + f"original/{i}", + plot_tensor(y.squeeze().cpu()), + self.current_epoch, + dataformats="HWC", + ) + + log.debug("Synthesising...") + for i in range(2): + x = one_batch["x"][i].unsqueeze(0).to(self.device) + x_lengths = one_batch["x_lengths"][i].unsqueeze(0).to(self.device) + spks = one_batch["spks"][i].unsqueeze(0).to(self.device) if one_batch["spks"] is not None else None + output = self.synthesise(x[:, :x_lengths], x_lengths, n_timesteps=10, spks=spks) + y_enc, y_dec = output["encoder_outputs"], output["decoder_outputs"] + attn = output["attn"] + self.logger.experiment.add_image( + f"generated_enc/{i}", + plot_tensor(y_enc.squeeze().cpu()), + self.current_epoch, + dataformats="HWC", + ) + self.logger.experiment.add_image( + f"generated_dec/{i}", + plot_tensor(y_dec.squeeze().cpu()), + self.current_epoch, + dataformats="HWC", + ) + self.logger.experiment.add_image( + f"alignment/{i}", + plot_tensor(attn.squeeze().cpu()), + self.current_epoch, + dataformats="HWC", + ) + + def on_before_optimizer_step(self, optimizer): + self.log_dict({f"grad_norm/{k}": v for k, v in grad_norm(self, norm_type=2).items()}) diff --git a/third_party/Matcha-TTS/matcha/models/components/__init__.py b/third_party/Matcha-TTS/matcha/models/components/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/Matcha-TTS/matcha/models/components/decoder.py b/third_party/Matcha-TTS/matcha/models/components/decoder.py new file mode 100644 index 0000000..1137cd7 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/models/components/decoder.py @@ -0,0 +1,443 @@ +import math +from typing import Optional + +import torch +import torch.nn as nn +import torch.nn.functional as F +from conformer import ConformerBlock +from diffusers.models.activations import get_activation +from einops import pack, rearrange, repeat + +from matcha.models.components.transformer import BasicTransformerBlock + + +class SinusoidalPosEmb(torch.nn.Module): + def __init__(self, dim): + super().__init__() + self.dim = dim + assert self.dim % 2 == 0, "SinusoidalPosEmb requires dim to be even" + + def forward(self, x, scale=1000): + if x.ndim < 1: + x = x.unsqueeze(0) + device = x.device + half_dim = self.dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, device=device).float() * -emb) + emb = scale * x.unsqueeze(1) * emb.unsqueeze(0) + emb = torch.cat((emb.sin(), emb.cos()), dim=-1) + return emb + + +class Block1D(torch.nn.Module): + def __init__(self, dim, dim_out, groups=8): + super().__init__() + self.block = torch.nn.Sequential( + torch.nn.Conv1d(dim, dim_out, 3, padding=1), + torch.nn.GroupNorm(groups, dim_out), + nn.Mish(), + ) + + def forward(self, x, mask): + output = self.block(x * mask) + return output * mask + + +class ResnetBlock1D(torch.nn.Module): + def __init__(self, dim, dim_out, time_emb_dim, groups=8): + super().__init__() + self.mlp = torch.nn.Sequential(nn.Mish(), torch.nn.Linear(time_emb_dim, dim_out)) + + self.block1 = Block1D(dim, dim_out, groups=groups) + self.block2 = Block1D(dim_out, dim_out, 
groups=groups) + + self.res_conv = torch.nn.Conv1d(dim, dim_out, 1) + + def forward(self, x, mask, time_emb): + h = self.block1(x, mask) + h += self.mlp(time_emb).unsqueeze(-1) + h = self.block2(h, mask) + output = h + self.res_conv(x * mask) + return output + + +class Downsample1D(nn.Module): + def __init__(self, dim): + super().__init__() + self.conv = torch.nn.Conv1d(dim, dim, 3, 2, 1) + + def forward(self, x): + return self.conv(x) + + +class TimestepEmbedding(nn.Module): + def __init__( + self, + in_channels: int, + time_embed_dim: int, + act_fn: str = "silu", + out_dim: int = None, + post_act_fn: Optional[str] = None, + cond_proj_dim=None, + ): + super().__init__() + + self.linear_1 = nn.Linear(in_channels, time_embed_dim) + + if cond_proj_dim is not None: + self.cond_proj = nn.Linear(cond_proj_dim, in_channels, bias=False) + else: + self.cond_proj = None + + self.act = get_activation(act_fn) + + if out_dim is not None: + time_embed_dim_out = out_dim + else: + time_embed_dim_out = time_embed_dim + self.linear_2 = nn.Linear(time_embed_dim, time_embed_dim_out) + + if post_act_fn is None: + self.post_act = None + else: + self.post_act = get_activation(post_act_fn) + + def forward(self, sample, condition=None): + if condition is not None: + sample = sample + self.cond_proj(condition) + sample = self.linear_1(sample) + + if self.act is not None: + sample = self.act(sample) + + sample = self.linear_2(sample) + + if self.post_act is not None: + sample = self.post_act(sample) + return sample + + +class Upsample1D(nn.Module): + """A 1D upsampling layer with an optional convolution. + + Parameters: + channels (`int`): + number of channels in the inputs and outputs. + use_conv (`bool`, default `False`): + option to use a convolution. + use_conv_transpose (`bool`, default `False`): + option to use a convolution transpose. + out_channels (`int`, optional): + number of output channels. Defaults to `channels`. 
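+    When `use_conv_transpose` is True, a strided `ConvTranspose1d` doubles the sequence length;
+    otherwise the input is upsampled 2x with nearest-neighbour interpolation and then passed
+    through the optional convolution.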
+ """ + + def __init__(self, channels, use_conv=False, use_conv_transpose=True, out_channels=None, name="conv"): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_conv_transpose = use_conv_transpose + self.name = name + + self.conv = None + if use_conv_transpose: + self.conv = nn.ConvTranspose1d(channels, self.out_channels, 4, 2, 1) + elif use_conv: + self.conv = nn.Conv1d(self.channels, self.out_channels, 3, padding=1) + + def forward(self, inputs): + assert inputs.shape[1] == self.channels + if self.use_conv_transpose: + return self.conv(inputs) + + outputs = F.interpolate(inputs, scale_factor=2.0, mode="nearest") + + if self.use_conv: + outputs = self.conv(outputs) + + return outputs + + +class ConformerWrapper(ConformerBlock): + def __init__( # pylint: disable=useless-super-delegation + self, + *, + dim, + dim_head=64, + heads=8, + ff_mult=4, + conv_expansion_factor=2, + conv_kernel_size=31, + attn_dropout=0, + ff_dropout=0, + conv_dropout=0, + conv_causal=False, + ): + super().__init__( + dim=dim, + dim_head=dim_head, + heads=heads, + ff_mult=ff_mult, + conv_expansion_factor=conv_expansion_factor, + conv_kernel_size=conv_kernel_size, + attn_dropout=attn_dropout, + ff_dropout=ff_dropout, + conv_dropout=conv_dropout, + conv_causal=conv_causal, + ) + + def forward( + self, + hidden_states, + attention_mask, + encoder_hidden_states=None, + encoder_attention_mask=None, + timestep=None, + ): + return super().forward(x=hidden_states, mask=attention_mask.bool()) + + +class Decoder(nn.Module): + def __init__( + self, + in_channels, + out_channels, + channels=(256, 256), + dropout=0.05, + attention_head_dim=64, + n_blocks=1, + num_mid_blocks=2, + num_heads=4, + act_fn="snake", + down_block_type="transformer", + mid_block_type="transformer", + up_block_type="transformer", + ): + super().__init__() + channels = tuple(channels) + self.in_channels = in_channels + self.out_channels = out_channels + + self.time_embeddings = SinusoidalPosEmb(in_channels) + time_embed_dim = channels[0] * 4 + self.time_mlp = TimestepEmbedding( + in_channels=in_channels, + time_embed_dim=time_embed_dim, + act_fn="silu", + ) + + self.down_blocks = nn.ModuleList([]) + self.mid_blocks = nn.ModuleList([]) + self.up_blocks = nn.ModuleList([]) + + output_channel = in_channels + for i in range(len(channels)): # pylint: disable=consider-using-enumerate + input_channel = output_channel + output_channel = channels[i] + is_last = i == len(channels) - 1 + resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) + transformer_blocks = nn.ModuleList( + [ + self.get_block( + down_block_type, + output_channel, + attention_head_dim, + num_heads, + dropout, + act_fn, + ) + for _ in range(n_blocks) + ] + ) + downsample = ( + Downsample1D(output_channel) if not is_last else nn.Conv1d(output_channel, output_channel, 3, padding=1) + ) + + self.down_blocks.append(nn.ModuleList([resnet, transformer_blocks, downsample])) + + for i in range(num_mid_blocks): + input_channel = channels[-1] + out_channels = channels[-1] + + resnet = ResnetBlock1D(dim=input_channel, dim_out=output_channel, time_emb_dim=time_embed_dim) + + transformer_blocks = nn.ModuleList( + [ + self.get_block( + mid_block_type, + output_channel, + attention_head_dim, + num_heads, + dropout, + act_fn, + ) + for _ in range(n_blocks) + ] + ) + + self.mid_blocks.append(nn.ModuleList([resnet, transformer_blocks])) + + channels = channels[::-1] + (channels[0],) + for i 
in range(len(channels) - 1): + input_channel = channels[i] + output_channel = channels[i + 1] + is_last = i == len(channels) - 2 + + resnet = ResnetBlock1D( + dim=2 * input_channel, + dim_out=output_channel, + time_emb_dim=time_embed_dim, + ) + transformer_blocks = nn.ModuleList( + [ + self.get_block( + up_block_type, + output_channel, + attention_head_dim, + num_heads, + dropout, + act_fn, + ) + for _ in range(n_blocks) + ] + ) + upsample = ( + Upsample1D(output_channel, use_conv_transpose=True) + if not is_last + else nn.Conv1d(output_channel, output_channel, 3, padding=1) + ) + + self.up_blocks.append(nn.ModuleList([resnet, transformer_blocks, upsample])) + + self.final_block = Block1D(channels[-1], channels[-1]) + self.final_proj = nn.Conv1d(channels[-1], self.out_channels, 1) + + self.initialize_weights() + # nn.init.normal_(self.final_proj.weight) + + @staticmethod + def get_block(block_type, dim, attention_head_dim, num_heads, dropout, act_fn): + if block_type == "conformer": + block = ConformerWrapper( + dim=dim, + dim_head=attention_head_dim, + heads=num_heads, + ff_mult=1, + conv_expansion_factor=2, + ff_dropout=dropout, + attn_dropout=dropout, + conv_dropout=dropout, + conv_kernel_size=31, + ) + elif block_type == "transformer": + block = BasicTransformerBlock( + dim=dim, + num_attention_heads=num_heads, + attention_head_dim=attention_head_dim, + dropout=dropout, + activation_fn=act_fn, + ) + else: + raise ValueError(f"Unknown block type {block_type}") + + return block + + def initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv1d): + nn.init.kaiming_normal_(m.weight, nonlinearity="relu") + + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.GroupNorm): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + elif isinstance(m, nn.Linear): + nn.init.kaiming_normal_(m.weight, nonlinearity="relu") + + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def forward(self, x, mask, mu, t, spks=None, cond=None): + """Forward pass of the UNet1DConditional model. + + Args: + x (torch.Tensor): shape (batch_size, in_channels, time) + mask (_type_): shape (batch_size, 1, time) + t (_type_): shape (batch_size) + spks (_type_, optional): shape: (batch_size, condition_channels). Defaults to None. + cond (_type_, optional): placeholder for future use. Defaults to None. 
+ + Raises: + ValueError: _description_ + ValueError: _description_ + + Returns: + _type_: _description_ + """ + + t = self.time_embeddings(t) + t = self.time_mlp(t) + + x = pack([x, mu], "b * t")[0] + + if spks is not None: + spks = repeat(spks, "b c -> b c t", t=x.shape[-1]) + x = pack([x, spks], "b * t")[0] + + hiddens = [] + masks = [mask] + for resnet, transformer_blocks, downsample in self.down_blocks: + mask_down = masks[-1] + x = resnet(x, mask_down, t) + x = rearrange(x, "b c t -> b t c") + mask_down = rearrange(mask_down, "b 1 t -> b t") + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=mask_down, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t") + mask_down = rearrange(mask_down, "b t -> b 1 t") + hiddens.append(x) # Save hidden states for skip connections + x = downsample(x * mask_down) + masks.append(mask_down[:, :, ::2]) + + masks = masks[:-1] + mask_mid = masks[-1] + + for resnet, transformer_blocks in self.mid_blocks: + x = resnet(x, mask_mid, t) + x = rearrange(x, "b c t -> b t c") + mask_mid = rearrange(mask_mid, "b 1 t -> b t") + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=mask_mid, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t") + mask_mid = rearrange(mask_mid, "b t -> b 1 t") + + for resnet, transformer_blocks, upsample in self.up_blocks: + mask_up = masks.pop() + x = resnet(pack([x, hiddens.pop()], "b * t")[0], mask_up, t) + x = rearrange(x, "b c t -> b t c") + mask_up = rearrange(mask_up, "b 1 t -> b t") + for transformer_block in transformer_blocks: + x = transformer_block( + hidden_states=x, + attention_mask=mask_up, + timestep=t, + ) + x = rearrange(x, "b t c -> b c t") + mask_up = rearrange(mask_up, "b t -> b 1 t") + x = upsample(x * mask_up) + + x = self.final_block(x, mask_up) + output = self.final_proj(x * mask_up) + + return output * mask diff --git a/third_party/Matcha-TTS/matcha/models/components/flow_matching.py b/third_party/Matcha-TTS/matcha/models/components/flow_matching.py new file mode 100644 index 0000000..5cad743 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/models/components/flow_matching.py @@ -0,0 +1,132 @@ +from abc import ABC + +import torch +import torch.nn.functional as F + +from matcha.models.components.decoder import Decoder +from matcha.utils.pylogger import get_pylogger + +log = get_pylogger(__name__) + + +class BASECFM(torch.nn.Module, ABC): + def __init__( + self, + n_feats, + cfm_params, + n_spks=1, + spk_emb_dim=128, + ): + super().__init__() + self.n_feats = n_feats + self.n_spks = n_spks + self.spk_emb_dim = spk_emb_dim + self.solver = cfm_params.solver + if hasattr(cfm_params, "sigma_min"): + self.sigma_min = cfm_params.sigma_min + else: + self.sigma_min = 1e-4 + + self.estimator = None + + @torch.inference_mode() + def forward(self, mu, mask, n_timesteps, temperature=1.0, spks=None, cond=None): + """Forward diffusion + + Args: + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): output_mask + shape: (batch_size, 1, mel_timesteps) + n_timesteps (int): number of diffusion steps + temperature (float, optional): temperature for scaling noise. Defaults to 1.0. + spks (torch.Tensor, optional): speaker ids. Defaults to None. 
+ shape: (batch_size, spk_emb_dim) + cond: Not used but kept for future purposes + + Returns: + sample: generated mel-spectrogram + shape: (batch_size, n_feats, mel_timesteps) + """ + z = torch.randn_like(mu) * temperature + t_span = torch.linspace(0, 1, n_timesteps + 1, device=mu.device) + return self.solve_euler(z, t_span=t_span, mu=mu, mask=mask, spks=spks, cond=cond) + + def solve_euler(self, x, t_span, mu, mask, spks, cond): + """ + Fixed euler solver for ODEs. + Args: + x (torch.Tensor): random noise + t_span (torch.Tensor): n_timesteps interpolated + shape: (n_timesteps + 1,) + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): output_mask + shape: (batch_size, 1, mel_timesteps) + spks (torch.Tensor, optional): speaker ids. Defaults to None. + shape: (batch_size, spk_emb_dim) + cond: Not used but kept for future purposes + """ + t, _, dt = t_span[0], t_span[-1], t_span[1] - t_span[0] + + # I am storing this because I can later plot it by putting a debugger here and saving it to a file + # Or in future might add like a return_all_steps flag + sol = [] + + for step in range(1, len(t_span)): + dphi_dt = self.estimator(x, mask, mu, t, spks, cond) + + x = x + dt * dphi_dt + t = t + dt + sol.append(x) + if step < len(t_span) - 1: + dt = t_span[step + 1] - t + + return sol[-1] + + def compute_loss(self, x1, mask, mu, spks=None, cond=None): + """Computes diffusion loss + + Args: + x1 (torch.Tensor): Target + shape: (batch_size, n_feats, mel_timesteps) + mask (torch.Tensor): target mask + shape: (batch_size, 1, mel_timesteps) + mu (torch.Tensor): output of encoder + shape: (batch_size, n_feats, mel_timesteps) + spks (torch.Tensor, optional): speaker embedding. Defaults to None. + shape: (batch_size, spk_emb_dim) + + Returns: + loss: conditional flow matching loss + y: conditional flow + shape: (batch_size, n_feats, mel_timesteps) + """ + b, _, t = mu.shape + + # random timestep + t = torch.rand([b, 1, 1], device=mu.device, dtype=mu.dtype) + # sample noise p(x_0) + z = torch.randn_like(x1) + + y = (1 - (1 - self.sigma_min) * t) * z + t * x1 + u = x1 - (1 - self.sigma_min) * z + + loss = F.mse_loss(self.estimator(y, mask, mu, t.squeeze(), spks), u, reduction="sum") / ( + torch.sum(mask) * u.shape[1] + ) + return loss, y + + +class CFM(BASECFM): + def __init__(self, in_channels, out_channel, cfm_params, decoder_params, n_spks=1, spk_emb_dim=64): + super().__init__( + n_feats=in_channels, + cfm_params=cfm_params, + n_spks=n_spks, + spk_emb_dim=spk_emb_dim, + ) + + in_channels = in_channels + (spk_emb_dim if n_spks > 1 else 0) + # Just change the architecture of the estimator here + self.estimator = Decoder(in_channels=in_channels, out_channels=out_channel, **decoder_params) diff --git a/third_party/Matcha-TTS/matcha/models/components/text_encoder.py b/third_party/Matcha-TTS/matcha/models/components/text_encoder.py new file mode 100644 index 0000000..a388d05 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/models/components/text_encoder.py @@ -0,0 +1,410 @@ +""" from https://github.com/jaywalnut310/glow-tts """ + +import math + +import torch +import torch.nn as nn +from einops import rearrange + +import matcha.utils as utils +from matcha.utils.model import sequence_mask + +log = utils.get_pylogger(__name__) + + +class LayerNorm(nn.Module): + def __init__(self, channels, eps=1e-4): + super().__init__() + self.channels = channels + self.eps = eps + + self.gamma = torch.nn.Parameter(torch.ones(channels)) + self.beta = 
torch.nn.Parameter(torch.zeros(channels)) + + def forward(self, x): + n_dims = len(x.shape) + mean = torch.mean(x, 1, keepdim=True) + variance = torch.mean((x - mean) ** 2, 1, keepdim=True) + + x = (x - mean) * torch.rsqrt(variance + self.eps) + + shape = [1, -1] + [1] * (n_dims - 2) + x = x * self.gamma.view(*shape) + self.beta.view(*shape) + return x + + +class ConvReluNorm(nn.Module): + def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): + super().__init__() + self.in_channels = in_channels + self.hidden_channels = hidden_channels + self.out_channels = out_channels + self.kernel_size = kernel_size + self.n_layers = n_layers + self.p_dropout = p_dropout + + self.conv_layers = torch.nn.ModuleList() + self.norm_layers = torch.nn.ModuleList() + self.conv_layers.append(torch.nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size // 2)) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.relu_drop = torch.nn.Sequential(torch.nn.ReLU(), torch.nn.Dropout(p_dropout)) + for _ in range(n_layers - 1): + self.conv_layers.append( + torch.nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size // 2) + ) + self.norm_layers.append(LayerNorm(hidden_channels)) + self.proj = torch.nn.Conv1d(hidden_channels, out_channels, 1) + self.proj.weight.data.zero_() + self.proj.bias.data.zero_() + + def forward(self, x, x_mask): + x_org = x + for i in range(self.n_layers): + x = self.conv_layers[i](x * x_mask) + x = self.norm_layers[i](x) + x = self.relu_drop(x) + x = x_org + self.proj(x) + return x * x_mask + + +class DurationPredictor(nn.Module): + def __init__(self, in_channels, filter_channels, kernel_size, p_dropout): + super().__init__() + self.in_channels = in_channels + self.filter_channels = filter_channels + self.p_dropout = p_dropout + + self.drop = torch.nn.Dropout(p_dropout) + self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.norm_1 = LayerNorm(filter_channels) + self.conv_2 = torch.nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.norm_2 = LayerNorm(filter_channels) + self.proj = torch.nn.Conv1d(filter_channels, 1, 1) + + def forward(self, x, x_mask): + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.norm_1(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + x = torch.relu(x) + x = self.norm_2(x) + x = self.drop(x) + x = self.proj(x * x_mask) + return x * x_mask + + +class RotaryPositionalEmbeddings(nn.Module): + """ + ## RoPE module + + Rotary encoding transforms pairs of features by rotating in the 2D plane. + That is, it organizes the $d$ features as $\frac{d}{2}$ pairs. + Each pair can be considered a coordinate in a 2D plane, and the encoding will rotate it + by an angle depending on the position of the token. 
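A rough usage sketch of this module, assuming the class is importable and fed the shapes used by MultiHeadAttention below, which passes (batch, heads, time, head_dim) tensors and rotates only the first half of each head's features:

    import torch

    rope = RotaryPositionalEmbeddings(d=32)   # rotate only the first 32 of 64 head dims
    q = torch.randn(2, 4, 100, 64)            # (batch, heads, time, head_dim)
    q_rot = rope(q)                           # same shape; positions are encoded as rotations
    assert q_rot.shape == q.shape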
+ """ + + def __init__(self, d: int, base: int = 10_000): + r""" + * `d` is the number of features $d$ + * `base` is the constant used for calculating $\Theta$ + """ + super().__init__() + + self.base = base + self.d = int(d) + self.cos_cached = None + self.sin_cached = None + + def _build_cache(self, x: torch.Tensor): + r""" + Cache $\cos$ and $\sin$ values + """ + # Return if cache is already built + if self.cos_cached is not None and x.shape[0] <= self.cos_cached.shape[0]: + return + + # Get sequence length + seq_len = x.shape[0] + + # $\Theta = {\theta_i = 10000^{-\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$ + theta = 1.0 / (self.base ** (torch.arange(0, self.d, 2).float() / self.d)).to(x.device) + + # Create position indexes `[0, 1, ..., seq_len - 1]` + seq_idx = torch.arange(seq_len, device=x.device).float().to(x.device) + + # Calculate the product of position index and $\theta_i$ + idx_theta = torch.einsum("n,d->nd", seq_idx, theta) + + # Concatenate so that for row $m$ we have + # $[m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}, m \theta_0, m \theta_1, ..., m \theta_{\frac{d}{2}}]$ + idx_theta2 = torch.cat([idx_theta, idx_theta], dim=1) + + # Cache them + self.cos_cached = idx_theta2.cos()[:, None, None, :] + self.sin_cached = idx_theta2.sin()[:, None, None, :] + + def _neg_half(self, x: torch.Tensor): + # $\frac{d}{2}$ + d_2 = self.d // 2 + + # Calculate $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$ + return torch.cat([-x[:, :, :, d_2:], x[:, :, :, :d_2]], dim=-1) + + def forward(self, x: torch.Tensor): + """ + * `x` is the Tensor at the head of a key or a query with shape `[seq_len, batch_size, n_heads, d]` + """ + # Cache $\cos$ and $\sin$ values + x = rearrange(x, "b h t d -> t b h d") + + self._build_cache(x) + + # Split the features, we can choose to apply rotary embeddings only to a partial set of features. 
+ x_rope, x_pass = x[..., : self.d], x[..., self.d :] + + # Calculate + # $[-x^{(\frac{d}{2} + 1)}, -x^{(\frac{d}{2} + 2)}, ..., -x^{(d)}, x^{(1)}, x^{(2)}, ..., x^{(\frac{d}{2})}]$ + neg_half_x = self._neg_half(x_rope) + + x_rope = (x_rope * self.cos_cached[: x.shape[0]]) + (neg_half_x * self.sin_cached[: x.shape[0]]) + + return rearrange(torch.cat((x_rope, x_pass), dim=-1), "t b h d -> b h t d") + + +class MultiHeadAttention(nn.Module): + def __init__( + self, + channels, + out_channels, + n_heads, + heads_share=True, + p_dropout=0.0, + proximal_bias=False, + proximal_init=False, + ): + super().__init__() + assert channels % n_heads == 0 + + self.channels = channels + self.out_channels = out_channels + self.n_heads = n_heads + self.heads_share = heads_share + self.proximal_bias = proximal_bias + self.p_dropout = p_dropout + self.attn = None + + self.k_channels = channels // n_heads + self.conv_q = torch.nn.Conv1d(channels, channels, 1) + self.conv_k = torch.nn.Conv1d(channels, channels, 1) + self.conv_v = torch.nn.Conv1d(channels, channels, 1) + + # from https://nn.labml.ai/transformers/rope/index.html + self.query_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5) + self.key_rotary_pe = RotaryPositionalEmbeddings(self.k_channels * 0.5) + + self.conv_o = torch.nn.Conv1d(channels, out_channels, 1) + self.drop = torch.nn.Dropout(p_dropout) + + torch.nn.init.xavier_uniform_(self.conv_q.weight) + torch.nn.init.xavier_uniform_(self.conv_k.weight) + if proximal_init: + self.conv_k.weight.data.copy_(self.conv_q.weight.data) + self.conv_k.bias.data.copy_(self.conv_q.bias.data) + torch.nn.init.xavier_uniform_(self.conv_v.weight) + + def forward(self, x, c, attn_mask=None): + q = self.conv_q(x) + k = self.conv_k(c) + v = self.conv_v(c) + + x, self.attn = self.attention(q, k, v, mask=attn_mask) + + x = self.conv_o(x) + return x + + def attention(self, query, key, value, mask=None): + b, d, t_s, t_t = (*key.size(), query.size(2)) + query = rearrange(query, "b (h c) t-> b h t c", h=self.n_heads) + key = rearrange(key, "b (h c) t-> b h t c", h=self.n_heads) + value = rearrange(value, "b (h c) t-> b h t c", h=self.n_heads) + + query = self.query_rotary_pe(query) + key = self.key_rotary_pe(key) + + scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(self.k_channels) + + if self.proximal_bias: + assert t_s == t_t, "Proximal bias is only available for self-attention." 
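For intuition, the proximal-bias term added on the next line simply penalizes attention between distant positions; a self-contained numeric sketch of the same formula (the real helper only adds two leading singleton dimensions so it broadcasts over batch and heads):

    import torch

    r = torch.arange(4, dtype=torch.float32)
    diff = r.unsqueeze(0) - r.unsqueeze(1)
    bias = -torch.log1p(diff.abs())   # 0 on the diagonal, increasingly negative as |i - j| grows
    print(bias)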
+ scores = scores + self._attention_bias_proximal(t_s).to(device=scores.device, dtype=scores.dtype) + if mask is not None: + scores = scores.masked_fill(mask == 0, -1e4) + p_attn = torch.nn.functional.softmax(scores, dim=-1) + p_attn = self.drop(p_attn) + output = torch.matmul(p_attn, value) + output = output.transpose(2, 3).contiguous().view(b, d, t_t) + return output, p_attn + + @staticmethod + def _attention_bias_proximal(length): + r = torch.arange(length, dtype=torch.float32) + diff = torch.unsqueeze(r, 0) - torch.unsqueeze(r, 1) + return torch.unsqueeze(torch.unsqueeze(-torch.log1p(torch.abs(diff)), 0), 0) + + +class FFN(nn.Module): + def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0.0): + super().__init__() + self.in_channels = in_channels + self.out_channels = out_channels + self.filter_channels = filter_channels + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.conv_1 = torch.nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size // 2) + self.conv_2 = torch.nn.Conv1d(filter_channels, out_channels, kernel_size, padding=kernel_size // 2) + self.drop = torch.nn.Dropout(p_dropout) + + def forward(self, x, x_mask): + x = self.conv_1(x * x_mask) + x = torch.relu(x) + x = self.drop(x) + x = self.conv_2(x * x_mask) + return x * x_mask + + +class Encoder(nn.Module): + def __init__( + self, + hidden_channels, + filter_channels, + n_heads, + n_layers, + kernel_size=1, + p_dropout=0.0, + **kwargs, + ): + super().__init__() + self.hidden_channels = hidden_channels + self.filter_channels = filter_channels + self.n_heads = n_heads + self.n_layers = n_layers + self.kernel_size = kernel_size + self.p_dropout = p_dropout + + self.drop = torch.nn.Dropout(p_dropout) + self.attn_layers = torch.nn.ModuleList() + self.norm_layers_1 = torch.nn.ModuleList() + self.ffn_layers = torch.nn.ModuleList() + self.norm_layers_2 = torch.nn.ModuleList() + for _ in range(self.n_layers): + self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) + self.norm_layers_1.append(LayerNorm(hidden_channels)) + self.ffn_layers.append( + FFN( + hidden_channels, + hidden_channels, + filter_channels, + kernel_size, + p_dropout=p_dropout, + ) + ) + self.norm_layers_2.append(LayerNorm(hidden_channels)) + + def forward(self, x, x_mask): + attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) + for i in range(self.n_layers): + x = x * x_mask + y = self.attn_layers[i](x, x, attn_mask) + y = self.drop(y) + x = self.norm_layers_1[i](x + y) + y = self.ffn_layers[i](x, x_mask) + y = self.drop(y) + x = self.norm_layers_2[i](x + y) + x = x * x_mask + return x + + +class TextEncoder(nn.Module): + def __init__( + self, + encoder_type, + encoder_params, + duration_predictor_params, + n_vocab, + n_spks=1, + spk_emb_dim=128, + ): + super().__init__() + self.encoder_type = encoder_type + self.n_vocab = n_vocab + self.n_feats = encoder_params.n_feats + self.n_channels = encoder_params.n_channels + self.spk_emb_dim = spk_emb_dim + self.n_spks = n_spks + + self.emb = torch.nn.Embedding(n_vocab, self.n_channels) + torch.nn.init.normal_(self.emb.weight, 0.0, self.n_channels**-0.5) + + if encoder_params.prenet: + self.prenet = ConvReluNorm( + self.n_channels, + self.n_channels, + self.n_channels, + kernel_size=5, + n_layers=3, + p_dropout=0.5, + ) + else: + self.prenet = lambda x, x_mask: x + + self.encoder = Encoder( + encoder_params.n_channels + (spk_emb_dim if n_spks > 1 else 0), + encoder_params.filter_channels, + 
encoder_params.n_heads, + encoder_params.n_layers, + encoder_params.kernel_size, + encoder_params.p_dropout, + ) + + self.proj_m = torch.nn.Conv1d(self.n_channels + (spk_emb_dim if n_spks > 1 else 0), self.n_feats, 1) + self.proj_w = DurationPredictor( + self.n_channels + (spk_emb_dim if n_spks > 1 else 0), + duration_predictor_params.filter_channels_dp, + duration_predictor_params.kernel_size, + duration_predictor_params.p_dropout, + ) + + def forward(self, x, x_lengths, spks=None): + """Run forward pass to the transformer based encoder and duration predictor + + Args: + x (torch.Tensor): text input + shape: (batch_size, max_text_length) + x_lengths (torch.Tensor): text input lengths + shape: (batch_size,) + spks (torch.Tensor, optional): speaker ids. Defaults to None. + shape: (batch_size,) + + Returns: + mu (torch.Tensor): average output of the encoder + shape: (batch_size, n_feats, max_text_length) + logw (torch.Tensor): log duration predicted by the duration predictor + shape: (batch_size, 1, max_text_length) + x_mask (torch.Tensor): mask for the text input + shape: (batch_size, 1, max_text_length) + """ + x = self.emb(x) * math.sqrt(self.n_channels) + x = torch.transpose(x, 1, -1) + x_mask = torch.unsqueeze(sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) + + x = self.prenet(x, x_mask) + if self.n_spks > 1: + x = torch.cat([x, spks.unsqueeze(-1).repeat(1, 1, x.shape[-1])], dim=1) + x = self.encoder(x, x_mask) + mu = self.proj_m(x) * x_mask + + x_dp = torch.detach(x) + logw = self.proj_w(x_dp, x_mask) + + return mu, logw, x_mask diff --git a/third_party/Matcha-TTS/matcha/models/components/transformer.py b/third_party/Matcha-TTS/matcha/models/components/transformer.py new file mode 100644 index 0000000..dd1afa3 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/models/components/transformer.py @@ -0,0 +1,316 @@ +from typing import Any, Dict, Optional + +import torch +import torch.nn as nn +from diffusers.models.attention import ( + GEGLU, + GELU, + AdaLayerNorm, + AdaLayerNormZero, + ApproximateGELU, +) +from diffusers.models.attention_processor import Attention +from diffusers.models.lora import LoRACompatibleLinear +from diffusers.utils.torch_utils import maybe_allow_in_graph + + +class SnakeBeta(nn.Module): + """ + A modified Snake function which uses separate parameters for the magnitude of the periodic components + Shape: + - Input: (B, C, T) + - Output: (B, C, T), same shape as the input + Parameters: + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + References: + - This activation function is a modified version based on this paper by Liu Ziyin, Tilman Hartwig, Masahito Ueda: + https://arxiv.org/abs/2006.08195 + Examples: + >>> a1 = snakebeta(256) + >>> x = torch.randn(256) + >>> x = a1(x) + """ + + def __init__(self, in_features, out_features, alpha=1.0, alpha_trainable=True, alpha_logscale=True): + """ + Initialization. + INPUT: + - in_features: shape of the input + - alpha - trainable parameter that controls frequency + - beta - trainable parameter that controls magnitude + alpha is initialized to 1 by default, higher values = higher-frequency. + beta is initialized to 1 by default, higher values = higher-magnitude. + alpha will be trained along with the rest of your model. 
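With alpha_logscale=True (the default here) both parameters start at exp(0) = 1, so at initialization the activation reduces to x + sin^2(x) applied after the linear projection; a minimal numeric sketch of the formula used in forward:

    import torch

    x = torch.linspace(-2.0, 2.0, 5)
    alpha = beta = torch.ones_like(x)                          # values at initialization
    y = x + (1.0 / (beta + 1e-9)) * torch.sin(x * alpha) ** 2
    print(y)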
+ """ + super().__init__() + self.in_features = out_features if isinstance(out_features, list) else [out_features] + self.proj = LoRACompatibleLinear(in_features, out_features) + + # initialize alpha + self.alpha_logscale = alpha_logscale + if self.alpha_logscale: # log scale alphas initialized to zeros + self.alpha = nn.Parameter(torch.zeros(self.in_features) * alpha) + self.beta = nn.Parameter(torch.zeros(self.in_features) * alpha) + else: # linear scale alphas initialized to ones + self.alpha = nn.Parameter(torch.ones(self.in_features) * alpha) + self.beta = nn.Parameter(torch.ones(self.in_features) * alpha) + + self.alpha.requires_grad = alpha_trainable + self.beta.requires_grad = alpha_trainable + + self.no_div_by_zero = 0.000000001 + + def forward(self, x): + """ + Forward pass of the function. + Applies the function to the input elementwise. + SnakeBeta ∶= x + 1/b * sin^2 (xa) + """ + x = self.proj(x) + if self.alpha_logscale: + alpha = torch.exp(self.alpha) + beta = torch.exp(self.beta) + else: + alpha = self.alpha + beta = self.beta + + x = x + (1.0 / (beta + self.no_div_by_zero)) * torch.pow(torch.sin(x * alpha), 2) + + return x + + +class FeedForward(nn.Module): + r""" + A feed-forward layer. + + Parameters: + dim (`int`): The number of channels in the input. + dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`. + mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + final_dropout (`bool` *optional*, defaults to False): Apply a final dropout. + """ + + def __init__( + self, + dim: int, + dim_out: Optional[int] = None, + mult: int = 4, + dropout: float = 0.0, + activation_fn: str = "geglu", + final_dropout: bool = False, + ): + super().__init__() + inner_dim = int(dim * mult) + dim_out = dim_out if dim_out is not None else dim + + if activation_fn == "gelu": + act_fn = GELU(dim, inner_dim) + if activation_fn == "gelu-approximate": + act_fn = GELU(dim, inner_dim, approximate="tanh") + elif activation_fn == "geglu": + act_fn = GEGLU(dim, inner_dim) + elif activation_fn == "geglu-approximate": + act_fn = ApproximateGELU(dim, inner_dim) + elif activation_fn == "snakebeta": + act_fn = SnakeBeta(dim, inner_dim) + + self.net = nn.ModuleList([]) + # project in + self.net.append(act_fn) + # project dropout + self.net.append(nn.Dropout(dropout)) + # project out + self.net.append(LoRACompatibleLinear(inner_dim, dim_out)) + # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout + if final_dropout: + self.net.append(nn.Dropout(dropout)) + + def forward(self, hidden_states): + for module in self.net: + hidden_states = module(hidden_states) + return hidden_states + + +@maybe_allow_in_graph +class BasicTransformerBlock(nn.Module): + r""" + A basic Transformer block. + + Parameters: + dim (`int`): The number of channels in the input and output. + num_attention_heads (`int`): The number of heads to use for multi-head attention. + attention_head_dim (`int`): The number of channels in each head. + dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. + cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention. + only_cross_attention (`bool`, *optional*): + Whether to use only cross-attention layers. 
In this case two cross attention layers are used. + double_self_attention (`bool`, *optional*): + Whether to use two self-attention layers. In this case no cross attention layers are used. + activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. + num_embeds_ada_norm (: + obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`. + attention_bias (: + obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter. + """ + + def __init__( + self, + dim: int, + num_attention_heads: int, + attention_head_dim: int, + dropout=0.0, + cross_attention_dim: Optional[int] = None, + activation_fn: str = "geglu", + num_embeds_ada_norm: Optional[int] = None, + attention_bias: bool = False, + only_cross_attention: bool = False, + double_self_attention: bool = False, + upcast_attention: bool = False, + norm_elementwise_affine: bool = True, + norm_type: str = "layer_norm", + final_dropout: bool = False, + ): + super().__init__() + self.only_cross_attention = only_cross_attention + + self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero" + self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm" + + if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: + raise ValueError( + f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to" + f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}." + ) + + # Define 3 blocks. Each block has its own normalization layer. + # 1. Self-Attn + if self.use_ada_layer_norm: + self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm) + elif self.use_ada_layer_norm_zero: + self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm) + else: + self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + self.attn1 = Attention( + query_dim=dim, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + cross_attention_dim=cross_attention_dim if only_cross_attention else None, + upcast_attention=upcast_attention, + ) + + # 2. Cross-Attn + if cross_attention_dim is not None or double_self_attention: + # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. + # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during + # the second cross attention block. + self.norm2 = ( + AdaLayerNorm(dim, num_embeds_ada_norm) + if self.use_ada_layer_norm + else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + ) + self.attn2 = Attention( + query_dim=dim, + cross_attention_dim=cross_attention_dim if not double_self_attention else None, + heads=num_attention_heads, + dim_head=attention_head_dim, + dropout=dropout, + bias=attention_bias, + upcast_attention=upcast_attention, + # scale_qk=False, # uncomment this to not to use flash attention + ) # is self-attn if encoder_hidden_states is none + else: + self.norm2 = None + self.attn2 = None + + # 3. 
Feed-forward + self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine) + self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout) + + # let chunk size default to None + self._chunk_size = None + self._chunk_dim = 0 + + def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int): + # Sets chunk feed-forward + self._chunk_size = chunk_size + self._chunk_dim = dim + + def forward( + self, + hidden_states: torch.FloatTensor, + attention_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.FloatTensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + timestep: Optional[torch.LongTensor] = None, + cross_attention_kwargs: Dict[str, Any] = None, + class_labels: Optional[torch.LongTensor] = None, + ): + # Notice that normalization is always applied before the real computation in the following blocks. + # 1. Self-Attention + if self.use_ada_layer_norm: + norm_hidden_states = self.norm1(hidden_states, timestep) + elif self.use_ada_layer_norm_zero: + norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1( + hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype + ) + else: + norm_hidden_states = self.norm1(hidden_states) + + cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {} + + attn_output = self.attn1( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None, + attention_mask=encoder_attention_mask if self.only_cross_attention else attention_mask, + **cross_attention_kwargs, + ) + if self.use_ada_layer_norm_zero: + attn_output = gate_msa.unsqueeze(1) * attn_output + hidden_states = attn_output + hidden_states + + # 2. Cross-Attention + if self.attn2 is not None: + norm_hidden_states = ( + self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states) + ) + + attn_output = self.attn2( + norm_hidden_states, + encoder_hidden_states=encoder_hidden_states, + attention_mask=encoder_attention_mask, + **cross_attention_kwargs, + ) + hidden_states = attn_output + hidden_states + + # 3. Feed-forward + norm_hidden_states = self.norm3(hidden_states) + + if self.use_ada_layer_norm_zero: + norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] + + if self._chunk_size is not None: + # "feed_forward_chunk_size" can be used to save memory + if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: + raise ValueError( + f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`." 
+ ) + + num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size + ff_output = torch.cat( + [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)], + dim=self._chunk_dim, + ) + else: + ff_output = self.ff(norm_hidden_states) + + if self.use_ada_layer_norm_zero: + ff_output = gate_mlp.unsqueeze(1) * ff_output + + hidden_states = ff_output + hidden_states + + return hidden_states diff --git a/third_party/Matcha-TTS/matcha/models/matcha_tts.py b/third_party/Matcha-TTS/matcha/models/matcha_tts.py new file mode 100644 index 0000000..64b2c07 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/models/matcha_tts.py @@ -0,0 +1,239 @@ +import datetime as dt +import math +import random + +import torch + +import matcha.utils.monotonic_align as monotonic_align +from matcha import utils +from matcha.models.baselightningmodule import BaseLightningClass +from matcha.models.components.flow_matching import CFM +from matcha.models.components.text_encoder import TextEncoder +from matcha.utils.model import ( + denormalize, + duration_loss, + fix_len_compatibility, + generate_path, + sequence_mask, +) + +log = utils.get_pylogger(__name__) + + +class MatchaTTS(BaseLightningClass): # 🍵 + def __init__( + self, + n_vocab, + n_spks, + spk_emb_dim, + n_feats, + encoder, + decoder, + cfm, + data_statistics, + out_size, + optimizer=None, + scheduler=None, + prior_loss=True, + ): + super().__init__() + + self.save_hyperparameters(logger=False) + + self.n_vocab = n_vocab + self.n_spks = n_spks + self.spk_emb_dim = spk_emb_dim + self.n_feats = n_feats + self.out_size = out_size + self.prior_loss = prior_loss + + if n_spks > 1: + self.spk_emb = torch.nn.Embedding(n_spks, spk_emb_dim) + + self.encoder = TextEncoder( + encoder.encoder_type, + encoder.encoder_params, + encoder.duration_predictor_params, + n_vocab, + n_spks, + spk_emb_dim, + ) + + self.decoder = CFM( + in_channels=2 * encoder.encoder_params.n_feats, + out_channel=encoder.encoder_params.n_feats, + cfm_params=cfm, + decoder_params=decoder, + n_spks=n_spks, + spk_emb_dim=spk_emb_dim, + ) + + self.update_data_statistics(data_statistics) + + @torch.inference_mode() + def synthesise(self, x, x_lengths, n_timesteps, temperature=1.0, spks=None, length_scale=1.0): + """ + Generates mel-spectrogram from text. Returns: + 1. encoder outputs + 2. decoder outputs + 3. generated alignment + + Args: + x (torch.Tensor): batch of texts, converted to a tensor with phoneme embedding ids. + shape: (batch_size, max_text_length) + x_lengths (torch.Tensor): lengths of texts in batch. + shape: (batch_size,) + n_timesteps (int): number of steps to use for reverse diffusion in decoder. + temperature (float, optional): controls variance of terminal distribution. + spks (bool, optional): speaker ids. + shape: (batch_size,) + length_scale (float, optional): controls speech pace. + Increase value to slow down generated speech and vice versa. 
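Inside this method the predicted durations w_ceil are expanded into a monotonic text-to-mel alignment; a torch-only illustration of that expansion (the idea behind generate_path, not the actual helper):

    import torch

    durations = torch.tensor([2, 1, 3])                        # frames predicted per phoneme
    ends = durations.cumsum(0)
    starts = ends - durations
    frames = torch.arange(int(ends[-1]))
    attn = ((frames[None, :] >= starts[:, None]) & (frames[None, :] < ends[:, None])).float()
    print(attn)                                                # (n_phonemes, n_frames) 0/1 alignment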
+ + Returns: + dict: { + "encoder_outputs": torch.Tensor, shape: (batch_size, n_feats, max_mel_length), + # Average mel spectrogram generated by the encoder + "decoder_outputs": torch.Tensor, shape: (batch_size, n_feats, max_mel_length), + # Refined mel spectrogram improved by the CFM + "attn": torch.Tensor, shape: (batch_size, max_text_length, max_mel_length), + # Alignment map between text and mel spectrogram + "mel": torch.Tensor, shape: (batch_size, n_feats, max_mel_length), + # Denormalized mel spectrogram + "mel_lengths": torch.Tensor, shape: (batch_size,), + # Lengths of mel spectrograms + "rtf": float, + # Real-time factor + """ + # For RTF computation + t = dt.datetime.now() + + if self.n_spks > 1: + # Get speaker embedding + spks = self.spk_emb(spks.long()) + + # Get encoder_outputs `mu_x` and log-scaled token durations `logw` + mu_x, logw, x_mask = self.encoder(x, x_lengths, spks) + + w = torch.exp(logw) * x_mask + w_ceil = torch.ceil(w) * length_scale + y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() + y_max_length = y_lengths.max() + y_max_length_ = fix_len_compatibility(y_max_length) + + # Using obtained durations `w` construct alignment map `attn` + y_mask = sequence_mask(y_lengths, y_max_length_).unsqueeze(1).to(x_mask.dtype) + attn_mask = x_mask.unsqueeze(-1) * y_mask.unsqueeze(2) + attn = generate_path(w_ceil.squeeze(1), attn_mask.squeeze(1)).unsqueeze(1) + + # Align encoded text and get mu_y + mu_y = torch.matmul(attn.squeeze(1).transpose(1, 2), mu_x.transpose(1, 2)) + mu_y = mu_y.transpose(1, 2) + encoder_outputs = mu_y[:, :, :y_max_length] + + # Generate sample tracing the probability flow + decoder_outputs = self.decoder(mu_y, y_mask, n_timesteps, temperature, spks) + decoder_outputs = decoder_outputs[:, :, :y_max_length] + + t = (dt.datetime.now() - t).total_seconds() + rtf = t * 22050 / (decoder_outputs.shape[-1] * 256) + + return { + "encoder_outputs": encoder_outputs, + "decoder_outputs": decoder_outputs, + "attn": attn[:, :, :y_max_length], + "mel": denormalize(decoder_outputs, self.mel_mean, self.mel_std), + "mel_lengths": y_lengths, + "rtf": rtf, + } + + def forward(self, x, x_lengths, y, y_lengths, spks=None, out_size=None, cond=None): + """ + Computes 3 losses: + 1. duration loss: loss between predicted token durations and those extracted by Monotinic Alignment Search (MAS). + 2. prior loss: loss between mel-spectrogram and encoder outputs. + 3. flow matching loss: loss between mel-spectrogram and decoder outputs. + + Args: + x (torch.Tensor): batch of texts, converted to a tensor with phoneme embedding ids. + shape: (batch_size, max_text_length) + x_lengths (torch.Tensor): lengths of texts in batch. + shape: (batch_size,) + y (torch.Tensor): batch of corresponding mel-spectrograms. + shape: (batch_size, n_feats, max_mel_length) + y_lengths (torch.Tensor): lengths of mel-spectrograms in batch. + shape: (batch_size,) + out_size (int, optional): length (in mel's sampling rate) of segment to cut, on which decoder will be trained. + Should be divisible by 2^{num of UNet downsamplings}. Needed to increase batch size. + spks (torch.Tensor, optional): speaker ids. 
+ shape: (batch_size,) + """ + if self.n_spks > 1: + # Get speaker embedding + spks = self.spk_emb(spks) + + # Get encoder_outputs `mu_x` and log-scaled token durations `logw` + mu_x, logw, x_mask = self.encoder(x, x_lengths, spks) + y_max_length = y.shape[-1] + + y_mask = sequence_mask(y_lengths, y_max_length).unsqueeze(1).to(x_mask) + attn_mask = x_mask.unsqueeze(-1) * y_mask.unsqueeze(2) + + # Use MAS to find most likely alignment `attn` between text and mel-spectrogram + with torch.no_grad(): + const = -0.5 * math.log(2 * math.pi) * self.n_feats + factor = -0.5 * torch.ones(mu_x.shape, dtype=mu_x.dtype, device=mu_x.device) + y_square = torch.matmul(factor.transpose(1, 2), y**2) + y_mu_double = torch.matmul(2.0 * (factor * mu_x).transpose(1, 2), y) + mu_square = torch.sum(factor * (mu_x**2), 1).unsqueeze(-1) + log_prior = y_square - y_mu_double + mu_square + const + + attn = monotonic_align.maximum_path(log_prior, attn_mask.squeeze(1)) + attn = attn.detach() + + # Compute loss between predicted log-scaled durations and those obtained from MAS + # refered to as prior loss in the paper + logw_ = torch.log(1e-8 + torch.sum(attn.unsqueeze(1), -1)) * x_mask + dur_loss = duration_loss(logw, logw_, x_lengths) + + # Cut a small segment of mel-spectrogram in order to increase batch size + # - "Hack" taken from Grad-TTS, in case of Grad-TTS, we cannot train batch size 32 on a 24GB GPU without it + # - Do not need this hack for Matcha-TTS, but it works with it as well + if not isinstance(out_size, type(None)): + max_offset = (y_lengths - out_size).clamp(0) + offset_ranges = list(zip([0] * max_offset.shape[0], max_offset.cpu().numpy())) + out_offset = torch.LongTensor( + [torch.tensor(random.choice(range(start, end)) if end > start else 0) for start, end in offset_ranges] + ).to(y_lengths) + attn_cut = torch.zeros(attn.shape[0], attn.shape[1], out_size, dtype=attn.dtype, device=attn.device) + y_cut = torch.zeros(y.shape[0], self.n_feats, out_size, dtype=y.dtype, device=y.device) + + y_cut_lengths = [] + for i, (y_, out_offset_) in enumerate(zip(y, out_offset)): + y_cut_length = out_size + (y_lengths[i] - out_size).clamp(None, 0) + y_cut_lengths.append(y_cut_length) + cut_lower, cut_upper = out_offset_, out_offset_ + y_cut_length + y_cut[i, :, :y_cut_length] = y_[:, cut_lower:cut_upper] + attn_cut[i, :, :y_cut_length] = attn[i, :, cut_lower:cut_upper] + + y_cut_lengths = torch.LongTensor(y_cut_lengths) + y_cut_mask = sequence_mask(y_cut_lengths).unsqueeze(1).to(y_mask) + + attn = attn_cut + y = y_cut + y_mask = y_cut_mask + + # Align encoded text with mel-spectrogram and get mu_y segment + mu_y = torch.matmul(attn.squeeze(1).transpose(1, 2), mu_x.transpose(1, 2)) + mu_y = mu_y.transpose(1, 2) + + # Compute loss of the decoder + diff_loss, _ = self.decoder.compute_loss(x1=y, mask=y_mask, mu=mu_y, spks=spks, cond=cond) + + if self.prior_loss: + prior_loss = torch.sum(0.5 * ((y - mu_y) ** 2 + math.log(2 * math.pi)) * y_mask) + prior_loss = prior_loss / (torch.sum(y_mask) * self.n_feats) + else: + prior_loss = 0 + + return dur_loss, prior_loss, diff_loss diff --git a/third_party/Matcha-TTS/matcha/onnx/__init__.py b/third_party/Matcha-TTS/matcha/onnx/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/third_party/Matcha-TTS/matcha/onnx/export.py b/third_party/Matcha-TTS/matcha/onnx/export.py new file mode 100644 index 0000000..9b79508 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/onnx/export.py @@ -0,0 +1,181 @@ +import argparse +import random +from pathlib import Path + +import 
numpy as np +import torch +from lightning import LightningModule + +from matcha.cli import VOCODER_URLS, load_matcha, load_vocoder + +DEFAULT_OPSET = 15 + +SEED = 1234 +random.seed(SEED) +np.random.seed(SEED) +torch.manual_seed(SEED) +torch.cuda.manual_seed(SEED) +torch.backends.cudnn.deterministic = True +torch.backends.cudnn.benchmark = False + + +class MatchaWithVocoder(LightningModule): + def __init__(self, matcha, vocoder): + super().__init__() + self.matcha = matcha + self.vocoder = vocoder + + def forward(self, x, x_lengths, scales, spks=None): + mel, mel_lengths = self.matcha(x, x_lengths, scales, spks) + wavs = self.vocoder(mel).clamp(-1, 1) + lengths = mel_lengths * 256 + return wavs.squeeze(1), lengths + + +def get_exportable_module(matcha, vocoder, n_timesteps): + """ + Return an appropriate `LighteningModule` and output-node names + based on whether the vocoder is embedded in the final graph + """ + + def onnx_forward_func(x, x_lengths, scales, spks=None): + """ + Custom forward function for accepting + scaler parameters as tensors + """ + # Extract scaler parameters from tensors + temperature = scales[0] + length_scale = scales[1] + output = matcha.synthesise(x, x_lengths, n_timesteps, temperature, spks, length_scale) + return output["mel"], output["mel_lengths"] + + # Monkey-patch Matcha's forward function + matcha.forward = onnx_forward_func + + if vocoder is None: + model, output_names = matcha, ["mel", "mel_lengths"] + else: + model = MatchaWithVocoder(matcha, vocoder) + output_names = ["wav", "wav_lengths"] + return model, output_names + + +def get_inputs(is_multi_speaker): + """ + Create dummy inputs for tracing + """ + dummy_input_length = 50 + x = torch.randint(low=0, high=20, size=(1, dummy_input_length), dtype=torch.long) + x_lengths = torch.LongTensor([dummy_input_length]) + + # Scales + temperature = 0.667 + length_scale = 1.0 + scales = torch.Tensor([temperature, length_scale]) + + model_inputs = [x, x_lengths, scales] + input_names = [ + "x", + "x_lengths", + "scales", + ] + + if is_multi_speaker: + spks = torch.LongTensor([1]) + model_inputs.append(spks) + input_names.append("spks") + + return tuple(model_inputs), input_names + + +def main(): + parser = argparse.ArgumentParser(description="Export 🍵 Matcha-TTS to ONNX") + + parser.add_argument( + "checkpoint_path", + type=str, + help="Path to the model checkpoint", + ) + parser.add_argument("output", type=str, help="Path to output `.onnx` file") + parser.add_argument( + "--n-timesteps", type=int, default=5, help="Number of steps to use for reverse diffusion in decoder (default 5)" + ) + parser.add_argument( + "--vocoder-name", + type=str, + choices=list(VOCODER_URLS.keys()), + default=None, + help="Name of the vocoder to embed in the ONNX graph", + ) + parser.add_argument( + "--vocoder-checkpoint-path", + type=str, + default=None, + help="Vocoder checkpoint to embed in the ONNX graph for an `e2e` like experience", + ) + parser.add_argument("--opset", type=int, default=DEFAULT_OPSET, help="ONNX opset version to use (default 15") + + args = parser.parse_args() + + print(f"[🍵] Loading Matcha checkpoint from {args.checkpoint_path}") + print(f"Setting n_timesteps to {args.n_timesteps}") + + checkpoint_path = Path(args.checkpoint_path) + matcha = load_matcha(checkpoint_path.stem, checkpoint_path, "cpu") + + if args.vocoder_name or args.vocoder_checkpoint_path: + assert ( + args.vocoder_name and args.vocoder_checkpoint_path + ), "Both vocoder_name and vocoder-checkpoint are required when embedding the vocoder in the 
ONNX graph." + vocoder, _ = load_vocoder(args.vocoder_name, args.vocoder_checkpoint_path, "cpu") + else: + vocoder = None + + is_multi_speaker = matcha.n_spks > 1 + + dummy_input, input_names = get_inputs(is_multi_speaker) + model, output_names = get_exportable_module(matcha, vocoder, args.n_timesteps) + + # Set dynamic shape for inputs/outputs + dynamic_axes = { + "x": {0: "batch_size", 1: "time"}, + "x_lengths": {0: "batch_size"}, + } + + if vocoder is None: + dynamic_axes.update( + { + "mel": {0: "batch_size", 2: "time"}, + "mel_lengths": {0: "batch_size"}, + } + ) + else: + print("Embedding the vocoder in the ONNX graph") + dynamic_axes.update( + { + "wav": {0: "batch_size", 1: "time"}, + "wav_lengths": {0: "batch_size"}, + } + ) + + if is_multi_speaker: + dynamic_axes["spks"] = {0: "batch_size"} + + # Create the output directory (if not exists) + Path(args.output).parent.mkdir(parents=True, exist_ok=True) + + model.to_onnx( + args.output, + dummy_input, + input_names=input_names, + output_names=output_names, + dynamic_axes=dynamic_axes, + opset_version=args.opset, + export_params=True, + do_constant_folding=True, + ) + print(f"[🍵] ONNX model exported to {args.output}") + + +if __name__ == "__main__": + main() diff --git a/third_party/Matcha-TTS/matcha/onnx/infer.py b/third_party/Matcha-TTS/matcha/onnx/infer.py new file mode 100644 index 0000000..89ca925 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/onnx/infer.py @@ -0,0 +1,168 @@ +import argparse +import os +import warnings +from pathlib import Path +from time import perf_counter + +import numpy as np +import onnxruntime as ort +import soundfile as sf +import torch + +from matcha.cli import plot_spectrogram_to_numpy, process_text + + +def validate_args(args): + assert ( + args.text or args.file + ), "Either text or file must be provided Matcha-T(ea)TTS need sometext to whisk the waveforms." 
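The exported graph accepts phoneme ids plus a single scales tensor packing [temperature, length_scale]; a minimal onnxruntime sketch of driving it (the model path and the random ids below are placeholders):

    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession("matcha.onnx", providers=["CPUExecutionProvider"])
    inputs = {
        "x": np.random.randint(0, 20, size=(1, 50)).astype(np.int64),   # dummy phoneme ids
        "x_lengths": np.array([50], dtype=np.int64),
        "scales": np.array([0.667, 1.0], dtype=np.float32),             # [temperature, length_scale]
    }
    mel, mel_lengths = sess.run(None, inputs)   # (wav, wav_lengths) instead if a vocoder was embedded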
+ assert args.temperature >= 0, "Sampling temperature cannot be negative" + assert args.speaking_rate >= 0, "Speaking rate must be greater than 0" + return args + + +def write_wavs(model, inputs, output_dir, external_vocoder=None): + if external_vocoder is None: + print("The provided model has the vocoder embedded in the graph.\nGenerating waveform directly") + t0 = perf_counter() + wavs, wav_lengths = model.run(None, inputs) + infer_secs = perf_counter() - t0 + mel_infer_secs = vocoder_infer_secs = None + else: + print("[🍵] Generating mel using Matcha") + mel_t0 = perf_counter() + mels, mel_lengths = model.run(None, inputs) + mel_infer_secs = perf_counter() - mel_t0 + print("Generating waveform from mel using external vocoder") + vocoder_inputs = {external_vocoder.get_inputs()[0].name: mels} + vocoder_t0 = perf_counter() + wavs = external_vocoder.run(None, vocoder_inputs)[0] + vocoder_infer_secs = perf_counter() - vocoder_t0 + wavs = wavs.squeeze(1) + wav_lengths = mel_lengths * 256 + infer_secs = mel_infer_secs + vocoder_infer_secs + + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + for i, (wav, wav_length) in enumerate(zip(wavs, wav_lengths)): + output_filename = output_dir.joinpath(f"output_{i + 1}.wav") + audio = wav[:wav_length] + print(f"Writing audio to {output_filename}") + sf.write(output_filename, audio, 22050, "PCM_24") + + wav_secs = wav_lengths.sum() / 22050 + print(f"Inference seconds: {infer_secs}") + print(f"Generated wav seconds: {wav_secs}") + rtf = infer_secs / wav_secs + if mel_infer_secs is not None: + mel_rtf = mel_infer_secs / wav_secs + print(f"Matcha RTF: {mel_rtf}") + if vocoder_infer_secs is not None: + vocoder_rtf = vocoder_infer_secs / wav_secs + print(f"Vocoder RTF: {vocoder_rtf}") + print(f"Overall RTF: {rtf}") + + +def write_mels(model, inputs, output_dir): + t0 = perf_counter() + mels, mel_lengths = model.run(None, inputs) + infer_secs = perf_counter() - t0 + + output_dir = Path(output_dir) + output_dir.mkdir(parents=True, exist_ok=True) + for i, mel in enumerate(mels): + output_stem = output_dir.joinpath(f"output_{i + 1}") + plot_spectrogram_to_numpy(mel.squeeze(), output_stem.with_suffix(".png")) + np.save(output_stem.with_suffix(".numpy"), mel) + + wav_secs = (mel_lengths * 256).sum() / 22050 + print(f"Inference seconds: {infer_secs}") + print(f"Generated wav seconds: {wav_secs}") + rtf = infer_secs / wav_secs + print(f"RTF: {rtf}") + + +def main(): + parser = argparse.ArgumentParser( + description=" 🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching" + ) + parser.add_argument( + "model", + type=str, + help="ONNX model to use", + ) + parser.add_argument("--vocoder", type=str, default=None, help="Vocoder to use (defaults to None)") + parser.add_argument("--text", type=str, default=None, help="Text to synthesize") + parser.add_argument("--file", type=str, default=None, help="Text file to synthesize") + parser.add_argument("--spk", type=int, default=None, help="Speaker ID") + parser.add_argument( + "--temperature", + type=float, + default=0.667, + help="Variance of the x0 noise (default: 0.667)", + ) + parser.add_argument( + "--speaking-rate", + type=float, + default=1.0, + help="change the speaking rate, a higher value means slower speaking rate (default: 1.0)", + ) + parser.add_argument("--gpu", action="store_true", help="Use CPU for inference (default: use GPU if available)") + parser.add_argument( + "--output-dir", + type=str, + default=os.getcwd(), + help="Output folder to save results (default: 
current dir)", + ) + + args = parser.parse_args() + args = validate_args(args) + + if args.gpu: + providers = ["GPUExecutionProvider"] + else: + providers = ["CPUExecutionProvider"] + model = ort.InferenceSession(args.model, providers=providers) + + model_inputs = model.get_inputs() + model_outputs = list(model.get_outputs()) + + if args.text: + text_lines = args.text.splitlines() + else: + with open(args.file, encoding="utf-8") as file: + text_lines = file.read().splitlines() + + processed_lines = [process_text(0, line, "cpu") for line in text_lines] + x = [line["x"].squeeze() for line in processed_lines] + # Pad + x = torch.nn.utils.rnn.pad_sequence(x, batch_first=True) + x = x.detach().cpu().numpy() + x_lengths = np.array([line["x_lengths"].item() for line in processed_lines], dtype=np.int64) + inputs = { + "x": x, + "x_lengths": x_lengths, + "scales": np.array([args.temperature, args.speaking_rate], dtype=np.float32), + } + is_multi_speaker = len(model_inputs) == 4 + if is_multi_speaker: + if args.spk is None: + args.spk = 0 + warn = "[!] Speaker ID not provided! Using speaker ID 0" + warnings.warn(warn, UserWarning) + inputs["spks"] = np.repeat(args.spk, x.shape[0]).astype(np.int64) + + has_vocoder_embedded = model_outputs[0].name == "wav" + if has_vocoder_embedded: + write_wavs(model, inputs, args.output_dir) + elif args.vocoder: + external_vocoder = ort.InferenceSession(args.vocoder, providers=providers) + write_wavs(model, inputs, args.output_dir, external_vocoder=external_vocoder) + else: + warn = "[!] A vocoder is not embedded in the graph nor an external vocoder is provided. The mel output will be written as numpy arrays to `*.npy` files in the output directory" + warnings.warn(warn, UserWarning) + write_mels(model, inputs, args.output_dir) + + +if __name__ == "__main__": + main() diff --git a/third_party/Matcha-TTS/matcha/text/__init__.py b/third_party/Matcha-TTS/matcha/text/__init__.py new file mode 100644 index 0000000..71a4b57 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/text/__init__.py @@ -0,0 +1,53 @@ +""" from https://github.com/keithito/tacotron """ +from matcha.text import cleaners +from matcha.text.symbols import symbols + +# Mappings from symbol to numeric ID and vice versa: +_symbol_to_id = {s: i for i, s in enumerate(symbols)} +_id_to_symbol = {i: s for i, s in enumerate(symbols)} # pylint: disable=unnecessary-comprehension + + +def text_to_sequence(text, cleaner_names): + """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. + Args: + text: string to convert to a sequence + cleaner_names: names of the cleaner functions to run the text through + Returns: + List of integers corresponding to the symbols in the text + """ + sequence = [] + + clean_text = _clean_text(text, cleaner_names) + for symbol in clean_text: + symbol_id = _symbol_to_id[symbol] + sequence += [symbol_id] + return sequence + + +def cleaned_text_to_sequence(cleaned_text): + """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. 
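A small interactive sketch of the symbol/id round trip (assumes the matcha package and its espeak/piper phonemizer dependencies are installed, since importing matcha.text initializes the phonemizer):

    >>> from matcha.text import cleaned_text_to_sequence, sequence_to_text
    >>> ids = cleaned_text_to_sequence("ab c")   # every character must exist in symbols.py
    >>> sequence_to_text(ids)
    'ab c'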
+ Args: + text: string to convert to a sequence + Returns: + List of integers corresponding to the symbols in the text + """ + sequence = [_symbol_to_id[symbol] for symbol in cleaned_text] + return sequence + + +def sequence_to_text(sequence): + """Converts a sequence of IDs back to a string""" + result = "" + for symbol_id in sequence: + s = _id_to_symbol[symbol_id] + result += s + return result + + +def _clean_text(text, cleaner_names): + for name in cleaner_names: + cleaner = getattr(cleaners, name) + if not cleaner: + raise Exception("Unknown cleaner: %s" % name) + text = cleaner(text) + return text diff --git a/third_party/Matcha-TTS/matcha/text/cleaners.py b/third_party/Matcha-TTS/matcha/text/cleaners.py new file mode 100644 index 0000000..5e8d96b --- /dev/null +++ b/third_party/Matcha-TTS/matcha/text/cleaners.py @@ -0,0 +1,116 @@ +""" from https://github.com/keithito/tacotron + +Cleaners are transformations that run over the input text at both training and eval time. + +Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners" +hyperparameter. Some cleaners are English-specific. You'll typically want to use: + 1. "english_cleaners" for English text + 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using + the Unidecode library (https://pypi.python.org/pypi/Unidecode) + 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update + the symbols in symbols.py to match your data). +""" + +import logging +import re + +import phonemizer +import piper_phonemize +from unidecode import unidecode + +# To avoid excessive logging we set the log level of the phonemizer package to Critical +critical_logger = logging.getLogger("phonemizer") +critical_logger.setLevel(logging.CRITICAL) + +# Intializing the phonemizer globally significantly reduces the speed +# now the phonemizer is not initialising at every call +# Might be less flexible, but it is much-much faster +global_phonemizer = phonemizer.backend.EspeakBackend( + language="en-us", + preserve_punctuation=True, + with_stress=True, + language_switch="remove-flags", + logger=critical_logger, +) + + +# Regular expression matching whitespace: +_whitespace_re = re.compile(r"\s+") + +# List of (regular expression, replacement) pairs for abbreviations: +_abbreviations = [ + (re.compile("\\b%s\\." 
% x[0], re.IGNORECASE), x[1]) + for x in [ + ("mrs", "misess"), + ("mr", "mister"), + ("dr", "doctor"), + ("st", "saint"), + ("co", "company"), + ("jr", "junior"), + ("maj", "major"), + ("gen", "general"), + ("drs", "doctors"), + ("rev", "reverend"), + ("lt", "lieutenant"), + ("hon", "honorable"), + ("sgt", "sergeant"), + ("capt", "captain"), + ("esq", "esquire"), + ("ltd", "limited"), + ("col", "colonel"), + ("ft", "fort"), + ] +] + + +def expand_abbreviations(text): + for regex, replacement in _abbreviations: + text = re.sub(regex, replacement, text) + return text + + +def lowercase(text): + return text.lower() + + +def collapse_whitespace(text): + return re.sub(_whitespace_re, " ", text) + + +def convert_to_ascii(text): + return unidecode(text) + + +def basic_cleaners(text): + """Basic pipeline that lowercases and collapses whitespace without transliteration.""" + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def transliteration_cleaners(text): + """Pipeline for non-English text that transliterates to ASCII.""" + text = convert_to_ascii(text) + text = lowercase(text) + text = collapse_whitespace(text) + return text + + +def english_cleaners2(text): + """Pipeline for English text, including abbreviation expansion. + punctuation + stress""" + text = convert_to_ascii(text) + text = lowercase(text) + text = expand_abbreviations(text) + phonemes = global_phonemizer.phonemize([text], strip=True, njobs=1)[0] + phonemes = collapse_whitespace(phonemes) + return phonemes + + +def english_cleaners_piper(text): + """Pipeline for English text, including abbreviation expansion. + punctuation + stress""" + text = convert_to_ascii(text) + text = lowercase(text) + text = expand_abbreviations(text) + phonemes = "".join(piper_phonemize.phonemize_espeak(text=text, voice="en-US")[0]) + phonemes = collapse_whitespace(phonemes) + return phonemes diff --git a/third_party/Matcha-TTS/matcha/text/numbers.py b/third_party/Matcha-TTS/matcha/text/numbers.py new file mode 100644 index 0000000..f99a868 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/text/numbers.py @@ -0,0 +1,71 @@ +""" from https://github.com/keithito/tacotron """ + +import re + +import inflect + +_inflect = inflect.engine() +_comma_number_re = re.compile(r"([0-9][0-9\,]+[0-9])") +_decimal_number_re = re.compile(r"([0-9]+\.[0-9]+)") +_pounds_re = re.compile(r"£([0-9\,]*[0-9]+)") +_dollars_re = re.compile(r"\$([0-9\.\,]*[0-9]+)") +_ordinal_re = re.compile(r"[0-9]+(st|nd|rd|th)") +_number_re = re.compile(r"[0-9]+") + + +def _remove_commas(m): + return m.group(1).replace(",", "") + + +def _expand_decimal_point(m): + return m.group(1).replace(".", " point ") + + +def _expand_dollars(m): + match = m.group(1) + parts = match.split(".") + if len(parts) > 2: + return match + " dollars" + dollars = int(parts[0]) if parts[0] else 0 + cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0 + if dollars and cents: + dollar_unit = "dollar" if dollars == 1 else "dollars" + cent_unit = "cent" if cents == 1 else "cents" + return f"{dollars} {dollar_unit}, {cents} {cent_unit}" + elif dollars: + dollar_unit = "dollar" if dollars == 1 else "dollars" + return f"{dollars} {dollar_unit}" + elif cents: + cent_unit = "cent" if cents == 1 else "cents" + return f"{cents} {cent_unit}" + else: + return "zero dollars" + + +def _expand_ordinal(m): + return _inflect.number_to_words(m.group(0)) + + +def _expand_number(m): + num = int(m.group(0)) + if num > 1000 and num < 3000: + if num == 2000: + return "two thousand" + elif num > 2000 and num < 
2010: + return "two thousand " + _inflect.number_to_words(num % 100) + elif num % 100 == 0: + return _inflect.number_to_words(num // 100) + " hundred" + else: + return _inflect.number_to_words(num, andword="", zero="oh", group=2).replace(", ", " ") + else: + return _inflect.number_to_words(num, andword="") + + +def normalize_numbers(text): + text = re.sub(_comma_number_re, _remove_commas, text) + text = re.sub(_pounds_re, r"\1 pounds", text) + text = re.sub(_dollars_re, _expand_dollars, text) + text = re.sub(_decimal_number_re, _expand_decimal_point, text) + text = re.sub(_ordinal_re, _expand_ordinal, text) + text = re.sub(_number_re, _expand_number, text) + return text diff --git a/third_party/Matcha-TTS/matcha/text/symbols.py b/third_party/Matcha-TTS/matcha/text/symbols.py new file mode 100644 index 0000000..7018df5 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/text/symbols.py @@ -0,0 +1,17 @@ +""" from https://github.com/keithito/tacotron + +Defines the set of symbols used in text input to the model. +""" +_pad = "_" +_punctuation = ';:,.!?¡¿—…"«»“” ' +_letters = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" +_letters_ipa = ( + "ɑɐɒæɓʙβɔɕçɗɖðʤəɘɚɛɜɝɞɟʄɡɠɢʛɦɧħɥʜɨɪʝɭɬɫɮʟɱɯɰŋɳɲɴøɵɸθœɶʘɹɺɾɻʀʁɽʂʃʈʧʉʊʋⱱʌɣɤʍχʎʏʑʐʒʔʡʕʢǀǁǂǃˈˌːˑʼʴʰʱʲʷˠˤ˞↓↑→↗↘'̩'ᵻ" +) + + +# Export all symbols: +symbols = [_pad] + list(_punctuation) + list(_letters) + list(_letters_ipa) + +# Special symbol ids +SPACE_ID = symbols.index(" ") diff --git a/third_party/Matcha-TTS/matcha/train.py b/third_party/Matcha-TTS/matcha/train.py new file mode 100644 index 0000000..d1d64c6 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/train.py @@ -0,0 +1,122 @@ +from typing import Any, Dict, List, Optional, Tuple + +import hydra +import lightning as L +import rootutils +from lightning import Callback, LightningDataModule, LightningModule, Trainer +from lightning.pytorch.loggers import Logger +from omegaconf import DictConfig + +from matcha import utils + +rootutils.setup_root(__file__, indicator=".project-root", pythonpath=True) +# ------------------------------------------------------------------------------------ # +# the setup_root above is equivalent to: +# - adding project root dir to PYTHONPATH +# (so you don't need to force user to install project as a package) +# (necessary before importing any local modules e.g. `from src import utils`) +# - setting up PROJECT_ROOT environment variable +# (which is used as a base for paths in "configs/paths/default.yaml") +# (this way all filepaths are the same no matter where you run the code) +# - loading environment variables from ".env" in root dir +# +# you can remove it if you: +# 1. either install project as a package or move entry files to project root dir +# 2. set `root_dir` to "." in "configs/paths/default.yaml" +# +# more info: https://github.com/ashleve/rootutils +# ------------------------------------------------------------------------------------ # + + +log = utils.get_pylogger(__name__) + + +@utils.task_wrapper +def train(cfg: DictConfig) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """Trains the model. Can additionally evaluate on a testset, using best weights obtained during + training. + + This method is wrapped in optional @task_wrapper decorator, that controls the behavior during + failure. Useful for multiruns, saving info about the crash, etc. + + :param cfg: A DictConfig configuration composed by Hydra. + :return: A tuple with metrics and dict with all instantiated objects. 
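Most objects in the body below are built straight from the Hydra config through their _target_ key; a minimal, self-contained illustration (the config values here are invented, the real ones live under configs/):

    import hydra
    import torch
    from omegaconf import OmegaConf

    model = torch.nn.Linear(4, 4)
    opt_cfg = OmegaConf.create({"_target_": "torch.optim.Adam", "lr": 1e-4})
    optimizer = hydra.utils.instantiate(opt_cfg, params=model.parameters())
    print(type(optimizer).__name__)   # Adam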
+ """ + # set seed for random number generators in pytorch, numpy and python.random + if cfg.get("seed"): + L.seed_everything(cfg.seed, workers=True) + + log.info(f"Instantiating datamodule <{cfg.data._target_}>") # pylint: disable=protected-access + datamodule: LightningDataModule = hydra.utils.instantiate(cfg.data) + + log.info(f"Instantiating model <{cfg.model._target_}>") # pylint: disable=protected-access + model: LightningModule = hydra.utils.instantiate(cfg.model) + + log.info("Instantiating callbacks...") + callbacks: List[Callback] = utils.instantiate_callbacks(cfg.get("callbacks")) + + log.info("Instantiating loggers...") + logger: List[Logger] = utils.instantiate_loggers(cfg.get("logger")) + + log.info(f"Instantiating trainer <{cfg.trainer._target_}>") # pylint: disable=protected-access + trainer: Trainer = hydra.utils.instantiate(cfg.trainer, callbacks=callbacks, logger=logger) + + object_dict = { + "cfg": cfg, + "datamodule": datamodule, + "model": model, + "callbacks": callbacks, + "logger": logger, + "trainer": trainer, + } + + if logger: + log.info("Logging hyperparameters!") + utils.log_hyperparameters(object_dict) + + if cfg.get("train"): + log.info("Starting training!") + trainer.fit(model=model, datamodule=datamodule, ckpt_path=cfg.get("ckpt_path")) + + train_metrics = trainer.callback_metrics + + if cfg.get("test"): + log.info("Starting testing!") + ckpt_path = trainer.checkpoint_callback.best_model_path + if ckpt_path == "": + log.warning("Best ckpt not found! Using current weights for testing...") + ckpt_path = None + trainer.test(model=model, datamodule=datamodule, ckpt_path=ckpt_path) + log.info(f"Best ckpt path: {ckpt_path}") + + test_metrics = trainer.callback_metrics + + # merge train and test metrics + metric_dict = {**train_metrics, **test_metrics} + + return metric_dict, object_dict + + +@hydra.main(version_base="1.3", config_path="../configs", config_name="train.yaml") +def main(cfg: DictConfig) -> Optional[float]: + """Main entry point for training. + + :param cfg: DictConfig configuration composed by Hydra. + :return: Optional[float] with optimized metric value. + """ + # apply extra utilities + # (e.g. ask for tags if none are provided in cfg, print cfg tree, etc.) 
+ utils.extras(cfg) + + # train the model + metric_dict, _ = train(cfg) + + # safely retrieve metric value for hydra-based hyperparameter optimization + metric_value = utils.get_metric_value(metric_dict=metric_dict, metric_name=cfg.get("optimized_metric")) + + # return optimized metric + return metric_value + + +if __name__ == "__main__": + main() # pylint: disable=no-value-for-parameter diff --git a/third_party/Matcha-TTS/matcha/utils/__init__.py b/third_party/Matcha-TTS/matcha/utils/__init__.py new file mode 100644 index 0000000..074db64 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/__init__.py @@ -0,0 +1,5 @@ +from matcha.utils.instantiators import instantiate_callbacks, instantiate_loggers +from matcha.utils.logging_utils import log_hyperparameters +from matcha.utils.pylogger import get_pylogger +from matcha.utils.rich_utils import enforce_tags, print_config_tree +from matcha.utils.utils import extras, get_metric_value, task_wrapper diff --git a/third_party/Matcha-TTS/matcha/utils/audio.py b/third_party/Matcha-TTS/matcha/utils/audio.py new file mode 100644 index 0000000..0bcd74d --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/audio.py @@ -0,0 +1,82 @@ +import numpy as np +import torch +import torch.utils.data +from librosa.filters import mel as librosa_mel_fn +from scipy.io.wavfile import read + +MAX_WAV_VALUE = 32768.0 + + +def load_wav(full_path): + sampling_rate, data = read(full_path) + return data, sampling_rate + + +def dynamic_range_compression(x, C=1, clip_val=1e-5): + return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) + + +def dynamic_range_decompression(x, C=1): + return np.exp(x) / C + + +def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): + return torch.log(torch.clamp(x, min=clip_val) * C) + + +def dynamic_range_decompression_torch(x, C=1): + return torch.exp(x) / C + + +def spectral_normalize_torch(magnitudes): + output = dynamic_range_compression_torch(magnitudes) + return output + + +def spectral_de_normalize_torch(magnitudes): + output = dynamic_range_decompression_torch(magnitudes) + return output + + +mel_basis = {} +hann_window = {} + + +def mel_spectrogram(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): + if torch.min(y) < -1.0: + print("min value is ", torch.min(y)) + if torch.max(y) > 1.0: + print("max value is ", torch.max(y)) + + global mel_basis, hann_window # pylint: disable=global-statement + if f"{str(fmax)}_{str(y.device)}" not in mel_basis: + mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) + mel_basis[str(fmax) + "_" + str(y.device)] = torch.from_numpy(mel).float().to(y.device) + hann_window[str(y.device)] = torch.hann_window(win_size).to(y.device) + + y = torch.nn.functional.pad( + y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect" + ) + y = y.squeeze(1) + + spec = torch.view_as_real( + torch.stft( + y, + n_fft, + hop_length=hop_size, + win_length=win_size, + window=hann_window[str(y.device)], + center=center, + pad_mode="reflect", + normalized=False, + onesided=True, + return_complex=True, + ) + ) + + spec = torch.sqrt(spec.pow(2).sum(-1) + (1e-9)) + + spec = torch.matmul(mel_basis[str(fmax) + "_" + str(y.device)], spec) + spec = spectral_normalize_torch(spec) + + return spec diff --git a/third_party/Matcha-TTS/matcha/utils/generate_data_statistics.py b/third_party/Matcha-TTS/matcha/utils/generate_data_statistics.py new file mode 100644 index 0000000..96a5382 --- /dev/null +++ 
b/third_party/Matcha-TTS/matcha/utils/generate_data_statistics.py @@ -0,0 +1,111 @@ +r""" +The file creates a pickle file where the values needed for loading of dataset is stored and the model can load it +when needed. + +Parameters from hparam.py will be used +""" +import argparse +import json +import os +import sys +from pathlib import Path + +import rootutils +import torch +from hydra import compose, initialize +from omegaconf import open_dict +from tqdm.auto import tqdm + +from matcha.data.text_mel_datamodule import TextMelDataModule +from matcha.utils.logging_utils import pylogger + +log = pylogger.get_pylogger(__name__) + + +def compute_data_statistics(data_loader: torch.utils.data.DataLoader, out_channels: int): + """Generate data mean and standard deviation helpful in data normalisation + + Args: + data_loader (torch.utils.data.Dataloader): _description_ + out_channels (int): mel spectrogram channels + """ + total_mel_sum = 0 + total_mel_sq_sum = 0 + total_mel_len = 0 + + for batch in tqdm(data_loader, leave=False): + mels = batch["y"] + mel_lengths = batch["y_lengths"] + + total_mel_len += torch.sum(mel_lengths) + total_mel_sum += torch.sum(mels) + total_mel_sq_sum += torch.sum(torch.pow(mels, 2)) + + data_mean = total_mel_sum / (total_mel_len * out_channels) + data_std = torch.sqrt((total_mel_sq_sum / (total_mel_len * out_channels)) - torch.pow(data_mean, 2)) + + return {"mel_mean": data_mean.item(), "mel_std": data_std.item()} + + +def main(): + parser = argparse.ArgumentParser() + + parser.add_argument( + "-i", + "--input-config", + type=str, + default="vctk.yaml", + help="The name of the yaml config file under configs/data", + ) + + parser.add_argument( + "-b", + "--batch-size", + type=int, + default="256", + help="Can have increased batch size for faster computation", + ) + + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + required=False, + help="force overwrite the file", + ) + args = parser.parse_args() + output_file = Path(args.input_config).with_suffix(".json") + + if os.path.exists(output_file) and not args.force: + print("File already exists. Use -f to force overwrite") + sys.exit(1) + + with initialize(version_base="1.3", config_path="../../configs/data"): + cfg = compose(config_name=args.input_config, return_hydra_config=True, overrides=[]) + + root_path = rootutils.find_root(search_from=__file__, indicator=".project-root") + + with open_dict(cfg): + del cfg["hydra"] + del cfg["_target_"] + cfg["data_statistics"] = None + cfg["seed"] = 1234 + cfg["batch_size"] = args.batch_size + cfg["train_filelist_path"] = str(os.path.join(root_path, cfg["train_filelist_path"])) + cfg["valid_filelist_path"] = str(os.path.join(root_path, cfg["valid_filelist_path"])) + + text_mel_datamodule = TextMelDataModule(**cfg) + text_mel_datamodule.setup() + data_loader = text_mel_datamodule.train_dataloader() + log.info("Dataloader loaded! 
Now computing stats...") + params = compute_data_statistics(data_loader, cfg["n_feats"]) + print(params) + json.dump( + params, + open(output_file, "w"), + ) + + +if __name__ == "__main__": + main() diff --git a/third_party/Matcha-TTS/matcha/utils/instantiators.py b/third_party/Matcha-TTS/matcha/utils/instantiators.py new file mode 100644 index 0000000..5547b4e --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/instantiators.py @@ -0,0 +1,56 @@ +from typing import List + +import hydra +from lightning import Callback +from lightning.pytorch.loggers import Logger +from omegaconf import DictConfig + +from matcha.utils import pylogger + +log = pylogger.get_pylogger(__name__) + + +def instantiate_callbacks(callbacks_cfg: DictConfig) -> List[Callback]: + """Instantiates callbacks from config. + + :param callbacks_cfg: A DictConfig object containing callback configurations. + :return: A list of instantiated callbacks. + """ + callbacks: List[Callback] = [] + + if not callbacks_cfg: + log.warning("No callback configs found! Skipping..") + return callbacks + + if not isinstance(callbacks_cfg, DictConfig): + raise TypeError("Callbacks config must be a DictConfig!") + + for _, cb_conf in callbacks_cfg.items(): + if isinstance(cb_conf, DictConfig) and "_target_" in cb_conf: + log.info(f"Instantiating callback <{cb_conf._target_}>") # pylint: disable=protected-access + callbacks.append(hydra.utils.instantiate(cb_conf)) + + return callbacks + + +def instantiate_loggers(logger_cfg: DictConfig) -> List[Logger]: + """Instantiates loggers from config. + + :param logger_cfg: A DictConfig object containing logger configurations. + :return: A list of instantiated loggers. + """ + logger: List[Logger] = [] + + if not logger_cfg: + log.warning("No logger configs found! Skipping...") + return logger + + if not isinstance(logger_cfg, DictConfig): + raise TypeError("Logger config must be a DictConfig!") + + for _, lg_conf in logger_cfg.items(): + if isinstance(lg_conf, DictConfig) and "_target_" in lg_conf: + log.info(f"Instantiating logger <{lg_conf._target_}>") # pylint: disable=protected-access + logger.append(hydra.utils.instantiate(lg_conf)) + + return logger diff --git a/third_party/Matcha-TTS/matcha/utils/logging_utils.py b/third_party/Matcha-TTS/matcha/utils/logging_utils.py new file mode 100644 index 0000000..1a12d1d --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/logging_utils.py @@ -0,0 +1,53 @@ +from typing import Any, Dict + +from lightning.pytorch.utilities import rank_zero_only +from omegaconf import OmegaConf + +from matcha.utils import pylogger + +log = pylogger.get_pylogger(__name__) + + +@rank_zero_only +def log_hyperparameters(object_dict: Dict[str, Any]) -> None: + """Controls which config parts are saved by Lightning loggers. + + Additionally saves: + - Number of model parameters + + :param object_dict: A dictionary containing the following objects: + - `"cfg"`: A DictConfig object containing the main config. + - `"model"`: The Lightning model. + - `"trainer"`: The Lightning trainer. + """ + hparams = {} + + cfg = OmegaConf.to_container(object_dict["cfg"]) + model = object_dict["model"] + trainer = object_dict["trainer"] + + if not trainer.logger: + log.warning("Logger not found! 
Skipping hyperparameter logging...") + return + + hparams["model"] = cfg["model"] + + # save number of model parameters + hparams["model/params/total"] = sum(p.numel() for p in model.parameters()) + hparams["model/params/trainable"] = sum(p.numel() for p in model.parameters() if p.requires_grad) + hparams["model/params/non_trainable"] = sum(p.numel() for p in model.parameters() if not p.requires_grad) + + hparams["data"] = cfg["data"] + hparams["trainer"] = cfg["trainer"] + + hparams["callbacks"] = cfg.get("callbacks") + hparams["extras"] = cfg.get("extras") + + hparams["task_name"] = cfg.get("task_name") + hparams["tags"] = cfg.get("tags") + hparams["ckpt_path"] = cfg.get("ckpt_path") + hparams["seed"] = cfg.get("seed") + + # send hparams to all loggers + for logger in trainer.loggers: + logger.log_hyperparams(hparams) diff --git a/third_party/Matcha-TTS/matcha/utils/model.py b/third_party/Matcha-TTS/matcha/utils/model.py new file mode 100644 index 0000000..869cc60 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/model.py @@ -0,0 +1,90 @@ +""" from https://github.com/jaywalnut310/glow-tts """ + +import numpy as np +import torch + + +def sequence_mask(length, max_length=None): + if max_length is None: + max_length = length.max() + x = torch.arange(max_length, dtype=length.dtype, device=length.device) + return x.unsqueeze(0) < length.unsqueeze(1) + + +def fix_len_compatibility(length, num_downsamplings_in_unet=2): + factor = torch.scalar_tensor(2).pow(num_downsamplings_in_unet) + length = (length / factor).ceil() * factor + if not torch.onnx.is_in_onnx_export(): + return length.int().item() + else: + return length + + +def convert_pad_shape(pad_shape): + inverted_shape = pad_shape[::-1] + pad_shape = [item for sublist in inverted_shape for item in sublist] + return pad_shape + + +def generate_path(duration, mask): + device = duration.device + + b, t_x, t_y = mask.shape + cum_duration = torch.cumsum(duration, 1) + path = torch.zeros(b, t_x, t_y, dtype=mask.dtype).to(device=device) + + cum_duration_flat = cum_duration.view(b * t_x) + path = sequence_mask(cum_duration_flat, t_y).to(mask.dtype) + path = path.view(b, t_x, t_y) + path = path - torch.nn.functional.pad(path, convert_pad_shape([[0, 0], [1, 0], [0, 0]]))[:, :-1] + path = path * mask + return path + + +def duration_loss(logw, logw_, lengths): + loss = torch.sum((logw - logw_) ** 2) / torch.sum(lengths) + return loss + + +def normalize(data, mu, std): + if not isinstance(mu, (float, int)): + if isinstance(mu, list): + mu = torch.tensor(mu, dtype=data.dtype, device=data.device) + elif isinstance(mu, torch.Tensor): + mu = mu.to(data.device) + elif isinstance(mu, np.ndarray): + mu = torch.from_numpy(mu).to(data.device) + mu = mu.unsqueeze(-1) + + if not isinstance(std, (float, int)): + if isinstance(std, list): + std = torch.tensor(std, dtype=data.dtype, device=data.device) + elif isinstance(std, torch.Tensor): + std = std.to(data.device) + elif isinstance(std, np.ndarray): + std = torch.from_numpy(std).to(data.device) + std = std.unsqueeze(-1) + + return (data - mu) / std + + +def denormalize(data, mu, std): + if not isinstance(mu, float): + if isinstance(mu, list): + mu = torch.tensor(mu, dtype=data.dtype, device=data.device) + elif isinstance(mu, torch.Tensor): + mu = mu.to(data.device) + elif isinstance(mu, np.ndarray): + mu = torch.from_numpy(mu).to(data.device) + mu = mu.unsqueeze(-1) + + if not isinstance(std, float): + if isinstance(std, list): + std = torch.tensor(std, dtype=data.dtype, device=data.device) + elif 
isinstance(std, torch.Tensor): + std = std.to(data.device) + elif isinstance(std, np.ndarray): + std = torch.from_numpy(std).to(data.device) + std = std.unsqueeze(-1) + + return data * std + mu diff --git a/third_party/Matcha-TTS/matcha/utils/monotonic_align/__init__.py b/third_party/Matcha-TTS/matcha/utils/monotonic_align/__init__.py new file mode 100644 index 0000000..eee6e0d --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/monotonic_align/__init__.py @@ -0,0 +1,22 @@ +import numpy as np +import torch + +from matcha.utils.monotonic_align.core import maximum_path_c + + +def maximum_path(value, mask): + """Cython optimised version. + value: [b, t_x, t_y] + mask: [b, t_x, t_y] + """ + value = value * mask + device = value.device + dtype = value.dtype + value = value.data.cpu().numpy().astype(np.float32) + path = np.zeros_like(value).astype(np.int32) + mask = mask.data.cpu().numpy() + + t_x_max = mask.sum(1)[:, 0].astype(np.int32) + t_y_max = mask.sum(2)[:, 0].astype(np.int32) + maximum_path_c(path, value, t_x_max, t_y_max) + return torch.from_numpy(path).to(device=device, dtype=dtype) diff --git a/third_party/Matcha-TTS/matcha/utils/monotonic_align/core.pyx b/third_party/Matcha-TTS/matcha/utils/monotonic_align/core.pyx new file mode 100644 index 0000000..091fcc3 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/monotonic_align/core.pyx @@ -0,0 +1,47 @@ +import numpy as np + +cimport cython +cimport numpy as np + +from cython.parallel import prange + + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void maximum_path_each(int[:,::1] path, float[:,::1] value, int t_x, int t_y, float max_neg_val) nogil: + cdef int x + cdef int y + cdef float v_prev + cdef float v_cur + cdef float tmp + cdef int index = t_x - 1 + + for y in range(t_y): + for x in range(max(0, t_x + y - t_y), min(t_x, y + 1)): + if x == y: + v_cur = max_neg_val + else: + v_cur = value[x, y-1] + if x == 0: + if y == 0: + v_prev = 0. + else: + v_prev = max_neg_val + else: + v_prev = value[x-1, y-1] + value[x, y] = max(v_cur, v_prev) + value[x, y] + + for y in range(t_y - 1, -1, -1): + path[index, y] = 1 + if index != 0 and (index == y or value[index, y-1] < value[index-1, y-1]): + index = index - 1 + + +@cython.boundscheck(False) +@cython.wraparound(False) +cpdef void maximum_path_c(int[:,:,::1] paths, float[:,:,::1] values, int[::1] t_xs, int[::1] t_ys, float max_neg_val=-1e9) nogil: + cdef int b = values.shape[0] + + cdef int i + for i in prange(b, nogil=True): + maximum_path_each(paths[i], values[i], t_xs[i], t_ys[i], max_neg_val) diff --git a/third_party/Matcha-TTS/matcha/utils/monotonic_align/setup.py b/third_party/Matcha-TTS/matcha/utils/monotonic_align/setup.py new file mode 100644 index 0000000..f22bc6a --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/monotonic_align/setup.py @@ -0,0 +1,7 @@ +# from distutils.core import setup +# from Cython.Build import cythonize +# import numpy + +# setup(name='monotonic_align', +# ext_modules=cythonize("core.pyx"), +# include_dirs=[numpy.get_include()]) diff --git a/third_party/Matcha-TTS/matcha/utils/pylogger.py b/third_party/Matcha-TTS/matcha/utils/pylogger.py new file mode 100644 index 0000000..6160067 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/pylogger.py @@ -0,0 +1,21 @@ +import logging + +from lightning.pytorch.utilities import rank_zero_only + + +def get_pylogger(name: str = __name__) -> logging.Logger: + """Initializes a multi-GPU-friendly python command line logger. + + :param name: The name of the logger, defaults to ``__name__``. 
+ + :return: A logger object. + """ + logger = logging.getLogger(name) + + # this ensures all logging levels get marked with the rank zero decorator + # otherwise logs would get multiplied for each GPU process in multi-GPU setup + logging_levels = ("debug", "info", "warning", "error", "exception", "fatal", "critical") + for level in logging_levels: + setattr(logger, level, rank_zero_only(getattr(logger, level))) + + return logger diff --git a/third_party/Matcha-TTS/matcha/utils/rich_utils.py b/third_party/Matcha-TTS/matcha/utils/rich_utils.py new file mode 100644 index 0000000..f602f6e --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/rich_utils.py @@ -0,0 +1,101 @@ +from pathlib import Path +from typing import Sequence + +import rich +import rich.syntax +import rich.tree +from hydra.core.hydra_config import HydraConfig +from lightning.pytorch.utilities import rank_zero_only +from omegaconf import DictConfig, OmegaConf, open_dict +from rich.prompt import Prompt + +from matcha.utils import pylogger + +log = pylogger.get_pylogger(__name__) + + +@rank_zero_only +def print_config_tree( + cfg: DictConfig, + print_order: Sequence[str] = ( + "data", + "model", + "callbacks", + "logger", + "trainer", + "paths", + "extras", + ), + resolve: bool = False, + save_to_file: bool = False, +) -> None: + """Prints the contents of a DictConfig as a tree structure using the Rich library. + + :param cfg: A DictConfig composed by Hydra. + :param print_order: Determines in what order config components are printed. Default is ``("data", "model", + "callbacks", "logger", "trainer", "paths", "extras")``. + :param resolve: Whether to resolve reference fields of DictConfig. Default is ``False``. + :param save_to_file: Whether to export config to the hydra output folder. Default is ``False``. + """ + style = "dim" + tree = rich.tree.Tree("CONFIG", style=style, guide_style=style) + + queue = [] + + # add fields from `print_order` to queue + for field in print_order: + _ = ( + queue.append(field) + if field in cfg + else log.warning(f"Field '{field}' not found in config. Skipping '{field}' config printing...") + ) + + # add all the other fields to queue (not specified in `print_order`) + for field in cfg: + if field not in queue: + queue.append(field) + + # generate config tree from queue + for field in queue: + branch = tree.add(field, style=style, guide_style=style) + + config_group = cfg[field] + if isinstance(config_group, DictConfig): + branch_content = OmegaConf.to_yaml(config_group, resolve=resolve) + else: + branch_content = str(config_group) + + branch.add(rich.syntax.Syntax(branch_content, "yaml")) + + # print config tree + rich.print(tree) + + # save config tree to file + if save_to_file: + with open(Path(cfg.paths.output_dir, "config_tree.log"), "w") as file: + rich.print(tree, file=file) + + +@rank_zero_only +def enforce_tags(cfg: DictConfig, save_to_file: bool = False) -> None: + """Prompts user to input tags from command line if no tags are provided in config. + + :param cfg: A DictConfig composed by Hydra. + :param save_to_file: Whether to export tags to the hydra output folder. Default is ``False``. + """ + if not cfg.get("tags"): + if "id" in HydraConfig().cfg.hydra.job: + raise ValueError("Specify tags before launching a multirun!") + + log.warning("No tags provided in config. 
Prompting user to input tags...") + tags = Prompt.ask("Enter a list of comma separated tags", default="dev") + tags = [t.strip() for t in tags.split(",") if t != ""] + + with open_dict(cfg): + cfg.tags = tags + + log.info(f"Tags: {cfg.tags}") + + if save_to_file: + with open(Path(cfg.paths.output_dir, "tags.log"), "w") as file: + rich.print(cfg.tags, file=file) diff --git a/third_party/Matcha-TTS/matcha/utils/utils.py b/third_party/Matcha-TTS/matcha/utils/utils.py new file mode 100644 index 0000000..af65e09 --- /dev/null +++ b/third_party/Matcha-TTS/matcha/utils/utils.py @@ -0,0 +1,219 @@ +import os +import sys +import warnings +from importlib.util import find_spec +from pathlib import Path +from typing import Any, Callable, Dict, Tuple + +import gdown +import matplotlib.pyplot as plt +import numpy as np +import torch +import wget +from omegaconf import DictConfig + +from matcha.utils import pylogger, rich_utils + +log = pylogger.get_pylogger(__name__) + + +def extras(cfg: DictConfig) -> None: + """Applies optional utilities before the task is started. + + Utilities: + - Ignoring python warnings + - Setting tags from command line + - Rich config printing + + :param cfg: A DictConfig object containing the config tree. + """ + # return if no `extras` config + if not cfg.get("extras"): + log.warning("Extras config not found! ") + return + + # disable python warnings + if cfg.extras.get("ignore_warnings"): + log.info("Disabling python warnings! ") + warnings.filterwarnings("ignore") + + # prompt user to input tags from command line if none are provided in the config + if cfg.extras.get("enforce_tags"): + log.info("Enforcing tags! ") + rich_utils.enforce_tags(cfg, save_to_file=True) + + # pretty print config tree using Rich library + if cfg.extras.get("print_config"): + log.info("Printing config tree with Rich! ") + rich_utils.print_config_tree(cfg, resolve=True, save_to_file=True) + + +def task_wrapper(task_func: Callable) -> Callable: + """Optional decorator that controls the failure behavior when executing the task function. + + This wrapper can be used to: + - make sure loggers are closed even if the task function raises an exception (prevents multirun failure) + - save the exception to a `.log` file + - mark the run as failed with a dedicated file in the `logs/` folder (so we can find and rerun it later) + - etc. (adjust depending on your needs) + + Example: + ``` + @utils.task_wrapper + def train(cfg: DictConfig) -> Tuple[Dict[str, Any], Dict[str, Any]]: + ... + return metric_dict, object_dict + ``` + + :param task_func: The task function to be wrapped. + + :return: The wrapped task function. 
+ """ + + def wrap(cfg: DictConfig) -> Tuple[Dict[str, Any], Dict[str, Any]]: + # execute the task + try: + metric_dict, object_dict = task_func(cfg=cfg) + + # things to do if exception occurs + except Exception as ex: + # save exception to `.log` file + log.exception("") + + # some hyperparameter combinations might be invalid or cause out-of-memory errors + # so when using hparam search plugins like Optuna, you might want to disable + # raising the below exception to avoid multirun failure + raise ex + + # things to always do after either success or exception + finally: + # display output dir path in terminal + log.info(f"Output dir: {cfg.paths.output_dir}") + + # always close wandb run (even if exception occurs so multirun won't fail) + if find_spec("wandb"): # check if wandb is installed + import wandb + + if wandb.run: + log.info("Closing wandb!") + wandb.finish() + + return metric_dict, object_dict + + return wrap + + +def get_metric_value(metric_dict: Dict[str, Any], metric_name: str) -> float: + """Safely retrieves value of the metric logged in LightningModule. + + :param metric_dict: A dict containing metric values. + :param metric_name: The name of the metric to retrieve. + :return: The value of the metric. + """ + if not metric_name: + log.info("Metric name is None! Skipping metric value retrieval...") + return None + + if metric_name not in metric_dict: + raise ValueError( + f"Metric value not found! \n" + "Make sure metric name logged in LightningModule is correct!\n" + "Make sure `optimized_metric` name in `hparams_search` config is correct!" + ) + + metric_value = metric_dict[metric_name].item() + log.info(f"Retrieved metric value! <{metric_name}={metric_value}>") + + return metric_value + + +def intersperse(lst, item): + # Adds blank symbol + result = [item] * (len(lst) * 2 + 1) + result[1::2] = lst + return result + + +def save_figure_to_numpy(fig): + data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") + data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) + return data + + +def plot_tensor(tensor): + plt.style.use("default") + fig, ax = plt.subplots(figsize=(12, 3)) + im = ax.imshow(tensor, aspect="auto", origin="lower", interpolation="none") + plt.colorbar(im, ax=ax) + plt.tight_layout() + fig.canvas.draw() + data = save_figure_to_numpy(fig) + plt.close() + return data + + +def save_plot(tensor, savepath): + plt.style.use("default") + fig, ax = plt.subplots(figsize=(12, 3)) + im = ax.imshow(tensor, aspect="auto", origin="lower", interpolation="none") + plt.colorbar(im, ax=ax) + plt.tight_layout() + fig.canvas.draw() + plt.savefig(savepath) + plt.close() + + +def to_numpy(tensor): + if isinstance(tensor, np.ndarray): + return tensor + elif isinstance(tensor, torch.Tensor): + return tensor.detach().cpu().numpy() + elif isinstance(tensor, list): + return np.array(tensor) + else: + raise TypeError("Unsupported type for conversion to numpy array") + + +def get_user_data_dir(appname="matcha_tts"): + """ + Args: + appname (str): Name of application + + Returns: + Path: path to user data directory + """ + + MATCHA_HOME = os.environ.get("MATCHA_HOME") + if MATCHA_HOME is not None: + ans = Path(MATCHA_HOME).expanduser().resolve(strict=False) + elif sys.platform == "win32": + import winreg # pylint: disable=import-outside-toplevel + + key = winreg.OpenKey( + winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders", + ) + dir_, _ = winreg.QueryValueEx(key, "Local AppData") + ans = Path(dir_).resolve(strict=False) + 
elif sys.platform == "darwin": + ans = Path("~/Library/Application Support/").expanduser() + else: + ans = Path.home().joinpath(".local/share") + + final_path = ans.joinpath(appname) + final_path.mkdir(parents=True, exist_ok=True) + return final_path + + +def assert_model_downloaded(checkpoint_path, url, use_wget=True): + if Path(checkpoint_path).exists(): + log.debug(f"[+] Model already present at {checkpoint_path}!") + print(f"[+] Model already present at {checkpoint_path}!") + return + log.info(f"[-] Model not found at {checkpoint_path}! Will download it") + print(f"[-] Model not found at {checkpoint_path}! Will download it") + checkpoint_path = str(checkpoint_path) + if not use_wget: + gdown.download(url=url, output=checkpoint_path, quiet=False, fuzzy=True) + else: + wget.download(url=url, out=checkpoint_path) diff --git a/third_party/Matcha-TTS/notebooks/.gitkeep b/third_party/Matcha-TTS/notebooks/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/third_party/Matcha-TTS/pyproject.toml b/third_party/Matcha-TTS/pyproject.toml new file mode 100644 index 0000000..74aa393 --- /dev/null +++ b/third_party/Matcha-TTS/pyproject.toml @@ -0,0 +1,51 @@ +[build-system] +requires = ["setuptools", "wheel", "cython==0.29.35", "numpy==1.24.3", "packaging"] + +[tool.black] +line-length = 120 +target-version = ['py310'] +exclude = ''' + +( + /( + \.eggs # exclude a few common directories in the + | \.git # root of the project + | \.hg + | \.mypy_cache + | \.tox + | \.venv + | _build + | buck-out + | build + | dist + )/ + | foo.py # also separately exclude a file named foo.py in + # the root of the project +) +''' + +[tool.pytest.ini_options] +addopts = [ + "--color=yes", + "--durations=0", + "--strict-markers", + "--doctest-modules", +] +filterwarnings = [ + "ignore::DeprecationWarning", + "ignore::UserWarning", +] +log_cli = "True" +markers = [ + "slow: slow tests", +] +minversion = "6.0" +testpaths = "tests/" + +[tool.coverage.report] +exclude_lines = [ + "pragma: nocover", + "raise NotImplementedError", + "raise NotImplementedError()", + "if __name__ == .__main__.:", +] diff --git a/third_party/Matcha-TTS/requirements.txt b/third_party/Matcha-TTS/requirements.txt new file mode 100644 index 0000000..3e14a53 --- /dev/null +++ b/third_party/Matcha-TTS/requirements.txt @@ -0,0 +1,45 @@ +# --------- pytorch --------- # +torch>=2.0.0 +torchvision>=0.15.0 +lightning>=2.0.0 +torchmetrics>=0.11.4 + +# --------- hydra --------- # +hydra-core==1.3.2 +hydra-colorlog==1.2.0 +hydra-optuna-sweeper==1.2.0 + +# --------- loggers --------- # +# wandb +# neptune-client +# mlflow +# comet-ml +# aim>=3.16.2 # no lower than 3.16.2, see https://github.com/aimhubio/aim/issues/2550 + +# --------- others --------- # +rootutils # standardizing the project root setup +pre-commit # hooks for applying linters on commit +rich # beautiful text formatting in terminal +pytest # tests +# sh # for running bash commands in some tests (linux/macos only) +phonemizer # phonemization of text +tensorboard +librosa +Cython +numpy +einops +inflect +Unidecode +scipy +torchaudio +matplotlib +pandas +conformer==0.3.2 +diffusers==0.25.0 +notebook +ipywidgets +gradio==3.43.2 +gdown +wget +seaborn +piper_phonemize diff --git a/third_party/Matcha-TTS/scripts/schedule.sh b/third_party/Matcha-TTS/scripts/schedule.sh new file mode 100644 index 0000000..44b3da1 --- /dev/null +++ b/third_party/Matcha-TTS/scripts/schedule.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Schedule execution of many runs +# Run from root folder with: bash scripts/schedule.sh 
+ +python src/train.py trainer.max_epochs=5 logger=csv + +python src/train.py trainer.max_epochs=10 logger=csv diff --git a/third_party/Matcha-TTS/setup.py b/third_party/Matcha-TTS/setup.py new file mode 100644 index 0000000..80d4aac --- /dev/null +++ b/third_party/Matcha-TTS/setup.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +import os + +import numpy +from Cython.Build import cythonize +from setuptools import Extension, find_packages, setup + +exts = [ + Extension( + name="matcha.utils.monotonic_align.core", + sources=["matcha/utils/monotonic_align/core.pyx"], + ) +] + +with open("README.md", encoding="utf-8") as readme_file: + README = readme_file.read() + +cwd = os.path.dirname(os.path.abspath(__file__)) +with open(os.path.join(cwd, "matcha", "VERSION")) as fin: + version = fin.read().strip() + +setup( + name="matcha-tts", + version=version, + description="🍵 Matcha-TTS: A fast TTS architecture with conditional flow matching", + long_description=README, + long_description_content_type="text/markdown", + author="Shivam Mehta", + author_email="shivam.mehta25@gmail.com", + url="https://shivammehta25.github.io/Matcha-TTS", + install_requires=[str(r) for r in open(os.path.join(os.path.dirname(__file__), "requirements.txt"))], + include_dirs=[numpy.get_include()], + include_package_data=True, + packages=find_packages(exclude=["tests", "tests/*", "examples", "examples/*"]), + # use this to customize global commands available in the terminal after installing the package + entry_points={ + "console_scripts": [ + "matcha-data-stats=matcha.utils.generate_data_statistics:main", + "matcha-tts=matcha.cli:cli", + "matcha-tts-app=matcha.app:main", + ] + }, + ext_modules=cythonize(exts, language_level=3), + python_requires=">=3.9.0", +) diff --git a/third_party/Matcha-TTS/synthesis.ipynb b/third_party/Matcha-TTS/synthesis.ipynb new file mode 100644 index 0000000..dfbde30 --- /dev/null +++ b/third_party/Matcha-TTS/synthesis.ipynb @@ -0,0 +1,419 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "f37f4e3b-f764-4502-a6a2-6417bd9bfab9", + "metadata": {}, + "source": [ + "# Matcha-TTS: A fast TTS architecture with conditional flow matching\n", + "---\n", + "[Shivam Mehta](https://www.kth.se/profile/smehta), [Ruibo Tu](https://www.kth.se/profile/ruibo), [Jonas Beskow](https://www.kth.se/profile/beskow), [Éva Székely](https://www.kth.se/profile/szekely), and [Gustav Eje Henter](https://people.kth.se/~ghe/)\n", + "\n", + "We introduce Matcha-TTS, a new encoder-decoder architecture for speedy TTS acoustic modelling, trained using optimal-transport conditional flow matching (OT-CFM). This yields an ODE-based decoder capable of high output quality in fewer synthesis steps than models trained using score matching. Careful design choices additionally ensure each synthesis step is fast to run. The method is probabilistic, non-autoregressive, and learns to speak from scratch without external alignments. 
Compared to strong pre-trained baseline models, the Matcha-TTS system has the smallest memory footprint, rivals the speed of the fastest models on long utterances, and attains the highest mean opinion score in a listening test.\n", + "\n", + "Demo Page: https://shivammehta25.github.io/Matcha-TTS \\\n", + "Code: https://github.com/shivammehta25/Matcha-TTS\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "148f4bc0-c28e-4670-9a5e-4c7928ab8992", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "env: CUDA_VISIBLE_DEVICES=0\n" + ] + } + ], + "source": [ + "%env CUDA_VISIBLE_DEVICES=0" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "8d5876c0-b47e-4c80-9e9c-62550f81b64e", + "metadata": {}, + "outputs": [], + "source": [ + "import datetime as dt\n", + "from pathlib import Path\n", + "\n", + "import IPython.display as ipd\n", + "import numpy as np\n", + "import soundfile as sf\n", + "import torch\n", + "from tqdm.auto import tqdm\n", + "\n", + "# Hifigan imports\n", + "from matcha.hifigan.config import v1\n", + "from matcha.hifigan.denoiser import Denoiser\n", + "from matcha.hifigan.env import AttrDict\n", + "from matcha.hifigan.models import Generator as HiFiGAN\n", + "# Matcha imports\n", + "from matcha.models.matcha_tts import MatchaTTS\n", + "from matcha.text import sequence_to_text, text_to_sequence\n", + "from matcha.utils.model import denormalize\n", + "from matcha.utils.utils import get_user_data_dir, intersperse" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "b1a30306-588c-4f22-8d9b-e2676880b0e5", + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "%matplotlib inline\n", + "# This allows for real time code changes being reflected in the notebook, no need to restart the kernel" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "a312856b-01a9-4d75-a4c8-4666dffa0692", + "metadata": {}, + "outputs": [], + "source": [ + "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")" + ] + }, + { + "cell_type": "markdown", + "id": "88f3b3c3-d014-443b-84eb-e143cdec3e21", + "metadata": {}, + "source": [ + "## Filepaths" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "7640a4c1-44ce-447c-a8ff-45012fb7bddd", + "metadata": {}, + "outputs": [], + "source": [ + "MATCHA_CHECKPOINT = get_user_data_dir()/\"matcha_ljspeech.ckpt\"\n", + "HIFIGAN_CHECKPOINT = get_user_data_dir() / \"hifigan_T2_v1\"\n", + "OUTPUT_FOLDER = \"synth_output\"" + ] + }, + { + "cell_type": "markdown", + "id": "6477a3a9-71f2-4d2f-bb86-bdf3e31c2461", + "metadata": {}, + "source": [ + "## Load Matcha-TTS" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "26a16230-04ba-4825-a844-2fb5ab945e24", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model loaded! Parameter count: 18,204,193\n" + ] + } + ], + "source": [ + "def load_model(checkpoint_path):\n", + " model = MatchaTTS.load_from_checkpoint(checkpoint_path, map_location=device)\n", + " model.eval()\n", + " return model\n", + "count_params = lambda x: f\"{sum(p.numel() for p in x.parameters()):,}\"\n", + "\n", + "\n", + "model = load_model(MATCHA_CHECKPOINT)\n", + "print(f\"Model loaded! 
Parameter count: {count_params(model)}\")" + ] + }, + { + "cell_type": "markdown", + "id": "3077b84b-e3b6-42e1-a84b-2f7084b13f92", + "metadata": {}, + "source": [ + "## Load HiFi-GAN (Vocoder)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "f6b68184-968d-4868-9029-f0c40e9e68af", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Removing weight norm...\n" + ] + } + ], + "source": [ + "def load_vocoder(checkpoint_path):\n", + " h = AttrDict(v1)\n", + " hifigan = HiFiGAN(h).to(device)\n", + " hifigan.load_state_dict(torch.load(checkpoint_path, map_location=device)['generator'])\n", + " _ = hifigan.eval()\n", + " hifigan.remove_weight_norm()\n", + " return hifigan\n", + "\n", + "vocoder = load_vocoder(HIFIGAN_CHECKPOINT)\n", + "denoiser = Denoiser(vocoder, mode='zeros')" + ] + }, + { + "cell_type": "markdown", + "id": "4cbc2ba0-09ff-40e2-9e60-6b77b534f9fb", + "metadata": {}, + "source": [ + "### Helper functions to synthesise" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "880a1879-24fd-4757-849c-850339120796", + "metadata": {}, + "outputs": [], + "source": [ + "@torch.inference_mode()\n", + "def process_text(text: str):\n", + " x = torch.tensor(intersperse(text_to_sequence(text, ['english_cleaners2']), 0),dtype=torch.long, device=device)[None]\n", + " x_lengths = torch.tensor([x.shape[-1]],dtype=torch.long, device=device)\n", + " x_phones = sequence_to_text(x.squeeze(0).tolist())\n", + " return {\n", + " 'x_orig': text,\n", + " 'x': x,\n", + " 'x_lengths': x_lengths,\n", + " 'x_phones': x_phones\n", + " }\n", + "\n", + "\n", + "@torch.inference_mode()\n", + "def synthesise(text, spks=None):\n", + " text_processed = process_text(text)\n", + " start_t = dt.datetime.now()\n", + " output = model.synthesise(\n", + " text_processed['x'], \n", + " text_processed['x_lengths'],\n", + " n_timesteps=n_timesteps,\n", + " temperature=temperature,\n", + " spks=spks,\n", + " length_scale=length_scale\n", + " )\n", + " # merge everything to one dict \n", + " output.update({'start_t': start_t, **text_processed})\n", + " return output\n", + "\n", + "@torch.inference_mode()\n", + "def to_waveform(mel, vocoder):\n", + " audio = vocoder(mel).clamp(-1, 1)\n", + " audio = denoiser(audio.squeeze(0), strength=0.00025).cpu().squeeze()\n", + " return audio.cpu().squeeze()\n", + " \n", + "def save_to_folder(filename: str, output: dict, folder: str):\n", + " folder = Path(folder)\n", + " folder.mkdir(exist_ok=True, parents=True)\n", + " np.save(folder / f'{filename}', output['mel'].cpu().numpy())\n", + " sf.write(folder / f'{filename}.wav', output['waveform'], 22050, 'PCM_24')" + ] + }, + { + "cell_type": "markdown", + "id": "78f857e3-2ef7-4c86-b776-596c4d3cf875", + "metadata": {}, + "source": [ + "## Setup text to synthesise" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "2e0a9acd-0845-4192-ba09-b9683e28a3ac", + "metadata": {}, + "outputs": [], + "source": [ + "texts = [\n", + " \"The Secret Service believed that it was very doubtful that any President would ride regularly in a vehicle with a fixed top, even though transparent.\"\n", + "]" + ] + }, + { + "cell_type": "markdown", + "id": "a9da9e2d-99b9-4c6f-8a08-c828e2cba121", + "metadata": {}, + "source": [ + "### Hyperparameters" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "f0d216e5-4895-4da8-9d24-9e61021d2556", + "metadata": {}, + "outputs": [], + "source": [ + "## Number of ODE Solver steps\n", + "n_timesteps = 10\n", + "\n", + 
"## Changes to the speaking rate\n", + "length_scale=1.0\n", + "\n", + "## Sampling temperature\n", + "temperature = 0.667" + ] + }, + { + "cell_type": "markdown", + "id": "b93aac89-c7f8-4975-8510-4e763c9689f4", + "metadata": {}, + "source": [ + "## Synthesis" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "5a227963-aa12-43b9-a706-1168b6fc0ba5", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8342d12401c54017b0e19b8d293a06bf", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/1 [00:00\n", + " \n", + " Your browser does not support the audio element.\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Number of ODE steps: 10\n", + "Mean RTF:\t\t\t\t0.017228 ± 0.000000\n", + "Mean RTF Waveform (incl. vocoder):\t0.021445 ± 0.000000\n" + ] + } + ], + "source": [ + "outputs, rtfs = [], []\n", + "rtfs_w = []\n", + "for i, text in enumerate(tqdm(texts)):\n", + " output = synthesise(text) #, torch.tensor([15], device=device, dtype=torch.long).unsqueeze(0))\n", + " output['waveform'] = to_waveform(output['mel'], vocoder)\n", + "\n", + " # Compute Real Time Factor (RTF) with HiFi-GAN\n", + " t = (dt.datetime.now() - output['start_t']).total_seconds()\n", + " rtf_w = t * 22050 / (output['waveform'].shape[-1])\n", + "\n", + " ## Pretty print\n", + " print(f\"{'*' * 53}\")\n", + " print(f\"Input text - {i}\")\n", + " print(f\"{'-' * 53}\")\n", + " print(output['x_orig'])\n", + " print(f\"{'*' * 53}\")\n", + " print(f\"Phonetised text - {i}\")\n", + " print(f\"{'-' * 53}\")\n", + " print(output['x_phones'])\n", + " print(f\"{'*' * 53}\")\n", + " print(f\"RTF:\\t\\t{output['rtf']:.6f}\")\n", + " print(f\"RTF Waveform:\\t{rtf_w:.6f}\")\n", + " rtfs.append(output['rtf'])\n", + " rtfs_w.append(rtf_w)\n", + "\n", + " ## Display the synthesised waveform\n", + " ipd.display(ipd.Audio(output['waveform'], rate=22050))\n", + "\n", + " ## Save the generated waveform\n", + " save_to_folder(i, output, OUTPUT_FOLDER)\n", + "\n", + "print(f\"Number of ODE steps: {n_timesteps}\")\n", + "print(f\"Mean RTF:\\t\\t\\t\\t{np.mean(rtfs):.6f} ± {np.std(rtfs):.6f}\")\n", + "print(f\"Mean RTF Waveform (incl. vocoder):\\t{np.mean(rtfs_w):.6f} ± {np.std(rtfs_w):.6f}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e3e85c3f-1623-4647-b40c-fa96907656fc", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tools/extract_embedding.py b/tools/extract_embedding.py new file mode 100644 index 0000000..02fa2f6 --- /dev/null +++ b/tools/extract_embedding.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import argparse +import torch +import torchaudio +from tqdm import tqdm +import onnxruntime +import torchaudio.compliance.kaldi as kaldi + + +def main(args): + utt2wav, utt2spk = {}, {} + with open('{}/wav.scp'.format(args.dir)) as f: + for l in f: + l = l.replace('\n', '').split() + utt2wav[l[0]] = l[1] + with open('{}/utt2spk'.format(args.dir)) as f: + for l in f: + l = l.replace('\n', '').split() + utt2spk[l[0]] = l[1] + + option = onnxruntime.SessionOptions() + option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL + option.intra_op_num_threads = 1 + providers = ["CPUExecutionProvider"] + ort_session = onnxruntime.InferenceSession(args.onnx_path, sess_options=option, providers=providers) + + utt2embedding, spk2embedding = {}, {} + for utt in tqdm(utt2wav.keys()): + audio, sample_rate = torchaudio.load(utt2wav[utt]) + if sample_rate != 16000: + audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio) + feat = kaldi.fbank(audio, + num_mel_bins=80, + dither=0, + sample_frequency=16000) + feat = feat - feat.mean(dim=0, keepdim=True) + embedding = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.unsqueeze(dim=0).cpu().numpy()})[0].flatten().tolist() + utt2embedding[utt] = embedding + spk = utt2spk[utt] + if spk not in spk2embedding: + spk2embedding[spk] = [] + spk2embedding[spk].append(embedding) + + torch.save(utt2embedding, '{}/utt2embedding.pt'.format(args.dir)) + torch.save(spk2embedding, '{}/spk2embedding.pt'.format(args.dir)) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--dir', + type=str) + parser.add_argument('--onnx_path', + type=str) + args = parser.parse_args() + main(args) diff --git a/tools/extract_speech_token.py b/tools/extract_speech_token.py new file mode 100644 index 0000000..fac0b0b --- /dev/null +++ b/tools/extract_speech_token.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import argparse +import logging +import torch +from tqdm import tqdm +import onnxruntime +import numpy as np +import torchaudio +import whisper + + +def main(args): + utt2wav = {} + with open('{}/wav.scp'.format(args.dir)) as f: + for l in f: + l = l.replace('\n', '').split() + utt2wav[l[0]] = l[1] + + option = onnxruntime.SessionOptions() + option.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_ENABLE_ALL + option.intra_op_num_threads = 1 + providers = ["CUDAExecutionProvider"] + ort_session = onnxruntime.InferenceSession(args.onnx_path, sess_options=option, providers=providers) + + utt2speech_token = {} + for utt in tqdm(utt2wav.keys()): + audio, sample_rate = torchaudio.load(utt2wav[utt]) + if sample_rate != 16000: + audio = torchaudio.transforms.Resample(orig_freq=sample_rate, new_freq=16000)(audio) + if audio.shape[1] / 16000 > 30: + logging.warning('do not support extract speech token for audio longer than 30s') + speech_token = [] + else: + feat = whisper.log_mel_spectrogram(audio, n_mels=128) + speech_token = ort_session.run(None, {ort_session.get_inputs()[0].name: feat.detach().cpu().numpy(), + ort_session.get_inputs()[1].name: np.array([feat.shape[2]], dtype=np.int32)})[0].flatten().tolist() + utt2speech_token[utt] = speech_token + torch.save(utt2speech_token, '{}/utt2speech_token.pt'.format(args.dir)) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--dir', + type=str) + parser.add_argument('--onnx_path', + type=str) + args = parser.parse_args() + main(args) diff --git a/tools/make_parquet_list.py b/tools/make_parquet_list.py new file mode 100644 index 0000000..399d9a6 --- /dev/null +++ b/tools/make_parquet_list.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+import argparse +import logging +import os +import json +from tqdm import tqdm +import pandas as pd +import multiprocessing +import time +import torch + + +def job(utt_list, parquet_file, utt2parquet_file, spk2parquet_file): + start_time = time.time() + data_list = [] + for utt in tqdm(utt_list): + data = open(utt2wav[utt], 'rb').read() + data_list.append(data) + wav_list = [utt2wav[utt] for utt in utt_list] + text_list = [utt2text[utt] for utt in utt_list] + spk_list = [utt2spk[utt] for utt in utt_list] + uttembedding_list = [utt2embedding[utt] for utt in utt_list] + spkembedding_list = [spk2embedding[utt2spk[utt]] for utt in utt_list] + speech_token_list = [utt2speech_token[utt] for utt in utt_list] + + # 保存到parquet,utt2parquet_file,spk2parquet_file + df = pd.DataFrame() + df['utt'] = utt_list + df['wav'] = wav_list + df['audio_data'] = data_list + df['text'] = text_list + df['spk'] = spk_list + df['utt_embedding'] = uttembedding_list + df['spk_embedding'] = spkembedding_list + df['speech_token'] = speech_token_list + df.to_parquet(parquet_file) + with open(utt2parquet_file, 'w') as f: + json.dump({k: parquet_file for k in utt_list}, f, ensure_ascii=False, indent=2) + with open(spk2parquet_file, 'w') as f: + json.dump({k: parquet_file for k in list(set(spk_list))}, f, ensure_ascii=False, indent=2) + logging.info('spend time {}'.format(time.time() - start_time)) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument('--num_utts_per_parquet', + type=int, + default=1000, + help='num utts per parquet') + parser.add_argument('--num_processes', + type=int, + default=1, + help='num processes for make parquets') + parser.add_argument('--src_dir', + type=str) + parser.add_argument('--des_dir', + type=str) + args = parser.parse_args() + + utt2wav, utt2text, utt2spk = {}, {}, {} + with open('{}/wav.scp'.format(args.src_dir)) as f: + for l in f: + l = l.replace('\n', '').split() + utt2wav[l[0]] = l[1] + with open('{}/text'.format(args.src_dir)) as f: + for l in f: + l = l.replace('\n', '').split() + utt2text[l[0]] = ' '.join(l[1:]) + with open('{}/utt2spk'.format(args.src_dir)) as f: + for l in f: + l = l.replace('\n', '').split() + utt2spk[l[0]] = l[1] + utt2embedding = torch.load('{}/utt2embedding.pt'.format(args.src_dir)) + spk2embedding = torch.load('{}/spk2embedding.pt'.format(args.src_dir)) + utt2speech_token = torch.load('{}/utt2speech_token.pt'.format(args.src_dir)) + utts = list(utt2wav.keys()) + + # Using process pool to speedup + pool = multiprocessing.Pool(processes=args.num_processes) + parquet_list, utt2parquet_list, spk2parquet_list = [], [], [] + for i, j in enumerate(range(0, len(utts), args.num_utts_per_parquet)): + parquet_file = os.path.join(args.des_dir, 'parquet_{:09d}.tar'.format(i)) + utt2parquet_file = os.path.join(args.des_dir, 'utt2parquet_{:09d}.json'.format(i)) + spk2parquet_file = os.path.join(args.des_dir, 'spk2parquet_{:09d}.json'.format(i)) + parquet_list.append(parquet_file) + utt2parquet_list.append(utt2parquet_file) + spk2parquet_list.append(spk2parquet_file) + pool.apply_async(job, (utts[j: j + args.num_utts_per_parquet], parquet_file, utt2parquet_file, spk2parquet_file)) + pool.close() + pool.join() + + with open('{}/data.list'.format(args.des_dir), 'w', encoding='utf8') as f1, \ + open('{}/utt2data.list'.format(args.des_dir), 'w', encoding='utf8') as f2, \ + open('{}/spk2data.list'.format(args.des_dir), 'w', encoding='utf8') as f3: + for name in parquet_list: + f1.write(name + '\n') + for name in utt2parquet_list: + 
f2.write(name + '\n') + for name in spk2parquet_list: + f3.write(name + '\n') diff --git a/webui.py b/webui.py new file mode 100644 index 0000000..d2a6e86 --- /dev/null +++ b/webui.py @@ -0,0 +1,186 @@ +# Copyright (c) 2024 Alibaba Inc (authors: Xiang Lyu) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import os +import sys +ROOT_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append('{}/third_party/AcademiCodec'.format(ROOT_DIR)) +sys.path.append('{}/third_party/Matcha-TTS'.format(ROOT_DIR)) + +import argparse +import gradio as gr +import numpy as np +import torch +import torchaudio +import random +import librosa + +import logging +logging.getLogger('matplotlib').setLevel(logging.WARNING) + +from cosyvoice.cli.cosyvoice import CosyVoice +from cosyvoice.utils.file_utils import load_wav + +logging.basicConfig(level=logging.DEBUG, + format='%(asctime)s %(levelname)s %(message)s') + +def generate_seed(): + seed = random.randint(1, 100000000) + return { + "__type__": "update", + "value": seed + } + +def set_all_random_seed(seed): + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + torch.cuda.manual_seed_all(seed) + +max_val = 0.8 +def postprocess(speech, top_db=60, hop_length=220, win_length=440): + speech, _ = librosa.effects.trim( + speech, top_db=top_db, + frame_length=win_length, + hop_length=hop_length + ) + if speech.abs().max() > max_val: + speech = speech / speech.abs().max() * max_val + speech = torch.concat([speech, torch.zeros(1, int(target_sr * 0.2))], dim=1) + return speech + +inference_mode_list = ['预训练音色', '3s极速复刻', '跨语种复刻', '自然语言控制'] +instruct_dict = {'预训练音色': '1. 选择预训练音色\n2.点击生成音频按钮', + '3s极速复刻': '1. 选择prompt音频文件,或录入prompt音频,若同时提供,优先选择prompt音频文件\n2. 输入prompt文本\n3.点击生成音频按钮', + '跨语种复刻': '1. 选择prompt音频文件,或录入prompt音频,若同时提供,优先选择prompt音频文件\n2.点击生成音频按钮', + '自然语言控制': '1. 
输入instruct文本\n2.点击生成音频按钮'} +def change_instruction(mode_checkbox_group): + return instruct_dict[mode_checkbox_group] + +def generate_audio(tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed): + if prompt_wav_upload is not None: + prompt_wav = prompt_wav_upload + elif prompt_wav_record is not None: + prompt_wav = prompt_wav_record + else: + prompt_wav = None + # if instruct mode, please make sure that model is speech_tts/CosyVoice-300M-Instruct and not cross_lingual mode + if mode_checkbox_group in ['自然语言控制']: + if cosyvoice.frontend.instruct is False: + gr.Warning('您正在使用自然语言控制模式, {}模型不支持此模式, 请使用speech_tts/CosyVoice-300M-Instruct模型'.format(args.model_dir)) + return (target_sr, default_data) + if instruct_text == '': + gr.Warning('您正在使用自然语言控制模式, 请输入instruct文本') + return (target_sr, default_data) + if prompt_wav is not None or prompt_text != '': + gr.Info('您正在使用自然语言控制模式, prompt音频/prompt文本会被忽略') + # if cross_lingual mode, please make sure that model is speech_tts/CosyVoice-300M and tts_text prompt_text are different language + if mode_checkbox_group in ['跨语种复刻']: + if cosyvoice.frontend.instruct is True: + gr.Warning('您正在使用跨语种复刻模式, {}模型不支持此模式, 请使用speech_tts/CosyVoice-300M模型'.format(args.model_dir)) + return (target_sr, default_data) + if instruct_text != '': + gr.Info('您正在使用跨语种复刻模式, instruct文本会被忽略') + if prompt_wav is None: + gr.Warning('您正在使用跨语种复刻模式, 请提供prompt音频') + return (target_sr, default_data) + gr.Info('您正在使用跨语种复刻模式, 请确保合成文本和prompt文本为不同语言') + # if in zero_shot cross_lingual, please make sure that prompt_text and prompt_wav meets requirements + if mode_checkbox_group in ['3s极速复刻', '跨语种复刻']: + if prompt_wav is None: + gr.Warning('prompt音频为空,您是否忘记输入prompt音频?') + return (target_sr, default_data) + if torchaudio.info(prompt_wav).sample_rate < prompt_sr: + gr.Warning('prompt音频采样率{}低于{}'.format(torchaudio.info(prompt_wav).sample_rate, prompt_sr)) + return (target_sr, default_data) + # sft mode only use sft_dropdown + if mode_checkbox_group in ['预训练音色']: + if instruct_text != '' or prompt_wav is not None or prompt_text != '': + gr.Info('您正在使用预训练音色模式,prompt文本/prompt音频/instruct文本会被忽略!') + # zero_shot mode only use prompt_wav prompt text + if mode_checkbox_group in ['3s极速复刻']: + if prompt_text == '': + gr.Warning('prompt文本为空,您是否忘记输入prompt文本?') + return (target_sr, default_data) + if instruct_text != '': + gr.Info('您正在使用3s极速复刻模式,预训练音色/instruct文本会被忽略!') + + if mode_checkbox_group == '预训练音色': + logging.info('get sft inference request') + set_all_random_seed(seed) + output = cosyvoice.inference_sft(tts_text, sft_dropdown) + elif mode_checkbox_group == '3s极速复刻': + logging.info('get zero_shot inference request') + prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr)) + set_all_random_seed(seed) + output = cosyvoice.inference_zero_shot(tts_text, prompt_text, prompt_speech_16k) + elif mode_checkbox_group == '跨语种复刻': + logging.info('get cross_lingual inference request') + prompt_speech_16k = postprocess(load_wav(prompt_wav, prompt_sr)) + set_all_random_seed(seed) + output = cosyvoice.inference_cross_lingual(tts_text, prompt_speech_16k) + else: + logging.info('get instruct inference request') + set_all_random_seed(seed) + output = cosyvoice.inference_instruct(tts_text, sft_dropdown, instruct_text) + audio_data = output['tts_speech'].numpy().flatten() + return (target_sr, audio_data) + +def main(): + with gr.Blocks() as demo: + gr.Markdown("### 代码库 [CosyVoice](https://github.com/FunAudioLLM/CosyVoice) 预训练模型 
[CosyVoice-300M](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M) [CosyVoice-300M-Instruct](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M-Instruct) [CosyVoice-300M-SFT](https://www.modelscope.cn/models/speech_tts/CosyVoice-300M-SFT)") + gr.Markdown("#### 请输入需要合成的文本,选择推理模式,并按照提示步骤进行操作") + + tts_text = gr.Textbox(label="输入合成文本", lines=1, value="我是通义实验室语音团队全新推出的生成式语音大模型,提供舒适自然的语音合成能力。") + + with gr.Row(): + mode_checkbox_group = gr.Radio(choices=inference_mode_list, label='选择推理模式', value=inference_mode_list[0]) + instruction_text = gr.Text(label="操作步骤", value=instruct_dict[inference_mode_list[0]], scale=0.5) + sft_dropdown = gr.Dropdown(choices=sft_spk, label='选择预训练音色', value=sft_spk[0], scale=0.25) + with gr.Column(scale=0.25): + seed_button = gr.Button(value="\U0001F3B2") + seed = gr.Number(value=0, label="随机推理种子") + + with gr.Row(): + prompt_wav_upload = gr.Audio(sources='upload', type='filepath', label='选择prompt音频文件,注意采样率不低于16khz') + prompt_wav_record = gr.Audio(sources='microphone', type='filepath', label='录制prompt音频文件') + prompt_text = gr.Textbox(label="输入prompt文本", lines=1, placeholder="请输入prompt文本,需与prompt音频内容一致,暂时不支持自动识别...", value='') + instruct_text = gr.Textbox(label="输入instruct文本", lines=1, placeholder="请输入instruct文本.", value='') + + generate_button = gr.Button("生成音频") + + audio_output = gr.Audio(label="合成音频") + + seed_button.click(generate_seed, inputs=[], outputs=seed) + generate_button.click(generate_audio, + inputs=[tts_text, mode_checkbox_group, sft_dropdown, prompt_text, prompt_wav_upload, prompt_wav_record, instruct_text, seed], + outputs=[audio_output]) + mode_checkbox_group.change(fn=change_instruction, inputs=[mode_checkbox_group], outputs=[instruction_text]) + demo.queue(max_size=4, default_concurrency_limit=2) + demo.launch(server_port=args.port,inbrowser=True) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--port', + type=int, + default=8000) + parser.add_argument('--model_dir', + type=str, + default='speech_tts/CosyVoice-300M', + help='local path or modelscope repo id') + args = parser.parse_args() + cosyvoice = CosyVoice(args.model_dir) + sft_spk = cosyvoice.list_avaliable_spks() + prompt_sr, target_sr = 16000, 22050 + default_data = np.zeros(target_sr) + main() diff --git "a/\350\277\220\350\241\214-CosyVoice-300M-Instruct.bat" "b/\350\277\220\350\241\214-CosyVoice-300M-Instruct.bat" new file mode 100644 index 0000000..bb49110 --- /dev/null +++ "b/\350\277\220\350\241\214-CosyVoice-300M-Instruct.bat" @@ -0,0 +1,17 @@ +SET PYTHON_PATH=%cd%\py311\ +rem overriding default python env vars in order not to interfere with any system python installation +SET PYTHONHOME= +SET PYTHONPATH= +SET PYTHONEXECUTABLE=%PYTHON_PATH%\python.exe +SET PYTHONWEXECUTABLE=%PYTHON_PATH%pythonw.exe +SET PYTHON_EXECUTABLE=%PYTHON_PATH%\python.exe +SET PYTHONW_EXECUTABLE=%PYTHON_PATH%pythonw.exe +SET PYTHON_BIN_PATH=%PYTHON_EXECUTABLE% +SET PYTHON_LIB_PATH=%PYTHON_PATH%\Lib\site-packages +SET FFMPEG_PATH=%cd%\py311\ffmpeg\bin +SET PATH=%PYTHON_PATH%;%PYTHON_PATH%\Scripts;%FFMPEG_PATH%;%PATH% +set HF_ENDPOINT=https://hf-mirror.com +set HF_HOME=%CD%\hf_download +set PYTHONPATH=third_party/AcademiCodec;third_party/Matcha-TTS +"%PYTHON_EXECUTABLE%" webui.py --port 9886 --model_dir ./pretrained_models/CosyVoice-300M-Instruct +pause diff --git "a/\350\277\220\350\241\214-CosyVoice-300M.bat" "b/\350\277\220\350\241\214-CosyVoice-300M.bat" new file mode 100644 index 0000000..2403f39 --- /dev/null +++ 
"b/\350\277\220\350\241\214-CosyVoice-300M.bat" @@ -0,0 +1,17 @@ +SET PYTHON_PATH=%cd%\py311\ +rem overriding default python env vars in order not to interfere with any system python installation +SET PYTHONHOME= +SET PYTHONPATH= +SET PYTHONEXECUTABLE=%PYTHON_PATH%\python.exe +SET PYTHONWEXECUTABLE=%PYTHON_PATH%pythonw.exe +SET PYTHON_EXECUTABLE=%PYTHON_PATH%\python.exe +SET PYTHONW_EXECUTABLE=%PYTHON_PATH%pythonw.exe +SET PYTHON_BIN_PATH=%PYTHON_EXECUTABLE% +SET PYTHON_LIB_PATH=%PYTHON_PATH%\Lib\site-packages +SET FFMPEG_PATH=%cd%\py311\ffmpeg\bin +SET PATH=%PYTHON_PATH%;%PYTHON_PATH%\Scripts;%FFMPEG_PATH%;%PATH% +set HF_ENDPOINT=https://hf-mirror.com +set HF_HOME=%CD%\hf_download +set PYTHONPATH=third_party/AcademiCodec;third_party/Matcha-TTS +"%PYTHON_EXECUTABLE%" webui.py --port 9886 --model_dir ./pretrained_models/CosyVoice-300M +pause