Blane187 committed on
Commit
1e43516
1 Parent(s): 3d56a8f

Upload folder using huggingface_hub

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set.
Files changed (50)
  1. .env +9 -0
  2. .github/PULL_REQUEST_TEMPLATE.md +26 -0
  3. .github/workflows/docker.yml +70 -0
  4. .github/workflows/genlocale.yml +38 -0
  5. .github/workflows/pull_format.yml +48 -0
  6. .github/workflows/push_format.yml +52 -0
  7. .github/workflows/sync_dev.yml +23 -0
  8. .github/workflows/unitest.yml +36 -0
  9. .gitignore +28 -0
  10. CONTRIBUTING.md +11 -0
  11. Dockerfile +47 -0
  12. LICENSE +23 -0
  13. MIT协议暨相关引用库协议 +45 -0
  14. README.md +213 -3
  15. Retrieval_based_Voice_Conversion_WebUI.ipynb +403 -0
  16. Retrieval_based_Voice_Conversion_WebUI_v2.ipynb +422 -0
  17. a.png +0 -0
  18. api_231006.py +440 -0
  19. api_240604.py +565 -0
  20. assets/Synthesizer_inputs.pth +3 -0
  21. assets/hubert/.gitignore +3 -0
  22. assets/hubert/hubert_inputs.pth +3 -0
  23. assets/indices/.gitignore +2 -0
  24. assets/pretrained/.gitignore +2 -0
  25. assets/pretrained_v2/.gitignore +2 -0
  26. assets/pretrained_v2/f0D32k.pth +3 -0
  27. assets/pretrained_v2/f0G32k.pth +3 -0
  28. assets/rmvpe/.gitignore +3 -0
  29. assets/rmvpe/rmvpe_inputs.pth +3 -0
  30. assets/uvr5_weights/.gitignore +2 -0
  31. audios/astronauts.mp3 +0 -0
  32. audios/somegirl.mp3 +0 -0
  33. audios/someguy.mp3 +0 -0
  34. audios/unachica.mp3 +0 -0
  35. audios/unchico.mp3 +0 -0
  36. configs/config.json +1 -0
  37. configs/config.py +254 -0
  38. configs/inuse/.gitignore +4 -0
  39. configs/inuse/v1/.gitignore +2 -0
  40. configs/inuse/v2/.gitignore +2 -0
  41. configs/v1/32k.json +46 -0
  42. configs/v1/40k.json +46 -0
  43. configs/v1/48k.json +46 -0
  44. configs/v2/32k.json +46 -0
  45. configs/v2/48k.json +46 -0
  46. demo.py +439 -0
  47. docker-compose.yml +20 -0
  48. docs/cn/Changelog_CN.md +109 -0
  49. docs/cn/faq.md +108 -0
  50. docs/en/Changelog_EN.md +105 -0
.env ADDED
@@ -0,0 +1,9 @@
+ OPENBLAS_NUM_THREADS = 1
+ no_proxy = localhost, 127.0.0.1, ::1
+
+ # You can change the locations of the models, etc. by editing the values below
+ weight_root = assets/weights
+ weight_uvr5_root = assets/uvr5_weights
+ index_root = logs
+ outside_index_root = assets/indices
+ rmvpe_root = assets/rmvpe
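
These variables are read from the process environment at startup. As a minimal sketch of how such a file can be consumed, assuming the `python-dotenv` package (the repository's actual loading code is not part of this diff):

```python
# Minimal sketch: load .env and resolve a model directory.
# Assumes `pip install python-dotenv`; RVC's own loading code is not shown here.
import os
from dotenv import load_dotenv

load_dotenv()  # reads .env from the current working directory

weight_root = os.getenv("weight_root", "assets/weights")
rmvpe_root = os.getenv("rmvpe_root", "assets/rmvpe")
print(f"Voice models expected under: {os.path.abspath(weight_root)}")
```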
.github/PULL_REQUEST_TEMPLATE.md ADDED
@@ -0,0 +1,26 @@
+ # Pull request checklist
+
+ - [ ] The PR has a proper title. Use [Semantic Commit Messages](https://seesparkbox.com/foundry/semantic_commit_messages). (No more branch-name titles, please.)
+ - [ ] Make sure this is ready to be merged into the relevant branch. Please don't create a PR and leave it hanging for days.
+ - [ ] Ensure you can run the code you submitted successfully. These submissions will be prioritized for review:
+
+   Improvements to program execution speed;
+
+   Improvements to synthesis quality;
+
+   Fixes for existing bugs reported by user feedback (or encountered yourself);
+
+   More convenient user operations.
+
+ # PR type
+
+ - Bug fix / new feature / synthesis quality improvement / program execution speed improvement
+
+ # Description
+
+ - Describe what this pull request is for.
+ - What it will affect.
+
+ # Screenshot
+
+ - Please include a screenshot if applicable.
.github/workflows/docker.yml ADDED
@@ -0,0 +1,70 @@
+ name: Build and Push Docker Image
+
+ on:
+   workflow_dispatch:
+   push:
+     # Sequence of patterns matched against refs/tags
+     tags:
+       - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10
+
+ jobs:
+   build:
+     runs-on: ubuntu-latest
+     permissions:
+       packages: write
+       contents: read
+     steps:
+       - uses: actions/checkout@v3
+       - name: Set time zone
+         uses: szenius/set-timezone@v1.2
+         with:
+           timezoneLinux: "Asia/Shanghai"
+           timezoneMacos: "Asia/Shanghai"
+           timezoneWindows: "China Standard Time"
+
+       # # If you have a DockerHub account, configure the two secrets below in your GitHub repository,
+       # # uncomment the following lines, and add ${{ github.repository }} to the images list of the meta step.
+       # - name: Login to DockerHub
+       #   uses: docker/login-action@v1
+       #   with:
+       #     username: ${{ secrets.DOCKERHUB_USERNAME }}
+       #     password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+       - name: Login to GHCR
+         uses: docker/login-action@v2
+         with:
+           registry: ghcr.io
+           username: ${{ github.repository_owner }}
+           password: ${{ secrets.GITHUB_TOKEN }}
+
+       - name: Extract metadata (tags, labels) for Docker
+         id: meta
+         uses: docker/metadata-action@v4
+         with:
+           images: |
+             ghcr.io/${{ github.repository }}
+           # generate Docker tags based on the following events/attributes
+           # nightly, master, pr-2, 1.2.3, 1.2, 1
+           tags: |
+             type=schedule,pattern=nightly
+             type=edge
+             type=ref,event=branch
+             type=ref,event=pr
+             type=semver,pattern={{version}}
+             type=semver,pattern={{major}}.{{minor}}
+             type=semver,pattern={{major}}
+
+       - name: Set up QEMU
+         uses: docker/setup-qemu-action@v2
+
+       - name: Set up Docker Buildx
+         uses: docker/setup-buildx-action@v2
+
+       - name: Build and push
+         id: docker_build
+         uses: docker/build-push-action@v4
+         with:
+           context: .
+           platforms: linux/amd64,linux/arm64
+           push: true
+           tags: ${{ steps.meta.outputs.tags }}
+           labels: ${{ steps.meta.outputs.labels }}
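
For a tag push like `v1.2.3`, the `semver` patterns above yield the image tags `1.2.3`, `1.2`, and `1`. A small illustrative sketch of that expansion (the real logic lives inside `docker/metadata-action`; this is not part of the workflow):

```python
# Illustrative only: mimic the semver tag expansion performed by
# docker/metadata-action for a pushed git tag such as "v1.2.3".
def semver_tags(git_tag: str) -> list[str]:
    version = git_tag.lstrip("v")          # "1.2.3"
    major, minor, _patch = version.split(".")
    return [version, f"{major}.{minor}", major]

print(semver_tags("v1.2.3"))  # ['1.2.3', '1.2', '1']
```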
.github/workflows/genlocale.yml ADDED
@@ -0,0 +1,38 @@
+ name: Generate and Sync Locale
+ on:
+   push:
+     branches:
+       - main
+       - dev
+ jobs:
+   genlocale:
+     runs-on: ubuntu-latest
+     steps:
+       - uses: actions/checkout@master
+
+       - name: Run locale generation
+         run: |
+           python3 i18n/scan_i18n.py
+           cd i18n
+           python3 locale_diff.py
+
+       - name: Commit back
+         if: ${{ !github.head_ref }}
+         id: commitback
+         continue-on-error: true
+         run: |
+           git config --local user.name 'github-actions[bot]'
+           git config --local user.email 'github-actions[bot]@users.noreply.github.com'
+           git add --all
+           git commit -m "chore(i18n): sync locale on ${{github.ref_name}}"
+
+       - name: Create Pull Request
+         if: steps.commitback.outcome == 'success'
+         continue-on-error: true
+         uses: peter-evans/create-pull-request@v5
+         with:
+           delete-branch: true
+           body: "Automatically sync i18n translation jsons"
+           title: "chore(i18n): sync locale on ${{github.ref_name}}"
+           commit-message: "chore(i18n): sync locale on ${{github.ref_name}}"
+           branch: genlocale-${{github.ref_name}}
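
`locale_diff.py` itself is not included in this diff; as a rough sketch of what syncing locale JSONs typically involves (filling keys that exist in the reference language but are missing elsewhere, and dropping stale ones), assuming one JSON file per language under `i18n/locale/`:

```python
# Rough sketch only: the repository's locale_diff.py is not shown in this diff.
# Fill keys missing from a translation file using the reference locale.
import json

def sync_locale(reference_path: str, target_path: str) -> None:
    with open(reference_path, encoding="utf-8") as f:
        reference = json.load(f)
    with open(target_path, encoding="utf-8") as f:
        target = json.load(f)
    for key, value in reference.items():
        target.setdefault(key, value)  # keep existing translations
    # drop keys that no longer exist in the reference
    target = {k: v for k, v in target.items() if k in reference}
    with open(target_path, "w", encoding="utf-8") as f:
        json.dump(target, f, ensure_ascii=False, indent=2)

sync_locale("i18n/locale/zh_CN.json", "i18n/locale/en_US.json")
```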
.github/workflows/pull_format.yml ADDED
@@ -0,0 +1,48 @@
+ name: Check Pull Format
+
+ on:
+   pull_request_target:
+     types: [opened, reopened]
+
+ jobs:
+   # This job closes invalid PRs
+   close_pr:
+     # The type of runner that the job will run on
+     runs-on: ubuntu-latest
+     permissions: write-all
+
+     # Steps represent a sequence of tasks that will be executed as part of the job
+     steps:
+       - name: Close PR if it is not pointed at the dev branch
+         if: github.event.pull_request.base.ref != 'dev'
+         uses: superbrothers/close-pull-request@v3
+         with:
+           # Optional. Post an issue comment just before closing the pull request.
+           comment: "Invalid PR to `non-dev` branch `${{ github.event.pull_request.base.ref }}`."
+
+   pull_format:
+     runs-on: ubuntu-latest
+     permissions:
+       contents: write
+
+     continue-on-error: true
+
+     steps:
+       - name: Checkout
+         continue-on-error: true
+         uses: actions/checkout@v3
+         with:
+           ref: ${{ github.head_ref }}
+           fetch-depth: 0
+
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+
+       - name: Install Black
+         run: pip install "black[jupyter]"
+
+       - name: Run Black
+         # run: black $(git ls-files '*.py')
+         run: black .
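
Both formatting workflows run Black over the whole tree, so the same check can be reproduced locally before pushing. Black also exposes a small Python API; `black.format_str` and `black.Mode` are real entry points, though the snippet below is illustrative rather than part of the repo:

```python
# Illustrative: what "run: black ." enforces, via Black's Python API.
import black

ugly = "def f( x ):return x+1"
pretty = black.format_str(ugly, mode=black.Mode())
print(pretty)
# def f(x):
#     return x + 1
```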
.github/workflows/push_format.yml ADDED
@@ -0,0 +1,52 @@
+ name: Standardize Code Format
+
+ on:
+   push:
+     branches:
+       - main
+       - dev
+
+ jobs:
+   push_format:
+     runs-on: ubuntu-latest
+
+     permissions:
+       contents: write
+       pull-requests: write
+
+     steps:
+       - uses: actions/checkout@v3
+         with:
+           ref: ${{github.ref_name}}
+
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+
+       - name: Install Black
+         run: pip install "black[jupyter]"
+
+       - name: Run Black
+         # run: black $(git ls-files '*.py')
+         run: black .
+
+       - name: Commit Back
+         continue-on-error: true
+         id: commitback
+         run: |
+           git config --local user.email "github-actions[bot]@users.noreply.github.com"
+           git config --local user.name "github-actions[bot]"
+           git add --all
+           git commit -m "chore(format): run black on ${{github.ref_name}}"
+
+       - name: Create Pull Request
+         if: steps.commitback.outcome == 'success'
+         continue-on-error: true
+         uses: peter-evans/create-pull-request@v5
+         with:
+           delete-branch: true
+           body: "Automatically apply code formatter change"
+           title: "chore(format): run black on ${{github.ref_name}}"
+           commit-message: "chore(format): run black on ${{github.ref_name}}"
+           branch: formatter-${{github.ref_name}}
.github/workflows/sync_dev.yml ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Merge dev into main
2
+
3
+ on:
4
+ workflow_dispatch:
5
+
6
+ jobs:
7
+ sync_dev:
8
+ runs-on: ubuntu-latest
9
+
10
+ permissions:
11
+ contents: write
12
+ pull-requests: write
13
+
14
+ steps:
15
+ - uses: actions/checkout@v3
16
+ with:
17
+ ref: main
18
+
19
+ - name: Create Pull Request
20
+ run: |
21
+ gh pr create --title "chore(sync): merge dev into main" --body "Merge dev to main" --base main --head dev
22
+ env:
23
+ GH_TOKEN: ${{ github.token }}
.github/workflows/unitest.yml ADDED
@@ -0,0 +1,36 @@
+ name: Unit Test
+ on: [push, pull_request]
+ jobs:
+   build:
+     runs-on: ${{ matrix.os }}
+     strategy:
+       matrix:
+         python-version: ["3.8", "3.9", "3.10"]
+         os: [ubuntu-latest]
+       fail-fast: true
+
+     steps:
+       - uses: actions/checkout@master
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+       - name: Install dependencies
+         run: |
+           sudo apt update
+           sudo apt -y install ffmpeg
+           sudo apt -y install -qq aria2
+           aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d ./ -o hubert_base.pt
+           python -m pip install --upgrade pip
+           python -m pip install --upgrade setuptools
+           python -m pip install --upgrade wheel
+           pip install torch torchvision torchaudio
+           pip install -r requirements.txt
+       - name: Test step 1 & 2
+         run: |
+           mkdir -p logs/mi-test
+           touch logs/mi-test/preprocess.log
+           python infer/modules/train/preprocess.py logs/mute/0_gt_wavs 48000 8 logs/mi-test True 3.7
+           touch logs/mi-test/extract_f0_feature.log
+           python infer/modules/train/extract/extract_f0_print.py logs/mi-test $(nproc) pm
+           python infer/modules/train/extract_feature_print.py cpu 1 0 0 logs/mi-test v1 True
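
The positional arguments to `preprocess.py` are easy to misread in the one-liner above. A sketch of the same invocation with each argument labeled (the meanings are inferred from the command line here, not documented in this diff):

```python
# Sketch: the unit-test preprocessing call with labeled arguments.
# Argument meanings are inferred from the command above, not documented here.
import subprocess

subprocess.run(
    [
        "python", "infer/modules/train/preprocess.py",
        "logs/mute/0_gt_wavs",  # input directory of training wavs
        "48000",                # target sample rate
        "8",                    # number of worker processes
        "logs/mi-test",         # experiment/log directory
        "True",                 # noparallel flag
        "3.7",                  # seconds per training slice
    ],
    check=True,
)
```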
.gitignore ADDED
@@ -0,0 +1,28 @@
+ .DS_Store
+ __pycache__
+ /TEMP
+ *.pyd
+ .venv
+ /opt
+ tools/aria2c/
+ tools/flag.txt
+
+ # Imported from huggingface.co/lj1995/VoiceConversionWebUI
+ /pretrained
+ /pretrained_v2
+ /uvr5_weights
+ hubert_base.pt
+ rmvpe.onnx
+ rmvpe.pt
+
+ # Generated by RVC
+ /logs
+ /weights
+
+ # To set a Python version for the project
+ .tool-versions
+
+ /runtime
+ /assets/weights/*
+ ffmpeg.*
+ ffprobe.*
CONTRIBUTING.md ADDED
@@ -0,0 +1,11 @@
+ # Contributing Rules
+ 1. Generally, the author `@RVC-Boss` will reject all algorithm changes unless they fix a code-level error or warning.
+ 2. You can contribute to other parts of this repo, such as translations and the WebUI, but please keep your changes as small as possible.
+ 3. All changes need to be approved by `@RVC-Boss`, so your PR may be put on hold.
+ 4. Please accept our apologies for any inconvenience caused.
Dockerfile ADDED
@@ -0,0 +1,47 @@
+ # syntax=docker/dockerfile:1
+
+ FROM nvidia/cuda:11.6.2-cudnn8-runtime-ubuntu20.04
+
+ EXPOSE 7865
+
+ WORKDIR /app
+
+ COPY . .
+
+ # Install dependencies needed to add PPAs
+ RUN apt-get update && \
+     apt-get install -y -qq ffmpeg aria2 && apt clean && \
+     apt-get install -y software-properties-common && \
+     apt-get clean && \
+     rm -rf /var/lib/apt/lists/*
+
+ # Add the deadsnakes PPA to get Python 3.9
+ RUN add-apt-repository ppa:deadsnakes/ppa
+
+ # Install Python 3.9 and pip
+ RUN apt-get update && \
+     apt-get install -y build-essential python-dev python3-dev python3.9-distutils python3.9-dev python3.9 curl && \
+     apt-get clean && \
+     update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.9 1 && \
+     curl https://bootstrap.pypa.io/get-pip.py | python3.9
+
+ # Set Python 3.9 as the default
+ RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1
+
+ RUN python3 -m pip install --no-cache-dir -r requirements.txt
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d assets/pretrained_v2/ -o D40k.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d assets/pretrained_v2/ -o G40k.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d assets/pretrained_v2/ -o f0D40k.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d assets/pretrained_v2/ -o f0G40k.pth
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d assets/uvr5_weights/ -o HP2-人声vocals+非人声instrumentals.pth
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d assets/uvr5_weights/ -o HP5-主旋律人声vocals+其他instrumentals.pth
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d assets/hubert -o hubert_base.pt
+
+ RUN aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d assets/rmvpe -o rmvpe.pt
+
+ VOLUME [ "/app/weights", "/app/opt" ]
+
+ CMD ["python3", "infer-web.py"]
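
Once the image built by the workflow above is pulled, it can be started with port 7865 exposed and the two declared volumes mounted. A minimal sketch using the Docker SDK for Python (`pip install docker`); the image tag and host paths below are hypothetical and depend on how the GHCR repository is named:

```python
# Minimal sketch: run the built image with the Docker SDK for Python.
# The image tag and host paths are hypothetical; use whatever the workflow pushed.
import docker

client = docker.from_env()
container = client.containers.run(
    "ghcr.io/your-org/retrieval-based-voice-conversion-webui:latest",
    detach=True,
    ports={"7865/tcp": 7865},  # matches EXPOSE 7865
    volumes={
        "/srv/rvc/weights": {"bind": "/app/weights", "mode": "rw"},
        "/srv/rvc/opt": {"bind": "/app/opt", "mode": "rw"},
    },
)
print(container.short_id)
```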
LICENSE ADDED
@@ -0,0 +1,23 @@
+ MIT License
+
+ Copyright (c) 2023 liujing04
+ Copyright (c) 2023 源文雨
+ Copyright (c) 2023 Ftps
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
MIT协议暨相关引用库协议 ADDED
@@ -0,0 +1,45 @@
+ This software and its related code are open-sourced under the MIT license. The author has no control over the software; those who use the software or distribute the audio it produces bear full responsibility themselves.
+ If you do not accept these terms, you may not use or reference any code or files within the software package.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and its associated documentation files (the "Software"), to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, subject to the following conditions:
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
+ The Software is provided "as is", without warranty of any kind, express or implied, including but not limited to the warranties of merchantability, fitness for a particular purpose, and noninfringement. In no event shall the authors or copyright holders be liable for any claim, damages, or other liability, whether in an action of contract, tort, or otherwise, arising from, out of, or in connection with the Software or the use or other dealings in the Software.
+
+
+ The licenses for related libraries are as follows:
+
+ ContentVec
+ https://github.com/auspicious3000/contentvec/blob/main/LICENSE
+ MIT License
+
+ VITS
+ https://github.com/jaywalnut310/vits/blob/main/LICENSE
+ MIT License
+
+ HIFIGAN
+ https://github.com/jik876/hifi-gan/blob/master/LICENSE
+ MIT License
+
+ gradio
+ https://github.com/gradio-app/gradio/blob/main/LICENSE
+ Apache License 2.0
+
+ ffmpeg
+ https://github.com/FFmpeg/FFmpeg/blob/master/COPYING.LGPLv3
+ https://github.com/BtbN/FFmpeg-Builds/releases/download/autobuild-2021-02-28-12-32/ffmpeg-n4.3.2-160-gfbb9368226-win64-lgpl-4.3.zip
+ LGPLv3 License
+ MIT License
+
+ ultimatevocalremovergui
+ https://github.com/Anjok07/ultimatevocalremovergui/blob/master/LICENSE
+ https://github.com/yang123qwe/vocal_separation_by_uvr5
+ MIT License
+
+ audio-slicer
+ https://github.com/openvpi/audio-slicer/blob/main/LICENSE
+ MIT License
+
+ PySimpleGUI
+ https://github.com/PySimpleGUI/PySimpleGUI/blob/master/license.txt
+ LGPLv3 License
README.md CHANGED
@@ -1,3 +1,213 @@
- ---
- license: mit
- ---
+ <div align="center">
+
+ <h1>Retrieval-based-Voice-Conversion-WebUI</h1>
+ An easy-to-use voice conversion framework based on VITS<br><br>
+
+ [![madewithlove](https://img.shields.io/badge/made_with-%E2%9D%A4-red?style=for-the-badge&labelColor=orange
+ )](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)
+
+ <img src="https://counter.seku.su/cmoe?name=rvc&theme=r34" /><br>
+
+ [![Open In Colab](https://img.shields.io/badge/Colab-F9AB00?style=for-the-badge&logo=googlecolab&color=525252)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb)
+ [![Licence](https://img.shields.io/badge/LICENSE-MIT-green.svg?style=for-the-badge)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/LICENSE)
+ [![Huggingface](https://img.shields.io/badge/🤗%20-Spaces-yellow.svg?style=for-the-badge)](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/)
+
+ [![Discord](https://img.shields.io/badge/RVC%20Developers-Discord-7289DA?style=for-the-badge&logo=discord&logoColor=white)](https://discord.gg/HcsmBBGyVk)
+
+ [**Changelog**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/docs/Changelog_CN.md) | [**FAQ**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/wiki/%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98%E8%A7%A3%E7%AD%94) | [**Train an AI singer on AutoDL for about 0.5 RMB**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/wiki/Autodl%E8%AE%AD%E7%BB%83RVC%C2%B7AI%E6%AD%8C%E6%89%8B%E6%95%99%E7%A8%8B) | [**Comparative experiment records**](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/wiki/%E5%AF%B9%E7%85%A7%E5%AE%9E%E9%AA%8C%C2%B7%E5%AE%9E%E9%AA%8C%E8%AE%B0%E5%BD%95) | [**Online demo**](https://modelscope.cn/studios/FlowerCry/RVCv2demo)
+
+ [**English**](./docs/en/README.en.md) | [**中文简体**](./README.md) | [**日本語**](./docs/jp/README.ja.md) | [**한국어**](./docs/kr/README.ko.md) ([**韓國語**](./docs/kr/README.ko.han.md)) | [**Français**](./docs/fr/README.fr.md) | [**Türkçe**](./docs/tr/README.tr.md) | [**Português**](./docs/pt/README.pt.md)
+
+ </div>
+
+ > The base model is trained on nearly 50 hours of the open-source, high-quality VCTK training set, so there are no copyright concerns; feel free to use it.
+
+ > Look forward to the RVCv3 base model: larger parameters, more data, better results, roughly the same inference speed, and less training data required.
+
+ <table>
+   <tr>
+     <td align="center">Training and inference UI</td>
+     <td align="center">Real-time voice conversion UI</td>
+   </tr>
+   <tr>
+     <td align="center"><img src="https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/assets/129054828/092e5c12-0d49-4168-a590-0b0ef6a4f630"></td>
+     <td align="center"><img src="https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/assets/129054828/730b4114-8805-44a1-ab1a-04668f3c30a6"></td>
+   </tr>
+   <tr>
+     <td align="center">go-web.bat</td>
+     <td align="center">go-realtime-gui.bat</td>
+   </tr>
+   <tr>
+     <td align="center">You can freely choose the operation you want to perform.</td>
+     <td align="center">We have achieved an end-to-end latency of 170 ms. With ASIO input/output devices, 90 ms end-to-end latency is possible, but it depends heavily on hardware driver support.</td>
+   </tr>
+ </table>
+
+ ## Introduction
+ This repository has the following features:
+ + Uses top-1 retrieval to replace input-source features with training-set features, eliminating timbre leakage
+ + Trains quickly, even on relatively weak GPUs
+ + Produces good results even with little training data (collecting at least 10 minutes of low-noise speech is recommended)
+ + Supports changing timbre via model fusion (using ckpt-merge in the ckpt processing tab)
+ + Simple, easy-to-use web interface
+ + Can invoke the UVR5 model to quickly separate vocals and accompaniment
+ + Uses the state-of-the-art [InterSpeech2023-RMVPE vocal pitch extraction algorithm](#credits) to eliminate the muted-note problem; gives the best results (noticeably so) while being faster and lighter than crepe_full
+ + AMD and Intel GPU acceleration supported
+
+ Check out our [demo video](https://www.bilibili.com/video/BV1pm4y1z7Gm/)!
+
+ ## Environment setup
+ The following commands must be run in a Python environment of version 3.8 or higher.
+
+ ### Universal method for Windows/Linux/MacOS and other platforms
+ Choose one of the following methods.
+ #### 1. Install dependencies via pip
+ 1. Install PyTorch and its core dependencies; skip if already installed. Reference: https://pytorch.org/get-started/locally/
+ ```bash
+ pip install torch torchvision torchaudio
+ ```
+ 2. On Windows with an Nvidia Ampere architecture GPU (RTX 30xx), based on the experience reported in #21, specify the CUDA version matching PyTorch:
+ ```bash
+ pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu117
+ ```
+ 3. Install the dependencies matching your GPU:
+ - Nvidia GPUs
+ ```bash
+ pip install -r requirements.txt
+ ```
+ - AMD/Intel GPUs
+ ```bash
+ pip install -r requirements-dml.txt
+ ```
+ - AMD ROCm (Linux)
+ ```bash
+ pip install -r requirements-amd.txt
+ ```
+ - Intel IPEX (Linux)
+ ```bash
+ pip install -r requirements-ipex.txt
+ ```
+
+ #### 2. Install dependencies via poetry
+ Install the Poetry dependency management tool; skip if already installed. Reference: https://python-poetry.org/docs/#installation
+ ```bash
+ curl -sSL https://install.python-poetry.org | python3 -
+ ```
+
+ When installing dependencies with Poetry, Python 3.7-3.10 is recommended; other versions conflict when installing llvmlite==0.39.0
+ ```bash
+ poetry init -n
+ poetry env use "path to your python.exe"
+ poetry run pip install -r requirements.txt
+ ```
+
+ ### MacOS
+ Dependencies can be installed via `run.sh`
+ ```bash
+ sh ./run.sh
+ ```
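
After installing PyTorch by either method, it's worth confirming that the GPU build was picked up before installing the remaining requirements. A quick check:

```python
# Quick sanity check that the installed PyTorch build can see your GPU.
import torch

print(torch.__version__)
print("CUDA available:", torch.cuda.is_available())
if torch.cuda.is_available():
    print("Device:", torch.cuda.get_device_name(0))
```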
+
+ ## Preparing other pre-models
+ RVC requires some other pre-models for inference and training.
+
+ You can download them from our [Hugging Face space](https://huggingface.co/lj1995/VoiceConversionWebUI/tree/main/).
+
+ ### 1. Download assets
+ Below is a checklist of all the pre-models and other files RVC requires. Scripts to download them are available in the `tools` folder.
+
+ - ./assets/hubert/hubert_base.pt
+
+ - ./assets/pretrained
+
+ - ./assets/uvr5_weights
+
+ To use v2 models, additionally download
+
+ - ./assets/pretrained_v2
+
+ ### 2. Install ffmpeg
+ Skip if ffmpeg and ffprobe are already installed.
+
+ #### Ubuntu/Debian users
+ ```bash
+ sudo apt install ffmpeg
+ ```
+ #### MacOS users
+ ```bash
+ brew install ffmpeg
+ ```
+ #### Windows users
+ Place the files in the root directory after downloading.
+ - Download [ffmpeg.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffmpeg.exe)
+
+ - Download [ffprobe.exe](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/ffprobe.exe)
+
+ ### 3. Download the files required by the rmvpe vocal pitch extraction algorithm
+
+ If you want to use the latest RMVPE vocal pitch extraction algorithm, download the pitch extraction model parameters and place them in the RVC root directory.
+
+ - Download [rmvpe.pt](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/rmvpe.pt)
+
+ #### Download the dml environment for rmvpe (optional, for AMD/Intel GPU users)
+
+ - Download [rmvpe.onnx](https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/rmvpe.onnx)
+
+ ### 4. AMD ROCm (optional, Linux only)
+
+ If you want to run RVC on a Linux system using AMD's ROCm technology, first install the required drivers as described [here](https://rocm.docs.amd.com/en/latest/deploy/linux/os-native/install.html).
+
+ On Arch Linux, you can use pacman to install the required drivers:
+ ```
+ pacman -S rocm-hip-sdk rocm-opencl-sdk
+ ```
+ For some models of GPU (e.g. the RX6700XT), you may also need to set environment variables like the following:
+ ```
+ export ROCM_PATH=/opt/rocm
+ export HSA_OVERRIDE_GFX_VERSION=10.3.0
+ ```
+ Also make sure your current user is in the `render` and `video` groups:
+ ```
+ sudo usermod -aG render $USERNAME
+ sudo usermod -aG video $USERNAME
+ ```
+
+ ## Getting started
+ ### Direct launch
+ Start the WebUI with the following command
+ ```bash
+ python infer-web.py
+ ```
+
+ If you previously installed dependencies with Poetry, start the WebUI like this
+ ```bash
+ poetry run python infer-web.py
+ ```
+
+ ### Using the prepackaged bundle
+ Download and extract `RVC-beta.7z`
+ #### Windows users
+ Double-click `go-web.bat`
+ #### MacOS users
+ ```bash
+ sh ./run.sh
+ ```
+ ### For Intel GPU users needing IPEX (Linux only)
+ ```bash
+ source /opt/intel/oneapi/setvars.sh
+ ```
+
+ ## Credits
+ + [ContentVec](https://github.com/auspicious3000/contentvec/)
+ + [VITS](https://github.com/jaywalnut310/vits)
+ + [HIFIGAN](https://github.com/jik876/hifi-gan)
+ + [Gradio](https://github.com/gradio-app/gradio)
+ + [FFmpeg](https://github.com/FFmpeg/FFmpeg)
+ + [Ultimate Vocal Remover](https://github.com/Anjok07/ultimatevocalremovergui)
+ + [audio-slicer](https://github.com/openvpi/audio-slicer)
+ + [Vocal pitch extraction: RMVPE](https://github.com/Dream-High/RMVPE)
+   + The pretrained model is trained and tested by [yxlllc](https://github.com/yxlllc/RMVPE) and [RVC-Boss](https://github.com/RVC-Boss).
+
+ ## Thanks to all contributors for their efforts
+ <a href="https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/graphs/contributors" target="_blank">
+   <img src="https://contrib.rocks/image?repo=RVC-Project/Retrieval-based-Voice-Conversion-WebUI" />
+ </a>
Retrieval_based_Voice_Conversion_WebUI.ipynb ADDED
@@ -0,0 +1,403 @@
+ {
+   "cells": [
+     {
+       "attachments": {},
+       "cell_type": "markdown",
+       "metadata": {},
+       "source": [
+         "# [Retrieval-based-Voice-Conversion-WebUI](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) Training notebook"
+       ]
+     },
+     {
+       "attachments": {},
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "ZFFCx5J80SGa"
+       },
+       "source": [
+         "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI.ipynb)"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "GmFP6bN9dvOq"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Check the GPU\n",
+         "!nvidia-smi"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "jwu07JgqoFON"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Mount Google Drive\n",
+         "\n",
+         "from google.colab import drive\n",
+         "\n",
+         "drive.mount(\"/content/drive\")"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "wjddIFr1oS3W"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Install dependencies\n",
+         "!apt-get -y install build-essential python3-dev ffmpeg\n",
+         "!pip3 install --upgrade setuptools wheel\n",
+         "!pip3 install --upgrade pip\n",
+         "!pip3 install faiss-cpu==1.7.2 fairseq gradio==3.14.0 ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "ge_97mfpgqTm"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Clone the repository\n",
+         "\n",
+         "!git clone --depth=1 -b stable https://github.com/fumiama/Retrieval-based-Voice-Conversion-WebUI\n",
+         "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
+         "!mkdir -p pretrained uvr5_weights"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "BLDEZADkvlw1"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Update the repository (usually unnecessary)\n",
+         "!git pull"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "pqE0PrnuRqI2"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Install aria2\n",
+         "!apt -y install -qq aria2"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "UG3XpUwEomUz"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Download the base models\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D48k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G48k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D48k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G48k.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "HugjmZqZRuiF"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Download the vocal separation models\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "2RCaT9FTR0ej"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Download hubert_base\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o hubert_base.pt"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "# @title Download the rmvpe model\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o rmvpe.pt"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "Mwk7Q0Loqzjx"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Load the packaged dataset from Google Drive into /content/dataset\n",
+         "\n",
+         "# @markdown Dataset location\n",
+         "DATASET = (\n",
+         "    \"/content/drive/MyDrive/dataset/lulu20230327_32k.zip\"  # @param {type:\"string\"}\n",
+         ")\n",
+         "\n",
+         "!mkdir -p /content/dataset\n",
+         "!unzip -d /content/dataset -B {DATASET}"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "PDlFxWHWEynD"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Rename duplicated filenames in the dataset\n",
+         "!ls -a /content/dataset/\n",
+         "!rename 's/(\\w+)\\.(\\w+)~(\\d*)/$1_$3.$2/' /content/dataset/*.*~*"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "7vh6vphDwO0b"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Launch the WebUI\n",
+         "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
+         "# %load_ext tensorboard\n",
+         "# %tensorboard --logdir /content/Retrieval-based-Voice-Conversion-WebUI/logs\n",
+         "!python3 infer-web.py --colab --pycmd python3"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "FgJuNeAwx5Y_"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Manually back up trained model files to Google Drive\n",
+         "# @markdown Check the model filenames under the logs folder yourself and edit the filenames at the end of the commands below accordingly\n",
+         "\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Model epoch\n",
+         "MODELEPOCH = 9600  # @param {type:\"integer\"}\n",
+         "\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/added_*.index /content/drive/MyDrive/\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/total_*.npy /content/drive/MyDrive/\n",
+         "\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "OVQoLQJXS7WX"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Restore pth files from Google Drive\n",
+         "# @markdown Check the model filenames under the logs folder yourself and edit the filenames at the end of the commands below accordingly\n",
+         "\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Model epoch\n",
+         "MODELEPOCH = 7500  # @param {type:\"integer\"}\n",
+         "\n",
+         "!mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
+         "\n",
+         "!cp /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
+         "!cp /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
+         "!cp /content/drive/MyDrive/*.index /content/\n",
+         "!cp /content/drive/MyDrive/*.npy /content/\n",
+         "!cp /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "ZKAyuKb9J6dz"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Manual preprocessing (not recommended)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Sample rate\n",
+         "BITRATE = 48000  # @param {type:\"integer\"}\n",
+         "# @markdown Number of processes to use\n",
+         "THREADCOUNT = 8  # @param {type:\"integer\"}\n",
+         "\n",
+         "!python3 trainset_preprocess_pipeline_print.py /content/dataset {BITRATE} {THREADCOUNT} logs/{MODELNAME} True"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "CrxJqzAUKmPJ"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Manual feature extraction (not recommended)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Number of processes to use\n",
+         "THREADCOUNT = 8  # @param {type:\"integer\"}\n",
+         "# @markdown Pitch extraction algorithm\n",
+         "ALGO = \"harvest\"  # @param {type:\"string\"}\n",
+         "\n",
+         "!python3 extract_f0_print.py logs/{MODELNAME} {THREADCOUNT} {ALGO}\n",
+         "\n",
+         "!python3 extract_feature_print.py cpu 1 0 0 logs/{MODELNAME} True"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "IMLPLKOaKj58"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Manual training (not recommended)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown GPU(s) to use\n",
+         "USEGPU = \"0\"  # @param {type:\"string\"}\n",
+         "# @markdown Batch size\n",
+         "BATCHSIZE = 32  # @param {type:\"integer\"}\n",
+         "# @markdown Stop at epoch\n",
+         "MODELEPOCH = 3200  # @param {type:\"integer\"}\n",
+         "# @markdown Save interval in epochs\n",
+         "EPOCHSAVE = 100  # @param {type:\"integer\"}\n",
+         "# @markdown Sample rate\n",
+         "MODELSAMPLE = \"48k\"  # @param {type:\"string\"}\n",
+         "# @markdown Whether to cache the training set\n",
+         "CACHEDATA = 1  # @param {type:\"integer\"}\n",
+         "# @markdown Whether to save only the latest ckpt file\n",
+         "ONLYLATEST = 0  # @param {type:\"integer\"}\n",
+         "\n",
+         "!python3 train_nsf_sim_cache_sid_load_pretrain.py -e lulu -sr {MODELSAMPLE} -f0 1 -bs {BATCHSIZE} -g {USEGPU} -te {MODELEPOCH} -se {EPOCHSAVE} -pg pretrained/f0G{MODELSAMPLE}.pth -pd pretrained/f0D{MODELSAMPLE}.pth -l {ONLYLATEST} -c {CACHEDATA}"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "haYA81hySuDl"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Delete all other pths, keeping only the selected one (be careful: read the code closely)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Selected model epoch\n",
+         "MODELEPOCH = 9600  # @param {type:\"integer\"}\n",
+         "\n",
+         "!echo \"Backing up the selected model...\"\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
+         "\n",
+         "!echo \"Deleting...\"\n",
+         "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
+         "!rm /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*.pth\n",
+         "\n",
+         "!echo \"Restoring the selected model...\"\n",
+         "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
+         "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
+         "\n",
+         "!echo \"Done deleting\"\n",
+         "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "QhSiPTVPoIRh"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Clear all files under the project, keeping only the selected model (be careful: read the code closely)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Selected model epoch\n",
+         "MODELEPOCH = 9600  # @param {type:\"integer\"}\n",
+         "\n",
+         "!echo \"Backing up the selected model...\"\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
+         "\n",
+         "!echo \"Deleting...\"\n",
+         "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
+         "!rm -rf /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*\n",
+         "\n",
+         "!echo \"Restoring the selected model...\"\n",
+         "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
+         "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
+         "\n",
+         "!echo \"Done deleting\"\n",
+         "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
+       ]
+     }
+   ],
+   "metadata": {
+     "accelerator": "GPU",
+     "colab": {
+       "private_outputs": true,
+       "provenance": []
+     },
+     "gpuClass": "standard",
+     "kernelspec": {
+       "display_name": "Python 3",
+       "name": "python3"
+     },
+     "language_info": {
+       "name": "python"
+     }
+   },
+   "nbformat": 4,
+   "nbformat_minor": 0
+ }
Retrieval_based_Voice_Conversion_WebUI_v2.ipynb ADDED
@@ -0,0 +1,422 @@
+ {
+   "cells": [
+     {
+       "attachments": {},
+       "cell_type": "markdown",
+       "metadata": {},
+       "source": [
+         "# [Retrieval-based-Voice-Conversion-WebUI](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI) Training notebook"
+       ]
+     },
+     {
+       "attachments": {},
+       "cell_type": "markdown",
+       "metadata": {
+         "id": "ZFFCx5J80SGa"
+       },
+       "source": [
+         "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/blob/main/Retrieval_based_Voice_Conversion_WebUI_v2.ipynb)"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "GmFP6bN9dvOq"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Check the GPU\n",
+         "!nvidia-smi"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "jwu07JgqoFON"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Mount Google Drive\n",
+         "\n",
+         "from google.colab import drive\n",
+         "\n",
+         "drive.mount(\"/content/drive\")"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "wjddIFr1oS3W"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Install dependencies\n",
+         "!apt-get -y install build-essential python3-dev ffmpeg\n",
+         "!pip3 install --upgrade setuptools wheel\n",
+         "!pip3 install --upgrade pip\n",
+         "!pip3 install faiss-cpu==1.7.2 fairseq gradio==3.14.0 ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "ge_97mfpgqTm"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Clone the repository\n",
+         "\n",
+         "!mkdir Retrieval-based-Voice-Conversion-WebUI\n",
+         "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
+         "!git init\n",
+         "!git remote add origin https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI.git\n",
+         "!git fetch origin cfd984812804ddc9247d65b14c82cd32e56c1133 --depth=1\n",
+         "!git reset --hard FETCH_HEAD"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "BLDEZADkvlw1"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Update the repository (usually unnecessary)\n",
+         "!git pull"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "pqE0PrnuRqI2"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Install aria2\n",
+         "!apt -y install -qq aria2"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "UG3XpUwEomUz"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Download the base models\n",
+         "\n",
+         "# v1\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o D48k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o G48k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0D48k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G40k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained -o f0G48k.pth\n",
+         "\n",
+         "# v2\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D40k.pth\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o D48k.pth\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G40k.pth\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o G48k.pth\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D40k.pth\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0D48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0D48k.pth\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G32k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G32k.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G40k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G40k.pth\n",
+         "# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/pretrained_v2/f0G48k.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/pretrained_v2 -o f0G48k.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "HugjmZqZRuiF"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Download the vocal separation models\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP2-人声vocals+非人声instrumentals.pth\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/uvr5_weights -o HP5-主旋律人声vocals+其他instrumentals.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "2RCaT9FTR0ej"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Download hubert_base\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o hubert_base.pt"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {},
+       "outputs": [],
+       "source": [
+         "# @title #Download the rmvpe model\n",
+         "!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/rmvpe.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o rmvpe.pt"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "Mwk7Q0Loqzjx"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Load the packaged dataset from Google Drive into /content/dataset\n",
+         "\n",
+         "# @markdown Dataset location\n",
+         "DATASET = (\n",
+         "    \"/content/drive/MyDrive/dataset/lulu20230327_32k.zip\"  # @param {type:\"string\"}\n",
+         ")\n",
+         "\n",
+         "!mkdir -p /content/dataset\n",
+         "!unzip -d /content/dataset -B {DATASET}"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "PDlFxWHWEynD"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Rename duplicated filenames in the dataset\n",
+         "!ls -a /content/dataset/\n",
+         "!rename 's/(\\w+)\\.(\\w+)~(\\d*)/$1_$3.$2/' /content/dataset/*.*~*"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "7vh6vphDwO0b"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Launch the WebUI\n",
+         "%cd /content/Retrieval-based-Voice-Conversion-WebUI\n",
+         "# %load_ext tensorboard\n",
+         "# %tensorboard --logdir /content/Retrieval-based-Voice-Conversion-WebUI/logs\n",
+         "!python3 infer-web.py --colab --pycmd python3"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "FgJuNeAwx5Y_"
+       },
+       "outputs": [],
+       "source": [
+         "# @title #Manually back up trained model files to Google Drive\n",
+         "# @markdown #Check the model filenames under the logs folder yourself and edit the filenames at the end of the commands below accordingly\n",
+         "\n",
+         "# @markdown #Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown #Model epoch\n",
+         "MODELEPOCH = 9600  # @param {type:\"integer\"}\n",
+         "\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/added_*.index /content/drive/MyDrive/\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/total_*.npy /content/drive/MyDrive/\n",
+         "\n",
+         "!cp /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "OVQoLQJXS7WX"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Restore pth files from Google Drive\n",
+         "# @markdown Check the model filenames under the logs folder yourself and edit the filenames at the end of the commands below accordingly\n",
+         "\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Model epoch\n",
+         "MODELEPOCH = 7500  # @param {type:\"integer\"}\n",
+         "\n",
+         "!mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
+         "\n",
+         "!cp /content/drive/MyDrive/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
+         "!cp /content/drive/MyDrive/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
+         "!cp /content/drive/MyDrive/*.index /content/\n",
+         "!cp /content/drive/MyDrive/*.npy /content/\n",
+         "!cp /content/drive/MyDrive/{MODELNAME}{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODELNAME}.pth"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "ZKAyuKb9J6dz"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Manual preprocessing (not recommended)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Sample rate\n",
+         "BITRATE = 48000  # @param {type:\"integer\"}\n",
+         "# @markdown Number of processes to use\n",
+         "THREADCOUNT = 8  # @param {type:\"integer\"}\n",
+         "\n",
+         "!python3 trainset_preprocess_pipeline_print.py /content/dataset {BITRATE} {THREADCOUNT} logs/{MODELNAME} True"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "CrxJqzAUKmPJ"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Manual feature extraction (not recommended)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown Number of processes to use\n",
+         "THREADCOUNT = 8  # @param {type:\"integer\"}\n",
+         "# @markdown Pitch extraction algorithm\n",
+         "ALGO = \"harvest\"  # @param {type:\"string\"}\n",
+         "\n",
+         "!python3 extract_f0_print.py logs/{MODELNAME} {THREADCOUNT} {ALGO}\n",
+         "\n",
+         "!python3 extract_feature_print.py cpu 1 0 0 logs/{MODELNAME} True"
+       ]
+     },
+     {
+       "cell_type": "code",
+       "execution_count": null,
+       "metadata": {
+         "id": "IMLPLKOaKj58"
+       },
+       "outputs": [],
+       "source": [
+         "# @title Manual training (not recommended)\n",
+         "# @markdown Model name\n",
+         "MODELNAME = \"lulu\"  # @param {type:\"string\"}\n",
+         "# @markdown GPU(s) to use\n",
327
+ "USEGPU = \"0\" # @param {type:\"string\"}\n",
328
+ "# @markdown 批大小\n",
329
+ "BATCHSIZE = 32 # @param {type:\"integer\"}\n",
330
+ "# @markdown 停止的epoch\n",
331
+ "MODELEPOCH = 3200 # @param {type:\"integer\"}\n",
332
+ "# @markdown 保存epoch间隔\n",
333
+ "EPOCHSAVE = 100 # @param {type:\"integer\"}\n",
334
+ "# @markdown 采样率\n",
335
+ "MODELSAMPLE = \"48k\" # @param {type:\"string\"}\n",
336
+ "# @markdown 是否缓存训练集\n",
337
+ "CACHEDATA = 1 # @param {type:\"integer\"}\n",
338
+ "# @markdown 是否仅保存最新的ckpt文件\n",
339
+ "ONLYLATEST = 0 # @param {type:\"integer\"}\n",
340
+ "\n",
341
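+ "# Flags (mapped from the params above): -e model name, -sr sample rate, -f0 1 = train with pitch guidance, -bs batch size, -g GPU ids, -te final epoch, -se save interval, -pg/-pd pretrained G/D weights, -l keep only latest ckpt, -c cache training set\n",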
+ "!python3 train_nsf_sim_cache_sid_load_pretrain.py -e lulu -sr {MODELSAMPLE} -f0 1 -bs {BATCHSIZE} -g {USEGPU} -te {MODELEPOCH} -se {EPOCHSAVE} -pg pretrained/f0G{MODELSAMPLE}.pth -pd pretrained/f0D{MODELSAMPLE}.pth -l {ONLYLATEST} -c {CACHEDATA}"
342
+ ]
343
+ },
344
+ {
345
+ "cell_type": "code",
346
+ "execution_count": null,
347
+ "metadata": {
348
+ "id": "haYA81hySuDl"
349
+ },
350
+ "outputs": [],
351
+ "source": [
352
+ "# @title 删除其它pth,只留选中的(慎点,仔细看代码)\n",
353
+ "# @markdown 模型名\n",
354
+ "MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
355
+ "# @markdown 选中模型epoch\n",
356
+ "MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
357
+ "\n",
358
+ "!echo \"备份选中的模型。。。\"\n",
359
+ "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
360
+ "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
361
+ "\n",
362
+ "!echo \"正在删除。。。\"\n",
363
+ "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
364
+ "!rm /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*.pth\n",
365
+ "\n",
366
+ "!echo \"恢复选中的模型。。。\"\n",
367
+ "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
368
+ "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
369
+ "\n",
370
+ "!echo \"删除完成\"\n",
371
+ "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
372
+ ]
373
+ },
374
+ {
375
+ "cell_type": "code",
376
+ "execution_count": null,
377
+ "metadata": {
378
+ "id": "QhSiPTVPoIRh"
379
+ },
380
+ "outputs": [],
381
+ "source": [
382
+ "# @title 清除项目下所有文件,只留选中的模型(慎点,仔细看代码)\n",
383
+ "# @markdown 模型名\n",
384
+ "MODELNAME = \"lulu\" # @param {type:\"string\"}\n",
385
+ "# @markdown 选中模型epoch\n",
386
+ "MODELEPOCH = 9600 # @param {type:\"integer\"}\n",
387
+ "\n",
388
+ "!echo \"备份选中的模型。。。\"\n",
389
+ "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth /content/{MODELNAME}_D_{MODELEPOCH}.pth\n",
390
+ "!cp /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth /content/{MODELNAME}_G_{MODELEPOCH}.pth\n",
391
+ "\n",
392
+ "!echo \"正在删除。。。\"\n",
393
+ "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}\n",
394
+ "!rm -rf /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/*\n",
395
+ "\n",
396
+ "!echo \"恢复选中的模型。。。\"\n",
397
+ "!mv /content/{MODELNAME}_D_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/G_{MODELEPOCH}.pth\n",
398
+ "!mv /content/{MODELNAME}_G_{MODELEPOCH}.pth /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}/D_{MODELEPOCH}.pth\n",
399
+ "\n",
400
+ "!echo \"删除完成\"\n",
401
+ "!ls /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODELNAME}"
402
+ ]
403
+ }
404
+ ],
405
+ "metadata": {
406
+ "accelerator": "GPU",
407
+ "colab": {
408
+ "private_outputs": true,
409
+ "provenance": []
410
+ },
411
+ "gpuClass": "standard",
412
+ "kernelspec": {
413
+ "display_name": "Python 3",
414
+ "name": "python3"
415
+ },
416
+ "language_info": {
417
+ "name": "python"
418
+ }
419
+ },
420
+ "nbformat": 4,
421
+ "nbformat_minor": 0
422
+ }
a.png ADDED
api_231006.py ADDED
@@ -0,0 +1,440 @@
1
+ # API for the 231006 release version, by Xiaokai
2
+ import os
3
+ import sys
4
+ import json
5
+ import re
6
+ import time
7
+ import librosa
8
+ import torch
9
+ import numpy as np
10
+ import torch.nn.functional as F
11
+ import torchaudio.transforms as tat
12
+ import sounddevice as sd
13
+ from dotenv import load_dotenv
14
+ from fastapi import FastAPI, HTTPException
15
+ from pydantic import BaseModel
16
+ import threading
17
+ import uvicorn
18
+ import logging
19
+
20
+ # Initialize the logger
21
+ logging.basicConfig(level=logging.INFO)
22
+ logger = logging.getLogger(__name__)
23
+
24
+ # Define FastAPI app
25
+ app = FastAPI()
26
+
27
+ class GUIConfig:
28
+ def __init__(self) -> None:
29
+ self.pth_path: str = ""
30
+ self.index_path: str = ""
31
+ self.pitch: int = 0
32
+ self.samplerate: int = 40000
33
+ self.block_time: float = 1.0 # s
34
+ self.buffer_num: int = 1
35
+ self.threhold: int = -60
36
+ self.crossfade_time: float = 0.05
37
+ self.extra_time: float = 2.5
38
+ self.I_noise_reduce = False
39
+ self.O_noise_reduce = False
40
+ self.rms_mix_rate = 0.0
41
+ self.index_rate = 0.3
42
+ self.f0method = "rmvpe"
43
+ self.sg_input_device = ""
44
+ self.sg_output_device = ""
45
+
46
+ class ConfigData(BaseModel):
47
+ pth_path: str
48
+ index_path: str
49
+ sg_input_device: str
50
+ sg_output_device: str
51
+ threhold: int = -60
52
+ pitch: int = 0
53
+ index_rate: float = 0.3
54
+ rms_mix_rate: float = 0.0
55
+ block_time: float = 0.25
56
+ crossfade_length: float = 0.05
57
+ extra_time: float = 2.5
58
+ n_cpu: int = 4
59
+ I_noise_reduce: bool = False
60
+ O_noise_reduce: bool = False
61
+
62
+ class AudioAPI:
63
+ def __init__(self) -> None:
64
+ self.gui_config = GUIConfig()
65
+ self.config = None # Initialize Config object as None
66
+ self.flag_vc = False
67
+ self.function = "vc"
68
+ self.delay_time = 0
69
+ self.rvc = None # Initialize RVC object as None
70
+
71
+ def load(self):
72
+ input_devices, output_devices, _, _ = self.get_devices()
73
+ try:
74
+ with open("configs/config.json", "r", encoding='utf-8') as j:
75
+ data = json.load(j)
76
+ data["rmvpe"] = True # Ensure rmvpe is the only f0method
77
+ if data["sg_input_device"] not in input_devices:
78
+ data["sg_input_device"] = input_devices[sd.default.device[0]]
79
+ if data["sg_output_device"] not in output_devices:
80
+ data["sg_output_device"] = output_devices[sd.default.device[1]]
81
+ except Exception as e:
82
+ logger.error(f"Failed to load configuration: {e}")
83
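+ # Fall back to a default configuration and write it back to configs/config.json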
+ with open("configs/config.json", "w", encoding='utf-8') as j:
84
+ data = {
85
+ "pth_path": " ",
86
+ "index_path": " ",
87
+ "sg_input_device": input_devices[sd.default.device[0]],
88
+ "sg_output_device": output_devices[sd.default.device[1]],
89
+ "threhold": "-60",
90
+ "pitch": "0",
91
+ "index_rate": "0",
92
+ "rms_mix_rate": "0",
93
+ "block_time": "0.25",
94
+ "crossfade_length": "0.05",
95
+ "extra_time": "2.5",
96
+ "f0method": "rmvpe",
97
+ "use_jit": False,
98
+ }
99
+ data["rmvpe"] = True # Ensure rmvpe is the only f0method
100
+ json.dump(data, j, ensure_ascii=False)
101
+ return data
102
+
103
+ def set_values(self, values):
104
+ logger.info(f"Setting values: {values}")
105
+ if not values.pth_path.strip():
106
+ raise HTTPException(status_code=400, detail="Please select a .pth file")
107
+ if not values.index_path.strip():
108
+ raise HTTPException(status_code=400, detail="Please select an index file")
109
+ self.set_devices(values.sg_input_device, values.sg_output_device)
110
+ self.config.use_jit = False
111
+ self.gui_config.pth_path = values.pth_path
112
+ self.gui_config.index_path = values.index_path
113
+ self.gui_config.threhold = values.threhold
114
+ self.gui_config.pitch = values.pitch
115
+ self.gui_config.block_time = values.block_time
116
+ self.gui_config.crossfade_time = values.crossfade_length
117
+ self.gui_config.extra_time = values.extra_time
118
+ self.gui_config.I_noise_reduce = values.I_noise_reduce
119
+ self.gui_config.O_noise_reduce = values.O_noise_reduce
120
+ self.gui_config.rms_mix_rate = values.rms_mix_rate
121
+ self.gui_config.index_rate = values.index_rate
122
+ self.gui_config.n_cpu = values.n_cpu
123
+ self.gui_config.f0method = "rmvpe"
124
+ return True
125
+
126
+ def start_vc(self):
127
+ torch.cuda.empty_cache()
128
+ self.flag_vc = True
129
+ self.rvc = rvc_for_realtime.RVC(
130
+ self.gui_config.pitch,
131
+ self.gui_config.pth_path,
132
+ self.gui_config.index_path,
133
+ self.gui_config.index_rate,
134
+ 0,
135
+ 0,
136
+ 0,
137
+ self.config,
138
+ self.rvc if self.rvc else None,
139
+ )
140
+ self.gui_config.samplerate = self.rvc.tgt_sr
141
+ self.zc = self.rvc.tgt_sr // 100
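+ # zc = samples per 10 ms at the target sample rate; the frame sizes below are all multiples of it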
142
+ self.block_frame = (
143
+ int(
144
+ np.round(
145
+ self.gui_config.block_time
146
+ * self.gui_config.samplerate
147
+ / self.zc
148
+ )
149
+ )
150
+ * self.zc
151
+ )
152
+ self.block_frame_16k = 160 * self.block_frame // self.zc
153
+ self.crossfade_frame = (
154
+ int(
155
+ np.round(
156
+ self.gui_config.crossfade_time
157
+ * self.gui_config.samplerate
158
+ / self.zc
159
+ )
160
+ )
161
+ * self.zc
162
+ )
163
+ self.sola_search_frame = self.zc
164
+ self.extra_frame = (
165
+ int(
166
+ np.round(
167
+ self.gui_config.extra_time
168
+ * self.gui_config.samplerate
169
+ / self.zc
170
+ )
171
+ )
172
+ * self.zc
173
+ )
174
+ self.input_wav = torch.zeros(
175
+ self.extra_frame + self.crossfade_frame + self.sola_search_frame + self.block_frame,
176
+ device=self.config.device,
177
+ dtype=torch.float32,
178
+ )
179
+ self.input_wav_res = torch.zeros(
180
+ 160 * self.input_wav.shape[0] // self.zc,
181
+ device=self.config.device,
182
+ dtype=torch.float32,
183
+ )
184
+ self.pitch = np.zeros(self.input_wav.shape[0] // self.zc, dtype="int32")
185
+ self.pitchf = np.zeros(self.input_wav.shape[0] // self.zc, dtype="float64")
186
+ self.sola_buffer = torch.zeros(self.crossfade_frame, device=self.config.device, dtype=torch.float32)
187
+ self.nr_buffer = self.sola_buffer.clone()
188
+ self.output_buffer = self.input_wav.clone()
189
+ self.res_buffer = torch.zeros(2 * self.zc, device=self.config.device, dtype=torch.float32)
190
+ self.valid_rate = 1 - (self.extra_frame - 1) / self.input_wav.shape[0]
191
+ self.fade_in_window = (
192
+ torch.sin(0.5 * np.pi * torch.linspace(0.0, 1.0, steps=self.crossfade_frame, device=self.config.device, dtype=torch.float32)) ** 2
193
+ )
194
+ self.fade_out_window = 1 - self.fade_in_window
195
+ self.resampler = tat.Resample(
196
+ orig_freq=self.gui_config.samplerate,
197
+ new_freq=16000,
198
+ dtype=torch.float32,
199
+ ).to(self.config.device)
200
+ self.tg = TorchGate(
201
+ sr=self.gui_config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9
202
+ ).to(self.config.device)
203
+ thread_vc = threading.Thread(target=self.soundinput)
204
+ thread_vc.start()
205
+
206
+ def soundinput(self):
207
+ channels = 1 if sys.platform == "darwin" else 2
208
+ with sd.Stream(
209
+ channels=channels,
210
+ callback=self.audio_callback,
211
+ blocksize=self.block_frame,
212
+ samplerate=self.gui_config.samplerate,
213
+ dtype="float32",
214
+ ) as stream:
215
+ global stream_latency
216
+ stream_latency = stream.latency[-1]
217
+ while self.flag_vc:
218
+ time.sleep(self.gui_config.block_time)
219
+ logger.info("Audio block passed.")
220
+ logger.info("Ending VC")
221
+
222
+ def audio_callback(self, indata: np.ndarray, outdata: np.ndarray, frames, times, status):
223
+ start_time = time.perf_counter()
224
+ indata = librosa.to_mono(indata.T)
225
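+ # Noise gate: silence 10 ms frames whose RMS level falls below the configured dB threshold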
+ if self.gui_config.threhold > -60:
226
+ rms = librosa.feature.rms(y=indata, frame_length=4 * self.zc, hop_length=self.zc)
227
+ db_threhold = (librosa.amplitude_to_db(rms, ref=1.0)[0] < self.gui_config.threhold)
228
+ for i in range(db_threhold.shape[0]):
229
+ if db_threhold[i]:
230
+ indata[i * self.zc : (i + 1) * self.zc] = 0
231
+ self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone()
232
+ self.input_wav[-self.block_frame :] = torch.from_numpy(indata).to(self.config.device)
233
+ self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone()
234
+ if self.gui_config.I_noise_reduce and self.function == "vc":
235
+ input_wav = self.input_wav[-self.crossfade_frame - self.block_frame - 2 * self.zc :]
236
+ input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0))[0, 2 * self.zc :]
237
+ input_wav[: self.crossfade_frame] *= self.fade_in_window
238
+ input_wav[: self.crossfade_frame] += self.nr_buffer * self.fade_out_window
239
+ self.nr_buffer[:] = input_wav[-self.crossfade_frame :]
240
+ input_wav = torch.cat((self.res_buffer[:], input_wav[: self.block_frame]))
241
+ self.res_buffer[:] = input_wav[-2 * self.zc :]
242
+ self.input_wav_res[-self.block_frame_16k - 160 :] = self.resampler(input_wav)[160:]
243
+ else:
244
+ self.input_wav_res[-self.block_frame_16k - 160 :] = self.resampler(self.input_wav[-self.block_frame - 2 * self.zc :])[160:]
245
+ if self.function == "vc":
246
+ f0_extractor_frame = self.block_frame_16k + 800
247
+ if self.gui_config.f0method == "rmvpe":
248
+ f0_extractor_frame = (5120 * ((f0_extractor_frame - 1) // 5120 + 1) - 160)
249
+ infer_wav = self.rvc.infer(
250
+ self.input_wav_res,
251
+ self.input_wav_res[-f0_extractor_frame:].cpu().numpy(),
252
+ self.block_frame_16k,
253
+ self.valid_rate,
254
+ self.pitch,
255
+ self.pitchf,
256
+ self.gui_config.f0method,
257
+ )
258
+ infer_wav = infer_wav[-self.crossfade_frame - self.sola_search_frame - self.block_frame :]
259
+ else:
260
+ infer_wav = self.input_wav[-self.crossfade_frame - self.sola_search_frame - self.block_frame :].clone()
261
+ if (self.gui_config.O_noise_reduce and self.function == "vc") or (self.gui_config.I_noise_reduce and self.function == "im"):
262
+ self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone()
263
+ self.output_buffer[-self.block_frame :] = infer_wav[-self.block_frame :]
264
+ infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0)
265
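+ # Volume envelope mixing: pull the output loudness toward the input's RMS envelope when rms_mix_rate < 1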
+ if self.gui_config.rms_mix_rate < 1 and self.function == "vc":
266
+ rms1 = librosa.feature.rms(y=self.input_wav_res[-160 * infer_wav.shape[0] // self.zc :].cpu().numpy(), frame_length=640, hop_length=160)
267
+ rms1 = torch.from_numpy(rms1).to(self.config.device)
268
+ rms1 = F.interpolate(rms1.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear", align_corners=True)[0, 0, :-1]
269
+ rms2 = librosa.feature.rms(y=infer_wav[:].cpu().numpy(), frame_length=4 * self.zc, hop_length=self.zc)
270
+ rms2 = torch.from_numpy(rms2).to(self.config.device)
271
+ rms2 = F.interpolate(rms2.unsqueeze(0), size=infer_wav.shape[0] + 1, mode="linear", align_corners=True)[0, 0, :-1]
272
+ rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3)
273
+ infer_wav *= torch.pow(rms1 / rms2, torch.tensor(1 - self.gui_config.rms_mix_rate))
274
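+ # SOLA: find the offset with maximum normalized cross-correlation against the previous tail, then crossfade there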
+ conv_input = infer_wav[None, None, : self.crossfade_frame + self.sola_search_frame]
275
+ cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
276
+ cor_den = torch.sqrt(F.conv1d(conv_input**2, torch.ones(1, 1, self.crossfade_frame, device=self.config.device)) + 1e-8)
277
+ if sys.platform == "darwin":
278
+ _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0])
279
+ sola_offset = sola_offset.item()
280
+ else:
281
+ sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
282
+ logger.info(f"sola_offset = {sola_offset}")
283
+ infer_wav = infer_wav[sola_offset : sola_offset + self.block_frame + self.crossfade_frame]
284
+ infer_wav[: self.crossfade_frame] *= self.fade_in_window
285
+ infer_wav[: self.crossfade_frame] += self.sola_buffer * self.fade_out_window
286
+ self.sola_buffer[:] = infer_wav[-self.crossfade_frame :]
287
+ if sys.platform == "darwin":
288
+ outdata[:] = infer_wav[: -self.crossfade_frame].cpu().numpy()[:, np.newaxis]
289
+ else:
290
+ outdata[:] = infer_wav[: -self.crossfade_frame].repeat(2, 1).t().cpu().numpy()
291
+ total_time = time.perf_counter() - start_time
292
+ logger.info(f"Infer time: {total_time:.2f}")
293
+
294
+ def get_devices(self, update: bool = True):
295
+ if update:
296
+ sd._terminate()
297
+ sd._initialize()
298
+ devices = sd.query_devices()
299
+ hostapis = sd.query_hostapis()
300
+ for hostapi in hostapis:
301
+ for device_idx in hostapi["devices"]:
302
+ devices[device_idx]["hostapi_name"] = hostapi["name"]
303
+ input_devices = [
304
+ f"{d['name']} ({d['hostapi_name']})"
305
+ for d in devices
306
+ if d["max_input_channels"] > 0
307
+ ]
308
+ output_devices = [
309
+ f"{d['name']} ({d['hostapi_name']})"
310
+ for d in devices
311
+ if d["max_output_channels"] > 0
312
+ ]
313
+ input_devices_indices = [
314
+ d["index"] if "index" in d else d["name"]
315
+ for d in devices
316
+ if d["max_input_channels"] > 0
317
+ ]
318
+ output_devices_indices = [
319
+ d["index"] if "index" in d else d["name"]
320
+ for d in devices
321
+ if d["max_output_channels"] > 0
322
+ ]
323
+ return (
324
+ input_devices,
325
+ output_devices,
326
+ input_devices_indices,
327
+ output_devices_indices,
328
+ )
329
+
330
+ def set_devices(self, input_device, output_device):
331
+ (
332
+ input_devices,
333
+ output_devices,
334
+ input_device_indices,
335
+ output_device_indices,
336
+ ) = self.get_devices()
337
+ logger.debug(f"Available input devices: {input_devices}")
338
+ logger.debug(f"Available output devices: {output_devices}")
339
+ logger.debug(f"Selected input device: {input_device}")
340
+ logger.debug(f"Selected output device: {output_device}")
341
+
342
+ if input_device not in input_devices:
343
+ logger.error(f"Input device '{input_device}' is not in the list of available devices")
344
+ raise HTTPException(status_code=400, detail=f"Input device '{input_device}' is not available")
345
+
346
+ if output_device not in output_devices:
347
+ logger.error(f"Output device '{output_device}' is not in the list of available devices")
348
+ raise HTTPException(status_code=400, detail=f"Output device '{output_device}' is not available")
349
+
350
+ sd.default.device[0] = input_device_indices[input_devices.index(input_device)]
351
+ sd.default.device[1] = output_device_indices[output_devices.index(output_device)]
352
+ logger.info(f"Input device set to {sd.default.device[0]}: {input_device}")
353
+ logger.info(f"Output device set to {sd.default.device[1]}: {output_device}")
354
+
355
+ audio_api = AudioAPI()
356
+
357
+ @app.get("/inputDevices", response_model=list)
358
+ def get_input_devices():
359
+ try:
360
+ input_devices, _, _, _ = audio_api.get_devices()
361
+ return input_devices
362
+ except Exception as e:
363
+ logger.error(f"Failed to get input devices: {e}")
364
+ raise HTTPException(status_code=500, detail="Failed to get input devices")
365
+
366
+ @app.get("/outputDevices", response_model=list)
367
+ def get_output_devices():
368
+ try:
369
+ _, output_devices, _, _ = audio_api.get_devices()
370
+ return output_devices
371
+ except Exception as e:
372
+ logger.error(f"Failed to get output devices: {e}")
373
+ raise HTTPException(status_code=500, detail="Failed to get output devices")
374
+
375
+ @app.post("/config")
376
+ def configure_audio(config_data: ConfigData):
377
+ try:
378
+ logger.info(f"Configuring audio with data: {config_data}")
379
+ if audio_api.set_values(config_data):
380
+ settings = config_data.dict()
381
+ settings["use_jit"] = False
382
+ settings["f0method"] = "rmvpe"
383
+ with open("configs/config.json", "w", encoding='utf-8') as j:
384
+ json.dump(settings, j, ensure_ascii=False)
385
+ logger.info("Configuration set successfully")
386
+ return {"message": "Configuration set successfully"}
387
+ except HTTPException as e:
388
+ logger.error(f"Configuration error: {e.detail}")
389
+ raise
390
+ except Exception as e:
391
+ logger.error(f"Configuration failed: {e}")
392
+ raise HTTPException(status_code=400, detail=f"Configuration failed: {e}")
393
+
394
+ @app.post("/start")
395
+ def start_conversion():
396
+ try:
397
+ if not audio_api.flag_vc:
398
+ audio_api.start_vc()
399
+ return {"message": "Audio conversion started"}
400
+ else:
401
+ logger.warning("Audio conversion already running")
402
+ raise HTTPException(status_code=400, detail="Audio conversion already running")
403
+ except HTTPException as e:
404
+ logger.error(f"Start conversion error: {e.detail}")
405
+ raise
406
+ except Exception as e:
407
+ logger.error(f"Failed to start conversion: {e}")
408
+ raise HTTPException(status_code=500, detail=f"Failed to start conversion: {e}")
409
+
410
+ @app.post("/stop")
411
+ def stop_conversion():
412
+ try:
413
+ if audio_api.flag_vc:
414
+ audio_api.flag_vc = False
415
+ global stream_latency
416
+ stream_latency = -1
417
+ return {"message": "Audio conversion stopped"}
418
+ else:
419
+ logger.warning("Audio conversion not running")
420
+ raise HTTPException(status_code=400, detail="Audio conversion not running")
421
+ except HTTPException as e:
422
+ logger.error(f"Stop conversion error: {e.detail}")
423
+ raise
424
+ except Exception as e:
425
+ logger.error(f"Failed to stop conversion: {e}")
426
+ raise HTTPException(status_code=500, detail=f"Failed to stop conversion: {e}")
427
+
428
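+ # Example usage once the server is running (it listens on port 6242):
+ #   curl http://127.0.0.1:6242/inputDevices
+ #   curl -X POST http://127.0.0.1:6242/start
+ #   curl -X POST http://127.0.0.1:6242/stop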
+ if __name__ == "__main__":
429
+ if sys.platform == "win32":
430
+ from multiprocessing import freeze_support
431
+ freeze_support()
432
+ load_dotenv()
433
+ os.environ["OMP_NUM_THREADS"] = "4"
434
+ if sys.platform == "darwin":
435
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
436
+ from tools.torchgate import TorchGate
437
+ import tools.rvc_for_realtime as rvc_for_realtime
438
+ from configs.config import Config
439
+ audio_api.config = Config()
440
+ uvicorn.run(app, host="0.0.0.0", port=6242)
api_240604.py ADDED
@@ -0,0 +1,565 @@
1
+ # API for the 240604 release version, by Xiaokai
2
+ import os
3
+ import sys
4
+ import json
5
+ import re
6
+ import time
7
+ import librosa
8
+ import torch
9
+ import numpy as np
10
+ import torch.nn.functional as F
11
+ import torchaudio.transforms as tat
12
+ import sounddevice as sd
13
+ from dotenv import load_dotenv
14
+ from fastapi import FastAPI, HTTPException
15
+ from pydantic import BaseModel
16
+ import threading
17
+ import uvicorn
18
+ import logging
19
+ from multiprocessing import Queue, Process, cpu_count, freeze_support
20
+
21
+ # Initialize the logger
22
+ logging.basicConfig(level=logging.INFO)
23
+ logger = logging.getLogger(__name__)
24
+
25
+ # Define FastAPI app
26
+ app = FastAPI()
27
+
28
+ class GUIConfig:
29
+ def __init__(self) -> None:
30
+ self.pth_path: str = ""
31
+ self.index_path: str = ""
32
+ self.pitch: int = 0
33
+ self.formant: float = 0.0
34
+ self.sr_type: str = "sr_model"
35
+ self.block_time: float = 0.25 # s
36
+ self.threhold: int = -60
37
+ self.crossfade_time: float = 0.05
38
+ self.extra_time: float = 2.5
39
+ self.I_noise_reduce: bool = False
40
+ self.O_noise_reduce: bool = False
41
+ self.use_pv: bool = False
42
+ self.rms_mix_rate: float = 0.0
43
+ self.index_rate: float = 0.0
44
+ self.n_cpu: int = 4
45
+ self.f0method: str = "fcpe"
46
+ self.sg_input_device: str = ""
47
+ self.sg_output_device: str = ""
48
+
49
+ class ConfigData(BaseModel):
50
+ pth_path: str
51
+ index_path: str
52
+ sg_input_device: str
53
+ sg_output_device: str
54
+ threhold: int = -60
55
+ pitch: int = 0
56
+ formant: float = 0.0
57
+ index_rate: float = 0.3
58
+ rms_mix_rate: float = 0.0
59
+ block_time: float = 0.25
60
+ crossfade_length: float = 0.05
61
+ extra_time: float = 2.5
62
+ n_cpu: int = 4
63
+ I_noise_reduce: bool = False
64
+ O_noise_reduce: bool = False
65
+ use_pv: bool = False
66
+ f0method: str = "fcpe"
67
+
68
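+ # Worker process that runs pyworld's Harvest F0 estimator on chunks taken from inp_q and signals completion via opt_q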
+ class Harvest(Process):
69
+ def __init__(self, inp_q, opt_q):
70
+ super(Harvest, self).__init__()
71
+ self.inp_q = inp_q
72
+ self.opt_q = opt_q
73
+
74
+ def run(self):
75
+ import numpy as np
76
+ import pyworld
77
+ while True:
78
+ idx, x, res_f0, n_cpu, ts = self.inp_q.get()
79
+ f0, t = pyworld.harvest(
80
+ x.astype(np.double),
81
+ fs=16000,
82
+ f0_ceil=1100,
83
+ f0_floor=50,
84
+ frame_period=10,
85
+ )
86
+ res_f0[idx] = f0
87
+ if len(res_f0.keys()) >= n_cpu:
88
+ self.opt_q.put(ts)
89
+
90
+ class AudioAPI:
91
+ def __init__(self) -> None:
92
+ self.gui_config = GUIConfig()
93
+ self.config = None # Initialize Config object as None
94
+ self.flag_vc = False
95
+ self.function = "vc"
96
+ self.delay_time = 0
97
+ self.rvc = None # Initialize RVC object as None
98
+ self.inp_q = None
99
+ self.opt_q = None
100
+ self.n_cpu = min(cpu_count(), 8)
101
+
102
+ def initialize_queues(self):
103
+ self.inp_q = Queue()
104
+ self.opt_q = Queue()
105
+ for _ in range(self.n_cpu):
106
+ p = Harvest(self.inp_q, self.opt_q)
107
+ p.daemon = True
108
+ p.start()
109
+
110
+ def load(self):
111
+ input_devices, output_devices, _, _ = self.get_devices()
112
+ try:
113
+ with open("configs/config.json", "r", encoding='utf-8') as j:
114
+ data = json.load(j)
115
+ if data["sg_input_device"] not in input_devices:
116
+ data["sg_input_device"] = input_devices[sd.default.device[0]]
117
+ if data["sg_output_device"] not in output_devices:
118
+ data["sg_output_device"] = output_devices[sd.default.device[1]]
119
+ except Exception as e:
120
+ logger.error(f"Failed to load configuration: {e}")
121
+ with open("configs/config.json", "w", encoding='utf-8') as j:
122
+ data = {
123
+ "pth_path": "",
124
+ "index_path": "",
125
+ "sg_input_device": input_devices[sd.default.device[0]],
126
+ "sg_output_device": output_devices[sd.default.device[1]],
127
+ "threhold": -60,
128
+ "pitch": 0,
129
+ "formant": 0.0,
130
+ "index_rate": 0,
131
+ "rms_mix_rate": 0,
132
+ "block_time": 0.25,
133
+ "crossfade_length": 0.05,
134
+ "extra_time": 2.5,
135
+ "n_cpu": 4,
136
+ "f0method": "fcpe",
137
+ "use_jit": False,
138
+ "use_pv": False,
139
+ }
140
+ json.dump(data, j, ensure_ascii=False)
141
+ return data
142
+
143
+ def set_values(self, values):
144
+ logger.info(f"Setting values: {values}")
145
+ if not values.pth_path.strip():
146
+ raise HTTPException(status_code=400, detail="Please select a .pth file")
147
+ if not values.index_path.strip():
148
+ raise HTTPException(status_code=400, detail="Please select an index file")
149
+ self.set_devices(values.sg_input_device, values.sg_output_device)
150
+ self.config.use_jit = False
151
+ self.gui_config.pth_path = values.pth_path
152
+ self.gui_config.index_path = values.index_path
153
+ self.gui_config.threhold = values.threhold
154
+ self.gui_config.pitch = values.pitch
155
+ self.gui_config.formant = values.formant
156
+ self.gui_config.block_time = values.block_time
157
+ self.gui_config.crossfade_time = values.crossfade_length
158
+ self.gui_config.extra_time = values.extra_time
159
+ self.gui_config.I_noise_reduce = values.I_noise_reduce
160
+ self.gui_config.O_noise_reduce = values.O_noise_reduce
161
+ self.gui_config.rms_mix_rate = values.rms_mix_rate
162
+ self.gui_config.index_rate = values.index_rate
163
+ self.gui_config.n_cpu = values.n_cpu
164
+ self.gui_config.use_pv = values.use_pv
165
+ self.gui_config.f0method = values.f0method
166
+ return True
167
+
168
+ def start_vc(self):
169
+ torch.cuda.empty_cache()
170
+ self.flag_vc = True
171
+ self.rvc = rvc_for_realtime.RVC(
172
+ self.gui_config.pitch,
173
+ self.gui_config.pth_path,
174
+ self.gui_config.index_path,
175
+ self.gui_config.index_rate,
176
+ self.gui_config.n_cpu,
177
+ self.inp_q,
178
+ self.opt_q,
179
+ self.config,
180
+ self.rvc if self.rvc else None,
181
+ )
182
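+ # Pick the working sample rate: the model's target rate, or the audio device's rate when sr_type == "sr_device"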
+ self.gui_config.samplerate = (
183
+ self.rvc.tgt_sr
184
+ if self.gui_config.sr_type == "sr_model"
185
+ else self.get_device_samplerate()
186
+ )
187
+ self.zc = self.gui_config.samplerate // 100
188
+ self.block_frame = (
189
+ int(
190
+ np.round(
191
+ self.gui_config.block_time
192
+ * self.gui_config.samplerate
193
+ / self.zc
194
+ )
195
+ )
196
+ * self.zc
197
+ )
198
+ self.block_frame_16k = 160 * self.block_frame // self.zc
199
+ self.crossfade_frame = (
200
+ int(
201
+ np.round(
202
+ self.gui_config.crossfade_time
203
+ * self.gui_config.samplerate
204
+ / self.zc
205
+ )
206
+ )
207
+ * self.zc
208
+ )
209
+ self.sola_buffer_frame = min(self.crossfade_frame, 4 * self.zc)
210
+ self.sola_search_frame = self.zc
211
+ self.extra_frame = (
212
+ int(
213
+ np.round(
214
+ self.gui_config.extra_time
215
+ * self.gui_config.samplerate
216
+ / self.zc
217
+ )
218
+ )
219
+ * self.zc
220
+ )
221
+ self.input_wav = torch.zeros(
222
+ self.extra_frame
223
+ + self.crossfade_frame
224
+ + self.sola_search_frame
225
+ + self.block_frame,
226
+ device=self.config.device,
227
+ dtype=torch.float32,
228
+ )
229
+ self.input_wav_denoise = self.input_wav.clone()
230
+ self.input_wav_res = torch.zeros(
231
+ 160 * self.input_wav.shape[0] // self.zc,
232
+ device=self.config.device,
233
+ dtype=torch.float32,
234
+ )
235
+ self.rms_buffer = np.zeros(4 * self.zc, dtype="float32")
236
+ self.sola_buffer = torch.zeros(
237
+ self.sola_buffer_frame, device=self.config.device, dtype=torch.float32
238
+ )
239
+ self.nr_buffer = self.sola_buffer.clone()
240
+ self.output_buffer = self.input_wav.clone()
241
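+ # skip_head / return_length are measured in zc-sized (10 ms) frames for the infer call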
+ self.skip_head = self.extra_frame // self.zc
242
+ self.return_length = (
243
+ self.block_frame + self.sola_buffer_frame + self.sola_search_frame
244
+ ) // self.zc
245
+ self.fade_in_window = (
246
+ torch.sin(
247
+ 0.5
248
+ * np.pi
249
+ * torch.linspace(
250
+ 0.0,
251
+ 1.0,
252
+ steps=self.sola_buffer_frame,
253
+ device=self.config.device,
254
+ dtype=torch.float32,
255
+ )
256
+ )
257
+ ** 2
258
+ )
259
+ self.fade_out_window = 1 - self.fade_in_window
260
+ self.resampler = tat.Resample(
261
+ orig_freq=self.gui_config.samplerate,
262
+ new_freq=16000,
263
+ dtype=torch.float32,
264
+ ).to(self.config.device)
265
+ if self.rvc.tgt_sr != self.gui_config.samplerate:
266
+ self.resampler2 = tat.Resample(
267
+ orig_freq=self.rvc.tgt_sr,
268
+ new_freq=self.gui_config.samplerate,
269
+ dtype=torch.float32,
270
+ ).to(self.config.device)
271
+ else:
272
+ self.resampler2 = None
273
+ self.tg = TorchGate(
274
+ sr=self.gui_config.samplerate, n_fft=4 * self.zc, prop_decrease=0.9
275
+ ).to(self.config.device)
276
+ thread_vc = threading.Thread(target=self.soundinput)
277
+ thread_vc.start()
278
+
279
+ def soundinput(self):
280
+ channels = 1 if sys.platform == "darwin" else 2
281
+ with sd.Stream(
282
+ channels=channels,
283
+ callback=self.audio_callback,
284
+ blocksize=self.block_frame,
285
+ samplerate=self.gui_config.samplerate,
286
+ dtype="float32",
287
+ ) as stream:
288
+ global stream_latency
289
+ stream_latency = stream.latency[-1]
290
+ while self.flag_vc:
291
+ time.sleep(self.gui_config.block_time)
292
+ logger.info("Audio block passed.")
293
+ logger.info("Ending VC")
294
+
295
+ def audio_callback(self, indata: np.ndarray, outdata: np.ndarray, frames, times, status):
296
+ start_time = time.perf_counter()
297
+ indata = librosa.to_mono(indata.T)
298
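+ # Noise gate with a 4*zc look-back buffer so the RMS frames stay aligned across callback boundaries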
+ if self.gui_config.threhold > -60:
299
+ indata = np.append(self.rms_buffer, indata)
300
+ rms = librosa.feature.rms(y=indata, frame_length=4 * self.zc, hop_length=self.zc)[:, 2:]
301
+ self.rms_buffer[:] = indata[-4 * self.zc :]
302
+ indata = indata[2 * self.zc - self.zc // 2 :]
303
+ db_threhold = (
304
+ librosa.amplitude_to_db(rms, ref=1.0)[0] < self.gui_config.threhold
305
+ )
306
+ for i in range(db_threhold.shape[0]):
307
+ if db_threhold[i]:
308
+ indata[i * self.zc : (i + 1) * self.zc] = 0
309
+ indata = indata[self.zc // 2 :]
310
+ self.input_wav[: -self.block_frame] = self.input_wav[self.block_frame :].clone()
311
+ self.input_wav[-indata.shape[0] :] = torch.from_numpy(indata).to(self.config.device)
312
+ self.input_wav_res[: -self.block_frame_16k] = self.input_wav_res[self.block_frame_16k :].clone()
313
+ # input noise reduction and resampling
314
+ if self.gui_config.I_noise_reduce:
315
+ self.input_wav_denoise[: -self.block_frame] = self.input_wav_denoise[self.block_frame :].clone()
316
+ input_wav = self.input_wav[-self.sola_buffer_frame - self.block_frame :]
317
+ input_wav = self.tg(input_wav.unsqueeze(0), self.input_wav.unsqueeze(0)).squeeze(0)
318
+ input_wav[: self.sola_buffer_frame] *= self.fade_in_window
319
+ input_wav[: self.sola_buffer_frame] += self.nr_buffer * self.fade_out_window
320
+ self.input_wav_denoise[-self.block_frame :] = input_wav[: self.block_frame]
321
+ self.nr_buffer[:] = input_wav[self.block_frame :]
322
+ self.input_wav_res[-self.block_frame_16k - 160 :] = self.resampler(
323
+ self.input_wav_denoise[-self.block_frame - 2 * self.zc :]
324
+ )[160:]
325
+ else:
326
+ self.input_wav_res[-160 * (indata.shape[0] // self.zc + 1) :] = (
327
+ self.resampler(self.input_wav[-indata.shape[0] - 2 * self.zc :])[160:]
328
+ )
329
+ # infer
330
+ if self.function == "vc":
331
+ infer_wav = self.rvc.infer(
332
+ self.input_wav_res,
333
+ self.block_frame_16k,
334
+ self.skip_head,
335
+ self.return_length,
336
+ self.gui_config.f0method,
337
+ )
338
+ if self.resampler2 is not None:
339
+ infer_wav = self.resampler2(infer_wav)
340
+ elif self.gui_config.I_noise_reduce:
341
+ infer_wav = self.input_wav_denoise[self.extra_frame :].clone()
342
+ else:
343
+ infer_wav = self.input_wav[self.extra_frame :].clone()
344
+ # output noise reduction
345
+ if self.gui_config.O_noise_reduce and self.function == "vc":
346
+ self.output_buffer[: -self.block_frame] = self.output_buffer[self.block_frame :].clone()
347
+ self.output_buffer[-self.block_frame :] = infer_wav[-self.block_frame :]
348
+ infer_wav = self.tg(infer_wav.unsqueeze(0), self.output_buffer.unsqueeze(0)).squeeze(0)
349
+ # volume envelope mixing
350
+ if self.gui_config.rms_mix_rate < 1 and self.function == "vc":
351
+ if self.gui_config.I_noise_reduce:
352
+ input_wav = self.input_wav_denoise[self.extra_frame :]
353
+ else:
354
+ input_wav = self.input_wav[self.extra_frame :]
355
+ rms1 = librosa.feature.rms(
356
+ y=input_wav[: infer_wav.shape[0]].cpu().numpy(),
357
+ frame_length=4 * self.zc,
358
+ hop_length=self.zc,
359
+ )
360
+ rms1 = torch.from_numpy(rms1).to(self.config.device)
361
+ rms1 = F.interpolate(
362
+ rms1.unsqueeze(0),
363
+ size=infer_wav.shape[0] + 1,
364
+ mode="linear",
365
+ align_corners=True,
366
+ )[0, 0, :-1]
367
+ rms2 = librosa.feature.rms(
368
+ y=infer_wav[:].cpu().numpy(),
369
+ frame_length=4 * self.zc,
370
+ hop_length=self.zc,
371
+ )
372
+ rms2 = torch.from_numpy(rms2).to(self.config.device)
373
+ rms2 = F.interpolate(
374
+ rms2.unsqueeze(0),
375
+ size=infer_wav.shape[0] + 1,
376
+ mode="linear",
377
+ align_corners=True,
378
+ )[0, 0, :-1]
379
+ rms2 = torch.max(rms2, torch.zeros_like(rms2) + 1e-3)
380
+ infer_wav *= torch.pow(
381
+ rms1 / rms2, torch.tensor(1 - self.gui_config.rms_mix_rate)
382
+ )
383
+ # SOLA algorithm from https://github.com/yxlllc/DDSP-SVC
384
+ conv_input = infer_wav[None, None, : self.sola_buffer_frame + self.sola_search_frame]
385
+ cor_nom = F.conv1d(conv_input, self.sola_buffer[None, None, :])
386
+ cor_den = torch.sqrt(
387
+ F.conv1d(
388
+ conv_input**2,
389
+ torch.ones(1, 1, self.sola_buffer_frame, device=self.config.device),
390
+ )
391
+ + 1e-8
392
+ )
393
+ if sys.platform == "darwin":
394
+ _, sola_offset = torch.max(cor_nom[0, 0] / cor_den[0, 0])
395
+ sola_offset = sola_offset.item()
396
+ else:
397
+ sola_offset = torch.argmax(cor_nom[0, 0] / cor_den[0, 0])
398
+ logger.info(f"sola_offset = {sola_offset}")
399
+ infer_wav = infer_wav[sola_offset:]
400
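+ # Crossfade: plain windowed overlap-add by default; phase-vocoder blending when use_pv is enabled.
+ # NOTE: phase_vocoder is not imported in this file and is assumed to be provided elsewhere.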
+ if "privateuseone" in str(self.config.device) or not self.gui_config.use_pv:
401
+ infer_wav[: self.sola_buffer_frame] *= self.fade_in_window
402
+ infer_wav[: self.sola_buffer_frame] += self.sola_buffer * self.fade_out_window
403
+ else:
404
+ infer_wav[: self.sola_buffer_frame] = phase_vocoder(
405
+ self.sola_buffer,
406
+ infer_wav[: self.sola_buffer_frame],
407
+ self.fade_out_window,
408
+ self.fade_in_window,
409
+ )
410
+ self.sola_buffer[:] = infer_wav[
411
+ self.block_frame : self.block_frame + self.sola_buffer_frame
412
+ ]
413
+ if sys.platform == "darwin":
414
+ outdata[:] = infer_wav[: self.block_frame].cpu().numpy()[:, np.newaxis]
415
+ else:
416
+ outdata[:] = infer_wav[: self.block_frame].repeat(2, 1).t().cpu().numpy()
417
+ total_time = time.perf_counter() - start_time
418
+ logger.info(f"Infer time: {total_time:.2f}")
419
+
420
+ def get_devices(self, update: bool = True):
421
+ if update:
422
+ sd._terminate()
423
+ sd._initialize()
424
+ devices = sd.query_devices()
425
+ hostapis = sd.query_hostapis()
426
+ for hostapi in hostapis:
427
+ for device_idx in hostapi["devices"]:
428
+ devices[device_idx]["hostapi_name"] = hostapi["name"]
429
+ input_devices = [
430
+ f"{d['name']} ({d['hostapi_name']})"
431
+ for d in devices
432
+ if d["max_input_channels"] > 0
433
+ ]
434
+ output_devices = [
435
+ f"{d['name']} ({d['hostapi_name']})"
436
+ for d in devices
437
+ if d["max_output_channels"] > 0
438
+ ]
439
+ input_devices_indices = [
440
+ d["index"] if "index" in d else d["name"]
441
+ for d in devices
442
+ if d["max_input_channels"] > 0
443
+ ]
444
+ output_devices_indices = [
445
+ d["index"] if "index" in d else d["name"]
446
+ for d in devices
447
+ if d["max_output_channels"] > 0
448
+ ]
449
+ return (
450
+ input_devices,
451
+ output_devices,
452
+ input_devices_indices,
453
+ output_devices_indices,
454
+ )
455
+
456
+ def set_devices(self, input_device, output_device):
457
+ (
458
+ input_devices,
459
+ output_devices,
460
+ input_device_indices,
461
+ output_device_indices,
462
+ ) = self.get_devices()
463
+ logger.debug(f"Available input devices: {input_devices}")
464
+ logger.debug(f"Available output devices: {output_devices}")
465
+ logger.debug(f"Selected input device: {input_device}")
466
+ logger.debug(f"Selected output device: {output_device}")
467
+
468
+ if input_device not in input_devices:
469
+ logger.error(f"Input device '{input_device}' is not in the list of available devices")
470
+ raise HTTPException(status_code=400, detail=f"Input device '{input_device}' is not available")
471
+
472
+ if output_device not in output_devices:
473
+ logger.error(f"Output device '{output_device}' is not in the list of available devices")
474
+ raise HTTPException(status_code=400, detail=f"Output device '{output_device}' is not available")
475
+
476
+ sd.default.device[0] = input_device_indices[input_devices.index(input_device)]
477
+ sd.default.device[1] = output_device_indices[output_devices.index(output_device)]
478
+ logger.info(f"Input device set to {sd.default.device[0]}: {input_device}")
479
+ logger.info(f"Output device set to {sd.default.device[1]}: {output_device}")
480
+
481
+ audio_api = AudioAPI()
482
+
483
+ @app.get("/inputDevices", response_model=list)
484
+ def get_input_devices():
485
+ try:
486
+ input_devices, _, _, _ = audio_api.get_devices()
487
+ return input_devices
488
+ except Exception as e:
489
+ logger.error(f"Failed to get input devices: {e}")
490
+ raise HTTPException(status_code=500, detail="Failed to get input devices")
491
+
492
+ @app.get("/outputDevices", response_model=list)
493
+ def get_output_devices():
494
+ try:
495
+ _, output_devices, _, _ = audio_api.get_devices()
496
+ return output_devices
497
+ except Exception as e:
498
+ logger.error(f"Failed to get output devices: {e}")
499
+ raise HTTPException(status_code=500, detail="Failed to get output devices")
500
+
501
+ @app.post("/config")
502
+ def configure_audio(config_data: ConfigData):
503
+ try:
504
+ logger.info(f"Configuring audio with data: {config_data}")
505
+ if audio_api.set_values(config_data):
506
+ settings = config_data.dict()
507
+ settings["use_jit"] = False
508
+ with open("configs/config.json", "w", encoding='utf-8') as j:
509
+ json.dump(settings, j, ensure_ascii=False)
510
+ logger.info("Configuration set successfully")
511
+ return {"message": "Configuration set successfully"}
512
+ except HTTPException as e:
513
+ logger.error(f"Configuration error: {e.detail}")
514
+ raise
515
+ except Exception as e:
516
+ logger.error(f"Configuration failed: {e}")
517
+ raise HTTPException(status_code=400, detail=f"Configuration failed: {e}")
518
+
519
+ @app.post("/start")
520
+ def start_conversion():
521
+ try:
522
+ if not audio_api.flag_vc:
523
+ audio_api.start_vc()
524
+ return {"message": "Audio conversion started"}
525
+ else:
526
+ logger.warning("Audio conversion already running")
527
+ raise HTTPException(status_code=400, detail="Audio conversion already running")
528
+ except HTTPException as e:
529
+ logger.error(f"Start conversion error: {e.detail}")
530
+ raise
531
+ except Exception as e:
532
+ logger.error(f"Failed to start conversion: {e}")
533
+ raise HTTPException(status_code=500, detail="Failed to start conversion: {e}")
534
+
535
+ @app.post("/stop")
536
+ def stop_conversion():
537
+ try:
538
+ if audio_api.flag_vc:
539
+ audio_api.flag_vc = False
540
+ global stream_latency
541
+ stream_latency = -1
542
+ return {"message": "Audio conversion stopped"}
543
+ else:
544
+ logger.warning("Audio conversion not running")
545
+ raise HTTPException(status_code=400, detail="Audio conversion not running")
546
+ except HTTPException as e:
547
+ logger.error(f"Stop conversion error: {e.detail}")
548
+ raise
549
+ except Exception as e:
550
+ logger.error(f"Failed to stop conversion: {e}")
551
+ raise HTTPException(status_code=500, detail="Failed to stop conversion: {e}")
552
+
553
+ if __name__ == "__main__":
554
+ if sys.platform == "win32":
555
+ freeze_support()
556
+ load_dotenv()
557
+ os.environ["OMP_NUM_THREADS"] = "4"
558
+ if sys.platform == "darwin":
559
+ os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
560
+ from tools.torchgate import TorchGate
561
+ import tools.rvc_for_realtime as rvc_for_realtime
562
+ from configs.config import Config
563
+ audio_api.config = Config()
564
+ audio_api.initialize_queues()
565
+ uvicorn.run(app, host="0.0.0.0", port=6242)
assets/Synthesizer_inputs.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c5ae8cd034b02bbc325939e9b9debbedb43ee9d71a654daaff8804815bd957d
3
+ size 122495
assets/hubert/.gitignore ADDED
@@ -0,0 +1,3 @@
1
+ *
2
+ !.gitignore
3
+ !hubert_inputs.pth
assets/hubert/hubert_inputs.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bbd4741d4be8a71333170c0df5320f605a9d210b96547b391555da078167861f
3
+ size 169434
assets/indices/.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ *
2
+ !.gitignore
assets/pretrained/.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ *
2
+ !.gitignore
assets/pretrained_v2/.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ *
2
+ !.gitignore
assets/pretrained_v2/f0D32k.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd7134e7793674c85474d5145d2d982e3c5d8124fc7bb6c20f710ed65808fa8a
3
+ size 142875703
assets/pretrained_v2/f0G32k.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2332611297b8d88c7436de8f17ef5f07a2119353e962cd93cda5806d59a1133d
3
+ size 73950049
assets/rmvpe/.gitignore ADDED
@@ -0,0 +1,3 @@
1
+ *
2
+ !.gitignore
3
+ !rmvpe_inputs.pth
assets/rmvpe/rmvpe_inputs.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:339fcb7e1476b302e9aecef4a951e918c20852b2e871de5eea13b06e554e0a3a
3
+ size 33527
assets/uvr5_weights/.gitignore ADDED
@@ -0,0 +1,2 @@
1
+ *
2
+ !.gitignore
audios/astronauts.mp3 ADDED
Binary file (73.6 kB).
audios/somegirl.mp3 ADDED
Binary file (32.2 kB).
audios/someguy.mp3 ADDED
Binary file (24.9 kB).
audios/unachica.mp3 ADDED
Binary file (36.4 kB).
audios/unchico.mp3 ADDED
Binary file (35.9 kB).
configs/config.json ADDED
@@ -0,0 +1 @@
1
+ {"pth_path": "assets/weights/kikiV1.pth", "index_path": "logs/kikiV1.index", "sg_hostapi": "MME", "sg_wasapi_exclusive": false, "sg_input_device": "VoiceMeeter Output (VB-Audio Vo", "sg_output_device": "VoiceMeeter Input (VB-Audio Voi", "sr_type": "sr_device", "threhold": -60.0, "pitch": 12.0, "formant": 0.0, "rms_mix_rate": 0.5, "index_rate": 0.0, "block_time": 0.15, "crossfade_length": 0.08, "extra_time": 2.0, "n_cpu": 4.0, "use_jit": false, "use_pv": false, "f0method": "fcpe"}
configs/config.py ADDED
@@ -0,0 +1,254 @@
1
+ import argparse
2
+ import os
3
+ import sys
4
+ import json
5
+ import shutil
6
+ from multiprocessing import cpu_count
7
+
8
+ import torch
9
+
10
+ try:
11
+ import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
12
+
13
+ if torch.xpu.is_available():
14
+ from infer.modules.ipex import ipex_init
15
+
16
+ ipex_init()
17
+ except Exception: # pylint: disable=broad-exception-caught
18
+ pass
19
+ import logging
20
+
21
+ logger = logging.getLogger(__name__)
22
+
23
+
24
+ version_config_list = [
25
+ "v1/32k.json",
26
+ "v1/40k.json",
27
+ "v1/48k.json",
28
+ "v2/48k.json",
29
+ "v2/32k.json",
30
+ ]
31
+
32
+
33
+ def singleton_variable(func):
34
+ def wrapper(*args, **kwargs):
35
+ if not wrapper.instance:
36
+ wrapper.instance = func(*args, **kwargs)
37
+ return wrapper.instance
38
+
39
+ wrapper.instance = None
40
+ return wrapper
41
+
42
+
43
+ @singleton_variable
44
+ class Config:
45
+ def __init__(self):
46
+ self.device = "cuda:0"
47
+ self.is_half = True
48
+ self.use_jit = False
49
+ self.n_cpu = 0
50
+ self.gpu_name = None
51
+ self.json_config = self.load_config_json()
52
+ self.gpu_mem = None
53
+ (
54
+ self.python_cmd,
55
+ self.listen_port,
56
+ self.iscolab,
57
+ self.noparallel,
58
+ self.noautoopen,
59
+ self.dml,
60
+ ) = self.arg_parse()
61
+ self.instead = ""
62
+ self.preprocess_per = 3.7
63
+ self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config()
64
+
65
+ @staticmethod
66
+ def load_config_json() -> dict:
67
+ d = {}
68
+ for config_file in version_config_list:
69
+ p = f"configs/inuse/{config_file}"
70
+ if not os.path.exists(p):
71
+ shutil.copy(f"configs/{config_file}", p)
72
+ with open(f"configs/inuse/{config_file}", "r") as f:
73
+ d[config_file] = json.load(f)
74
+ return d
75
+
76
+ @staticmethod
77
+ def arg_parse() -> tuple:
78
+ exe = sys.executable or "python"
79
+ parser = argparse.ArgumentParser()
80
+ parser.add_argument("--port", type=int, default=7865, help="Listen port")
81
+ parser.add_argument("--pycmd", type=str, default=exe, help="Python command")
82
+ parser.add_argument("--colab", action="store_true", help="Launch in colab")
83
+ parser.add_argument(
84
+ "--noparallel", action="store_true", help="Disable parallel processing"
85
+ )
86
+ parser.add_argument(
87
+ "--noautoopen",
88
+ action="store_true",
89
+ help="Do not open in browser automatically",
90
+ )
91
+ parser.add_argument(
92
+ "--dml",
93
+ action="store_true",
94
+ help="torch_dml",
95
+ )
96
+ cmd_opts = parser.parse_args()
97
+
98
+ cmd_opts.port = cmd_opts.port if 0 <= cmd_opts.port <= 65535 else 7865
99
+
100
+ return (
101
+ cmd_opts.pycmd,
102
+ cmd_opts.port,
103
+ cmd_opts.colab,
104
+ cmd_opts.noparallel,
105
+ cmd_opts.noautoopen,
106
+ cmd_opts.dml,
107
+ )
108
+
109
+ # has_mps is only available in nightly PyTorch (for now) and macOS 12.3+.
110
+ # check `getattr` and try it for compatibility
111
+ @staticmethod
112
+ def has_mps() -> bool:
113
+ if not torch.backends.mps.is_available():
114
+ return False
115
+ try:
116
+ torch.zeros(1).to(torch.device("mps"))
117
+ return True
118
+ except Exception:
119
+ return False
120
+
121
+ @staticmethod
122
+ def has_xpu() -> bool:
123
+ if hasattr(torch, "xpu") and torch.xpu.is_available():
124
+ return True
125
+ else:
126
+ return False
127
+
128
+ def use_fp32_config(self):
129
+ for config_file in version_config_list:
130
+ self.json_config[config_file]["train"]["fp16_run"] = False
131
+ with open(f"configs/inuse/{config_file}", "r") as f:
132
+ strr = f.read().replace("true", "false")
133
+ with open(f"configs/inuse/{config_file}", "w") as f:
134
+ f.write(strr)
135
+ logger.info("overwrite " + config_file)
136
+ self.preprocess_per = 3.0
137
+ logger.info("overwrite preprocess_per to %d" % (self.preprocess_per))
138
+
139
+ def device_config(self) -> tuple:
140
+ if torch.cuda.is_available():
141
+ if self.has_xpu():
142
+ self.device = self.instead = "xpu:0"
143
+ self.is_half = True
144
+ i_device = int(self.device.split(":")[-1])
145
+ self.gpu_name = torch.cuda.get_device_name(i_device)
146
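+ # GTX 16-series (non-V100), P40/P10, and GTX 10xx cards have poor fp16 support, so force fp32 on them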
+ if (
147
+ ("16" in self.gpu_name and "V100" not in self.gpu_name.upper())
148
+ or "P40" in self.gpu_name.upper()
149
+ or "P10" in self.gpu_name.upper()
150
+ or "1060" in self.gpu_name
151
+ or "1070" in self.gpu_name
152
+ or "1080" in self.gpu_name
153
+ ):
154
+ logger.info("Found GPU %s, force to fp32", self.gpu_name)
155
+ self.is_half = False
156
+ self.use_fp32_config()
157
+ else:
158
+ logger.info("Found GPU %s", self.gpu_name)
159
+ self.gpu_mem = int(
160
+ torch.cuda.get_device_properties(i_device).total_memory
161
+ / 1024
162
+ / 1024
163
+ / 1024
164
+ + 0.4
165
+ )
166
+ if self.gpu_mem <= 4:
167
+ self.preprocess_per = 3.0
168
+ elif self.has_mps():
169
+ logger.info("No supported Nvidia GPU found")
170
+ self.device = self.instead = "mps"
171
+ self.is_half = False
172
+ self.use_fp32_config()
173
+ else:
174
+ logger.info("No supported Nvidia GPU found")
175
+ self.device = self.instead = "cpu"
176
+ self.is_half = False
177
+ self.use_fp32_config()
178
+
179
+ if self.n_cpu == 0:
180
+ self.n_cpu = cpu_count()
181
+
182
+ if self.is_half:
183
+ # Configuration for 6 GB VRAM
184
+ x_pad = 3
185
+ x_query = 10
186
+ x_center = 60
187
+ x_max = 65
188
+ else:
189
+ # Configuration for 5 GB VRAM
190
+ x_pad = 1
191
+ x_query = 6
192
+ x_center = 38
193
+ x_max = 41
194
+
195
+ if self.gpu_mem is not None and self.gpu_mem <= 4:
196
+ x_pad = 1
197
+ x_query = 5
198
+ x_center = 30
199
+ x_max = 32
200
+ if self.dml:
201
+ logger.info("Use DirectML instead")
202
+ if (
203
+ os.path.exists(
204
+ "runtime\Lib\site-packages\onnxruntime\capi\DirectML.dll"
205
+ )
206
+ == False
207
+ ):
208
+ try:
209
+ os.rename(
210
+ "runtime\Lib\site-packages\onnxruntime",
211
+ "runtime\Lib\site-packages\onnxruntime-cuda",
212
+ )
213
+ except Exception:
214
+ pass
215
+ try:
216
+ os.rename(
217
+ "runtime\Lib\site-packages\onnxruntime-dml",
218
+ "runtime\Lib\site-packages\onnxruntime",
219
+ )
220
+ except Exception:
221
+ pass
222
+ # if self.device != "cpu":
223
+ import torch_directml
224
+
225
+ self.device = torch_directml.device(torch_directml.default_device())
226
+ self.is_half = False
227
+ else:
228
+ if self.instead:
229
+ logger.info(f"Use {self.instead} instead")
230
+ if (
231
+ os.path.exists(
232
+ "runtime\Lib\site-packages\onnxruntime\capi\onnxruntime_providers_cuda.dll"
233
+ )
234
+ == False
235
+ ):
236
+ try:
237
+ os.rename(
238
+ "runtime\Lib\site-packages\onnxruntime",
239
+ "runtime\Lib\site-packages\onnxruntime-dml",
240
+ )
241
+ except Exception:
242
+ pass
243
+ try:
244
+ os.rename(
245
+ "runtime\Lib\site-packages\onnxruntime-cuda",
246
+ "runtime\Lib\site-packages\onnxruntime",
247
+ )
248
+ except:
249
+ pass
250
+ logger.info(
251
+ "Half-precision floating-point: %s, device: %s"
252
+ % (self.is_half, self.device)
253
+ )
254
+ return x_pad, x_query, x_center, x_max
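Editorial note: the four values returned by `device_config` are durations in seconds. A minimal sketch of how they are typically scaled into sample counts for chunked inference follows; the 16 kHz rate and the `slicing_plan` helper are illustrative assumptions, not part of this commit:

```python
def slicing_plan(device_cfg: tuple, sr: int = 16000) -> dict:
    """Convert (x_pad, x_query, x_center, x_max), all in seconds,
    into sample counts for slicing audio during inference."""
    x_pad, x_query, x_center, x_max = device_cfg
    return {
        "t_pad": sr * x_pad,        # padding added around each slice
        "t_query": sr * x_query,    # window searched for a quiet cut point
        "t_center": sr * x_center,  # cut roughly every x_center seconds
        "t_max": sr * x_max,        # inputs shorter than this run in one pass
    }

# e.g. the fp16 ("~6 GB VRAM") settings chosen above:
print(slicing_plan((3, 10, 60, 65)))
```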
configs/inuse/.gitignore ADDED
@@ -0,0 +1,4 @@
+ *
+ !.gitignore
+ !v1
+ !v2
configs/inuse/v1/.gitignore ADDED
@@ -0,0 +1,2 @@
+ *
+ !.gitignore
configs/inuse/v2/.gitignore ADDED
@@ -0,0 +1,2 @@
+ *
+ !.gitignore
configs/v1/32k.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "epochs": 20000,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 12800,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sampling_rate": 32000,
+     "filter_length": 1024,
+     "hop_length": 320,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [10, 4, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 4, 4, 4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
configs/v1/40k.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "epochs": 20000,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 12800,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sampling_rate": 40000,
+     "filter_length": 2048,
+     "hop_length": 400,
+     "win_length": 2048,
+     "n_mel_channels": 125,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [10, 10, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 4, 4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
configs/v1/48k.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "epochs": 20000,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 11520,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sampling_rate": 48000,
+     "filter_length": 2048,
+     "hop_length": 480,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [10, 6, 2, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [16, 16, 4, 4, 4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
configs/v2/32k.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "epochs": 20000,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 12800,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sampling_rate": 32000,
+     "filter_length": 1024,
+     "hop_length": 320,
+     "win_length": 1024,
+     "n_mel_channels": 80,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [10, 8, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [20, 16, 4, 4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
configs/v2/48k.json ADDED
@@ -0,0 +1,46 @@
+ {
+   "train": {
+     "log_interval": 200,
+     "seed": 1234,
+     "epochs": 20000,
+     "learning_rate": 1e-4,
+     "betas": [0.8, 0.99],
+     "eps": 1e-9,
+     "batch_size": 4,
+     "fp16_run": true,
+     "lr_decay": 0.999875,
+     "segment_size": 17280,
+     "init_lr_ratio": 1,
+     "warmup_epochs": 0,
+     "c_mel": 45,
+     "c_kl": 1.0
+   },
+   "data": {
+     "max_wav_value": 32768.0,
+     "sampling_rate": 48000,
+     "filter_length": 2048,
+     "hop_length": 480,
+     "win_length": 2048,
+     "n_mel_channels": 128,
+     "mel_fmin": 0.0,
+     "mel_fmax": null
+   },
+   "model": {
+     "inter_channels": 192,
+     "hidden_channels": 192,
+     "filter_channels": 768,
+     "n_heads": 2,
+     "n_layers": 6,
+     "kernel_size": 3,
+     "p_dropout": 0,
+     "resblock": "1",
+     "resblock_kernel_sizes": [3, 7, 11],
+     "resblock_dilation_sizes": [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
+     "upsample_rates": [12, 10, 2, 2],
+     "upsample_initial_channel": 512,
+     "upsample_kernel_sizes": [24, 20, 4, 4],
+     "use_spectral_norm": false,
+     "gin_channels": 256,
+     "spk_embed_dim": 109
+   }
+ }
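Editorial note: the five training configs above share two invariants worth checking when editing them: the generator's total upsampling factor (the product of `upsample_rates`) must equal the STFT `hop_length`, and every variant runs at 100 feature frames per second (`sampling_rate / hop_length`). A small standalone check, with paths as in this commit:

```python
import json
from math import prod

for path in (
    "configs/v1/32k.json", "configs/v1/40k.json", "configs/v1/48k.json",
    "configs/v2/32k.json", "configs/v2/48k.json",
):
    with open(path) as f:
        cfg = json.load(f)
    hop = cfg["data"]["hop_length"]
    sr = cfg["data"]["sampling_rate"]
    ups = cfg["model"]["upsample_rates"]
    assert prod(ups) == hop, path  # vocoder emits exactly one hop per feature frame
    assert sr // hop == 100, path  # 100 feature frames per second in every variant
    print(f"{path}: {sr} Hz, hop {hop}, upsample_rates {ups}")
```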
demo.py ADDED
@@ -0,0 +1,439 @@
+ from original import *
+ import shutil, glob
+ from easyfuncs import download_from_url, CachedModels
+
+ os.makedirs("dataset", exist_ok=True)
+ model_library = CachedModels()
+
+ with gr.Blocks(title="🔊", theme=gr.themes.Base(primary_hue="rose", neutral_hue="zinc")) as app:
+     with gr.Row():
+         gr.HTML("<img src='file/a.png' alt='image'>")
+     with gr.Tabs():
+         with gr.TabItem("Inference"):
+             with gr.Row():
+                 voice_model = gr.Dropdown(
+                     label="Model Voice",
+                     choices=sorted(names),
+                     value=lambda: sorted(names)[0] if len(sorted(names)) > 0 else "",
+                     interactive=True,
+                 )
+                 refresh_button = gr.Button("Refresh", variant="primary")
+                 spk_item = gr.Slider(
+                     minimum=0,
+                     maximum=2333,
+                     step=1,
+                     label="Speaker ID",
+                     value=0,
+                     visible=False,
+                     interactive=True,
+                 )
+                 vc_transform0 = gr.Number(label="Pitch", value=0)
+                 but0 = gr.Button(value="Convert", variant="primary")
+             with gr.Row():
+                 with gr.Column():
+                     with gr.Row():
+                         dropbox = gr.File(label="Drop your audio here & hit the Reload button.")
+                     with gr.Row():
+                         record_button = gr.Audio(source="microphone", label="OR Record audio.", type="filepath")
+                     with gr.Row():
+                         # Absolute paths of the audio files (.mp3/.wav/.flac/.ogg) in a folder
+                         paths_for_files = lambda path: [
+                             os.path.abspath(os.path.join(path, f))
+                             for f in os.listdir(path)
+                             if os.path.splitext(f)[1].lower() in (".mp3", ".wav", ".flac", ".ogg")
+                         ]
+                         input_audio0 = gr.Dropdown(
+                             label="Input Path",
+                             value=paths_for_files("audios")[0] if len(paths_for_files("audios")) > 0 else "",
+                             choices=paths_for_files("audios"),
+                             allow_custom_value=True,
+                         )
+                     with gr.Row():
+                         audio_player = gr.Audio()
+                         input_audio0.change(
+                             inputs=[input_audio0],
+                             outputs=[audio_player],
+                             fn=lambda path: {"value": path, "__type__": "update"} if os.path.exists(path) else None,
+                         )
+                         record_button.stop_recording(
+                             fn=lambda audio: audio,  # TODO: save the recording as a wav file
+                             inputs=[record_button],
+                             outputs=[input_audio0],
+                         )
+                         dropbox.upload(
+                             fn=lambda audio: audio.name,
+                             inputs=[dropbox],
+                             outputs=[input_audio0],
+                         )
+                 with gr.Column():
+                     with gr.Accordion("Change Index", open=False):
+                         file_index2 = gr.Dropdown(
+                             label="Change Index",
+                             choices=sorted(index_paths),
+                             interactive=True,
+                             value=sorted(index_paths)[0] if len(sorted(index_paths)) > 0 else "",
+                         )
+                         index_rate1 = gr.Slider(
+                             minimum=0,
+                             maximum=1,
+                             label="Index Strength",
+                             value=0.5,
+                             interactive=True,
+                         )
+                     vc_output2 = gr.Audio(label="Output")
+                     with gr.Accordion("General Settings", open=False):
+                         f0method0 = gr.Radio(
+                             label="Method",
+                             choices=["pm", "harvest", "crepe", "rmvpe"]
+                             if not config.dml
+                             else ["pm", "harvest", "rmvpe"],
+                             value="rmvpe",
+                             interactive=True,
+                         )
+                         filter_radius0 = gr.Slider(
+                             minimum=0,
+                             maximum=7,
+                             label="Breathiness Reduction (Harvest only)",
+                             value=3,
+                             step=1,
+                             interactive=True,
+                         )
+                         resample_sr0 = gr.Slider(
+                             minimum=0,
+                             maximum=48000,
+                             label="Resample",
+                             value=0,
+                             step=1,
+                             interactive=True,
+                             visible=False,
+                         )
+                         rms_mix_rate0 = gr.Slider(
+                             minimum=0,
+                             maximum=1,
+                             label="Volume Normalization",
+                             value=0,
+                             interactive=True,
+                         )
+                         protect0 = gr.Slider(
+                             minimum=0,
+                             maximum=0.5,
+                             label="Breathiness Protection (0 is enabled, 0.5 is disabled)",
+                             value=0.33,
+                             step=0.01,
+                             interactive=True,
+                         )
+                     if voice_model is not None:
+                         vc.get_vc(voice_model.value, protect0, protect0)
+                     file_index1 = gr.Textbox(
+                         label="Index Path",
+                         interactive=True,
+                         visible=False,  # not used here
+                     )
+                     refresh_button.click(
+                         fn=change_choices,
+                         inputs=[],
+                         outputs=[voice_model, file_index2],
+                         api_name="infer_refresh",
+                     )
+                     refresh_button.click(
+                         # Refresh the choices with the audio files currently in 'audios'
+                         fn=lambda: {"choices": paths_for_files("audios"), "__type__": "update"},
+                         inputs=[],
+                         outputs=[input_audio0],
+                     )
+                     refresh_button.click(
+                         # Reset the selected value to the first audio file, if any
+                         fn=lambda: {"value": paths_for_files("audios")[0], "__type__": "update"}
+                         if len(paths_for_files("audios")) > 0
+                         else {"value": "", "__type__": "update"},
+                         inputs=[],
+                         outputs=[input_audio0],
+                     )
+             with gr.Row():
+                 f0_file = gr.File(label="F0 Path", visible=False)
+             with gr.Row():
+                 vc_output1 = gr.Textbox(label="Information", placeholder="Welcome!", visible=False)
+                 but0.click(
+                     vc.vc_single,
+                     [
+                         spk_item,
+                         input_audio0,
+                         vc_transform0,
+                         f0_file,
+                         f0method0,
+                         file_index1,
+                         file_index2,
+                         index_rate1,
+                         filter_radius0,
+                         resample_sr0,
+                         rms_mix_rate0,
+                         protect0,
+                     ],
+                     [vc_output1, vc_output2],
+                     api_name="infer_convert",
+                 )
+                 voice_model.change(
+                     fn=vc.get_vc,
+                     inputs=[voice_model, protect0, protect0],
+                     outputs=[spk_item, protect0, protect0, file_index2, file_index2],
+                     api_name="infer_change_voice",
+                 )
+         with gr.TabItem("Download Models"):
+             with gr.Row():
+                 url_input = gr.Textbox(label="URL to model", value="", placeholder="https://...", scale=6)
+                 name_output = gr.Textbox(label="Save as", value="", placeholder="MyModel", scale=2)
+                 url_download = gr.Button(value="Download Model", scale=2)
+                 url_download.click(
+                     inputs=[url_input, name_output],
+                     outputs=[url_input],
+                     fn=download_from_url,
+                 )
+             with gr.Row():
+                 model_browser = gr.Dropdown(
+                     choices=list(model_library.models.keys()),
+                     label="OR Search Models (Quality UNKNOWN)",
+                     scale=5,
+                 )
+                 download_from_browser = gr.Button(value="Get", scale=2)
+                 download_from_browser.click(
+                     inputs=[model_browser],
+                     outputs=[model_browser],
+                     fn=lambda model: download_from_url(model_library.models[model], model),
+                 )
+         with gr.TabItem("Train"):
+             with gr.Row():
+                 with gr.Column():
+                     training_name = gr.Textbox(label="Name your model", value="My-Voice", placeholder="My-Voice")
+                     np7 = gr.Slider(
+                         minimum=0,
+                         maximum=config.n_cpu,
+                         step=1,
+                         label="Number of CPU processes used to extract pitch features",
+                         value=int(np.ceil(config.n_cpu / 1.5)),
+                         interactive=True,
+                     )
+                     sr2 = gr.Radio(
+                         label="Sampling Rate",
+                         choices=["40k", "32k"],
+                         value="32k",
+                         interactive=True,
+                         visible=False,
+                     )
+                     if_f0_3 = gr.Radio(
+                         label="Will your model be used for singing? If not, you can ignore this.",
+                         choices=[True, False],
+                         value=True,
+                         interactive=True,
+                         visible=False,
+                     )
+                     version19 = gr.Radio(
+                         label="Version",
+                         choices=["v1", "v2"],
+                         value="v2",
+                         interactive=True,
+                         visible=False,
+                     )
+                     dataset_folder = gr.Textbox(label="dataset folder", value="dataset")
+                     easy_uploader = gr.Files(label="Drop your audio files here", file_types=["audio"])
+                     but1 = gr.Button("1. Process", variant="primary")
+                     info1 = gr.Textbox(label="Information", value="", visible=True)
+                     easy_uploader.upload(
+                         inputs=[dataset_folder],
+                         outputs=[],
+                         fn=lambda folder: os.makedirs(folder, exist_ok=True),
+                     )
+                     easy_uploader.upload(
+                         fn=lambda files, folder: [
+                             shutil.copy2(f.name, os.path.join(folder, os.path.split(f.name)[1]))
+                             for f in files
+                         ]
+                         if folder != ""
+                         else gr.Warning("Please enter a folder name for your dataset"),
+                         inputs=[easy_uploader, dataset_folder],
+                         outputs=[],
+                     )
+                     gpus6 = gr.Textbox(
+                         label="Enter the GPU numbers to use separated by -, (e.g. 0-1-2)",
+                         value=gpus,
+                         interactive=True,
+                         visible=F0GPUVisible,
+                     )
+                     gpu_info9 = gr.Textbox(label="GPU Info", value=gpu_info, visible=F0GPUVisible)
+                     spk_id5 = gr.Slider(
+                         minimum=0,
+                         maximum=4,
+                         step=1,
+                         label="Speaker ID",
+                         value=0,
+                         interactive=True,
+                         visible=False,
+                     )
+                     but1.click(
+                         preprocess_dataset,
+                         [dataset_folder, training_name, sr2, np7],
+                         [info1],
+                         api_name="train_preprocess",
+                     )
+                 with gr.Column():
+                     f0method8 = gr.Radio(
+                         label="F0 extraction method",
+                         choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"],
+                         value="rmvpe_gpu",
+                         interactive=True,
+                     )
+                     gpus_rmvpe = gr.Textbox(
+                         label="GPU numbers to use separated by -, (e.g. 0-1-2)",
+                         value="%s-%s" % (gpus, gpus),
+                         interactive=True,
+                         visible=F0GPUVisible,
+                     )
+                     but2 = gr.Button("2. Extract Features", variant="primary")
+                     info2 = gr.Textbox(label="Information", value="", max_lines=8)
+                     f0method8.change(
+                         fn=change_f0_method,
+                         inputs=[f0method8],
+                         outputs=[gpus_rmvpe],
+                     )
+                     but2.click(
+                         extract_f0_feature,
+                         [
+                             gpus6,
+                             np7,
+                             f0method8,
+                             if_f0_3,
+                             training_name,
+                             version19,
+                             gpus_rmvpe,
+                         ],
+                         [info2],
+                         api_name="train_extract_f0_feature",
+                     )
+                 with gr.Column():
+                     total_epoch11 = gr.Slider(
+                         minimum=2,
+                         maximum=1000,
+                         step=1,
+                         label="Epochs (more epochs may improve quality but take longer)",
+                         value=150,
+                         interactive=True,
+                     )
+                     but4 = gr.Button("3. Train Index", variant="primary")
+                     but3 = gr.Button("4. Train Model", variant="primary")
+                     info3 = gr.Textbox(label="Information", value="", max_lines=10)
+                     with gr.Accordion(label="General Settings", open=False):
+                         gpus16 = gr.Textbox(
+                             label="GPUs separated by -, (e.g. 0-1-2)",
+                             value="0",
+                             interactive=True,
+                             visible=True,
+                         )
+                         save_epoch10 = gr.Slider(
+                             minimum=1,
+                             maximum=50,
+                             step=1,
+                             label="Weight Saving Frequency",
+                             value=25,
+                             interactive=True,
+                         )
+                         batch_size12 = gr.Slider(
+                             minimum=1,
+                             maximum=40,
+                             step=1,
+                             label="Batch Size",
+                             value=default_batch_size,
+                             interactive=True,
+                         )
+                         if_save_latest13 = gr.Radio(
+                             label="Only save the latest model",
+                             choices=["yes", "no"],
+                             value="yes",
+                             interactive=True,
+                             visible=False,
+                         )
+                         if_cache_gpu17 = gr.Radio(
+                             label="If your dataset is UNDER 10 minutes, cache it to train faster",
+                             choices=["yes", "no"],
+                             value="no",
+                             interactive=True,
+                         )
+                         if_save_every_weights18 = gr.Radio(
+                             label="Save small model at every save point",
+                             choices=["yes", "no"],
+                             value="yes",
+                             interactive=True,
+                         )
+                     with gr.Accordion(label="Change pretrains", open=False):
+                         # Pretrained G/D checkpoints in assets/pretrained_v2 matching the sampling rate
+                         pretrained = lambda sr, letter: [
+                             os.path.abspath(os.path.join("assets/pretrained_v2", file))
+                             for file in os.listdir("assets/pretrained_v2")
+                             if file.endswith(".pth") and sr in file and letter in file
+                         ]
+                         pretrained_G14 = gr.Dropdown(
+                             label="pretrained G",
+                             choices=pretrained(sr2.value, "G"),
+                             value=pretrained(sr2.value, "G")[0] if len(pretrained(sr2.value, "G")) > 0 else "",
+                             interactive=True,
+                             visible=True,
+                         )
+                         pretrained_D15 = gr.Dropdown(
+                             label="pretrained D",
+                             choices=pretrained(sr2.value, "D"),
+                             value=pretrained(sr2.value, "D")[0] if len(pretrained(sr2.value, "D")) > 0 else "",
+                             visible=True,
+                             interactive=True,
+                         )
+                     with gr.Row():
+                         download_model = gr.Button("5. Download Model")
+                     with gr.Row():
+                         model_files = gr.Files(label="Your Model and Index file can be downloaded here:")
+                         download_model.click(
+                             fn=lambda name: os.listdir(f"assets/weights/{name}")
+                             + glob.glob(f'logs/{name.split(".")[0]}/added_*.index'),
+                             inputs=[training_name],
+                             outputs=[model_files, info3],
+                         )
+                     with gr.Row():
+                         sr2.change(
+                             change_sr2,
+                             [sr2, if_f0_3, version19],
+                             [pretrained_G14, pretrained_D15],
+                         )
+                         version19.change(
+                             change_version19,
+                             [sr2, if_f0_3, version19],
+                             [pretrained_G14, pretrained_D15, sr2],
+                         )
+                         if_f0_3.change(
+                             change_f0,
+                             [if_f0_3, sr2, version19],
+                             [f0method8, pretrained_G14, pretrained_D15],
+                         )
+                     with gr.Row():
+                         but5 = gr.Button("1 Click Training", variant="primary", visible=False)
+                         but3.click(
+                             click_train,
+                             [
+                                 training_name,
+                                 sr2,
+                                 if_f0_3,
+                                 spk_id5,
+                                 save_epoch10,
+                                 total_epoch11,
+                                 batch_size12,
+                                 if_save_latest13,
+                                 pretrained_G14,
+                                 pretrained_D15,
+                                 gpus16,
+                                 if_cache_gpu17,
+                                 if_save_every_weights18,
+                                 version19,
+                             ],
+                             info3,
+                             api_name="train_start",
+                         )
+                         but4.click(train_index, [training_name, version19], info3)
+                         but5.click(
+                             train1key,
+                             [
+                                 training_name,
+                                 sr2,
+                                 if_f0_3,
+                                 dataset_folder,
+                                 spk_id5,
+                                 np7,
+                                 f0method8,
+                                 save_epoch10,
+                                 total_epoch11,
+                                 batch_size12,
+                                 if_save_latest13,
+                                 pretrained_G14,
+                                 pretrained_D15,
+                                 gpus16,
+                                 if_cache_gpu17,
+                                 if_save_every_weights18,
+                                 version19,
+                                 gpus_rmvpe,
+                             ],
+                             info3,
+                             api_name="train_start_all",
+                         )
+
+     if config.iscolab:
+         app.queue(concurrency_count=511, max_size=1022).launch(share=True)
+     else:
+         app.queue(concurrency_count=511, max_size=1022).launch(
+             server_name="0.0.0.0",
+             inbrowser=not config.noautoopen,
+             server_port=config.listen_port,
+             quiet=True,
+         )
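Editorial note: because the click handlers above set `api_name`s ("infer_convert", "infer_refresh", "train_preprocess", ...), the app can also be driven programmatically. A sketch using `gradio_client`, unverified against a running instance: the URL/port follow docker-compose.yml, the index path is hypothetical, and the argument order mirrors the inputs wired to `but0.click`:

```python
from gradio_client import Client

client = Client("http://localhost:7865/")  # assumed local deployment
info, converted = client.predict(
    0,                         # spk_item: speaker ID
    "audios/someguy.mp3",      # input_audio0: source audio (shipped in this repo)
    0,                         # vc_transform0: pitch shift in semitones
    None,                      # f0_file: optional external F0 curve (some versions may want a path)
    "rmvpe",                   # f0method0
    "",                        # file_index1: unused textbox
    "logs/My-Voice/added_example.index",  # file_index2: hypothetical index path
    0.5,                       # index_rate1
    3,                         # filter_radius0
    0,                         # resample_sr0
    0,                         # rms_mix_rate0
    0.33,                      # protect0
    api_name="/infer_convert",
)
print(info, converted)
```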
docker-compose.yml ADDED
@@ -0,0 +1,20 @@
+ version: "3.8"
+ services:
+   rvc:
+     build:
+       context: .
+       dockerfile: Dockerfile
+     container_name: rvc
+     volumes:
+       - ./weights:/app/assets/weights
+       - ./opt:/app/opt
+       # - ./dataset:/app/dataset # you can use this folder in order to provide your dataset for model training
+     ports:
+       - 7865:7865
+     deploy:
+       resources:
+         reservations:
+           devices:
+             - driver: nvidia
+               count: 1
+               capabilities: [gpu]
docs/cn/Changelog_CN.md ADDED
@@ -0,0 +1,109 @@
+ ### 20231006更新
+
+ 我们制作了一个用于实时变声的界面go-realtime-gui.bat/gui_v1.py(事实上早就存在了),本次更新重点也优化了实时变声的性能。对比0813版:
+ - 1、优化界面操作:参数热更新(调整参数不需要中止再启动)、懒加载模型(已加载过的模型不需要重新加载)、增加响度因子参数(响度向输入音频靠近)
+ - 2、优化自带降噪效果与速度
+ - 3、大幅优化推理速度
+
+ 注意输入输出设备应该选择同种类型,例如都选MME类型。
+
+ 1006版本整体的更新为:
+ - 1、继续提升rmvpe音高提取算法效果,对于男低音有更大的提升
+ - 2、优化推理界面布局
+
+ ### 20230813更新
+ 1-常规bug修复
+ - 保存频率最低改为1,总轮数最低改为2
+ - 修复无pretrain模型训练报错
+ - 增加伴奏人声分离完毕清理显存
+ - faiss保存路径由绝对路径改为相对路径
+ - 支持路径包含空格(训练集路径+实验名称均支持,不再会报错)
+ - filelist取消强制utf8编码
+ - 解决实时变声中开启索引导致的CPU极大占用问题
+
+ 2-重点更新
+ - 训练出当前最强开源人声音高提取模型RMVPE,并用于RVC的训练、离线/实时推理,支持pytorch/onnx/DirectML
+ - 通过pytorch-dml支持A卡和I卡的
+ (1)实时变声(2)推理(3)人声伴奏分离(4)训练暂未支持,会切换至CPU训练;通过onnx_dml支持rmvpe_gpu的推理
+
+ ### 20230618更新
+ - v2增加32k和48k两个新预训练模型
+ - 修复非f0模型推理报错
+ - 对于超过一小时的训练集的索引建立环节,自动kmeans缩小特征处理以加速索引训练、加入和查询
+ - 附送一个人声转吉他玩具仓库
+ - 数据处理剔除异常值切片
+ - onnx导出选项卡
+
+ 失败的实验:
+ - ~~特征检索增加时序维度:寄,没啥效果~~
+ - ~~特征检索增加PCAR降维可选项:寄,数据大用kmeans缩小数据量,数据小降维操作耗时比省下的匹配耗时还多~~
+ - ~~支持onnx推理(附带仅推理的小压缩包):寄,生成nsf还是需要pytorch~~
+ - ~~训练时在音高、gender、eq、噪声等方面对输入进行随机增强:寄,没啥效果~~
+ - ~~接入小型声码器调研:寄,效果变差~~
+
+ todolist:
+ - ~~训练集音高识别支持crepe:已经被RMVPE取代,不需要~~
+ - ~~多进程harvest推理:已经被RMVPE取代,不需要~~
+ - ~~crepe的精度支持和RVC-config同步:已经被RMVPE取代,不需要。支持这个还要同步torchcrepe的库,麻烦~~
+ - 对接F0编辑器
+
+
+ ### 20230528更新
+ - 增加v2的jupyter notebook,韩文changelog,增加一些环境依赖
+ - 增加呼吸、清辅音、齿音保护模式
+ - 支持crepe-full推理
+ - UVR5人声伴奏分离加上3个去延迟模型和MDX-Net去混响模型,增加HP3人声提取模型
+ - 索引名称增加版本和实验名称
+ - 人声伴奏分离、推理批量导出增加音频导出格式选项
+ - 废弃32k模型的训练
+
+ ### 20230513更新
+ - 清除一键包内部老版本runtime内残留的lib.infer_pack和uvr5_pack
+ - 修复训练集预处理伪多进程的bug
+ - 增加harvest识别音高可选通过中值滤波削弱哑音现象,可调整中值滤波半径
+ - 导出音频增加后处理重采样
+ - 训练n_cpu进程数从"仅调整f0提取"改为"调整数据预处理和f0提取"
+ - 自动检测logs文件夹下的index路径,提供下拉列表功能
+ - tab页增加"常见问题解答"(也可参考github-rvc-wiki)
+ - 相同路径的输入音频推理增加了音高缓存(用途:使用harvest音高提取,整个pipeline会经历漫长且重复的音高提取过程,如果不使用缓存,实验不同音色、索引、音高中值滤波半径参数的用户在第一次测试后的等待结果会非常痛苦)
+
+ ### 20230514更新
+ - 音量包络对齐输入混合(可以缓解"输入静音输出小幅度噪声"的问题。如果输入音频背景底噪大则不建议开启,默认不开启(值为1可视为不开启))
+ - 支持按照指定频率保存提取的小模型(假如你想尝试不同epoch下的推理效果,但是不想保存所有大checkpoint并且每次都要ckpt手工处理提取小模型,这项功能会非常实用)
+ - 通过设置环境变量解决服务端开了系统全局代理导致浏览器连接错误的问题
+ - 支持v2预训练模型(目前只公开了40k版本进行测试,另外2个采样率还没有训练完全)
+ - 推理前限制超过1的过大音量
+ - 微调数据预处理参数
+
+
+ ### 20230409更新
+ - 修正训练参数,提升显卡平均利用率,A100最高从25%提升至90%左右,V100:50%->90%左右,2060S:60%->85%左右,P40:25%->95%左右,训练速度显著提升
+ - 修正参数:总batch_size改为每张卡的batch_size
+ - 修正total_epoch:最大限制100解锁至1000;默认10提升至默认20
+ - 修复ckpt提取识别是否带音高错误导致推理异常的问题
+ - 修复分布式训练每个rank都保存一次ckpt的问题
+ - 特征提取进行nan特征过滤
+ - 修复静音输入输出随机辅音or噪声的问题(老版模型需要重做训练集重训)
+
+ ### 20230416更新
+ - 新增本地实时变声迷你GUI,双击go-realtime-gui.bat启动
+ - 训练推理均对<50Hz的频段进行滤波过滤
+ - 训练推理音高提取pyworld最低音高从默认80下降至50,50-80hz间的男声低音不会哑
+ - WebUI支持根据系统区域变更语言(现支持en_US,ja_JP,zh_CN,zh_HK,zh_SG,zh_TW,不支持的默认en_US)
+ - 修正部分显卡识别(例如V100-16G识别失败,P4识别失败)
+
+ ### 20230428更新
+ - 升级faiss索引设置,速度更快,质量更高
+ - 取消total_npy依赖,后续分享模型不再需要填写total_npy
+ - 解锁16系限制。4G显存GPU给到4G的推理设置。
+ - 修复部分音频格式下UVR5人声伴奏分离的bug
+ - 实时变声迷你gui增加对非40k与不带音高模型的支持
+
+ ### 后续计划:
+ 功能:
+ - 支持多人训练选项卡(至多4人)
+
+ 底模:
+ - 收集呼吸wav加入训练集修正呼吸变声电音的问题
+ - 我们正在训练增加了歌声训练集的底模,未来会公开
+
docs/cn/faq.md ADDED
@@ -0,0 +1,108 @@
+ ## Q1:ffmpeg error/utf8 error.
+
+ 大概率不是ffmpeg问题,而是音频路径问题;<br>
+ ffmpeg读取路径带空格、()等特殊符号时,可能出现ffmpeg error;训练集音频带中文路径,在写入filelist.txt的时候可能出现utf8 error;<br>
+
+ ## Q2:一键训练结束没有索引
+
+ 显示"Training is done. The program is closed."则模型训练成功,后续紧邻的报错是假的;<br>
+
+ 一键训练结束后没有added开头的索引文件,可能是因为训练集太大卡住了添加索引的步骤;已通过批处理add索引解决add索引对内存需求过大的问题。临时可尝试再次点击"训练索引"按钮。<br>
+
+ ## Q3:训练结束推理没看到训练集的音色
+ 点刷新音色再看看,如果还没有,看看训练有没有报错,控制台和webui的截图,以及logs/实验名下的log,都可以发给开发者看看。<br>
+
+ ## Q4:如何分享模型
+   rvc_root/logs/实验名 下面存储的pth不是用来分享模型用来推理的,而是为了存储实验状态供复现,以及继续训练用的。用来分享的模型应该是weights文件夹下大小为60+MB的pth文件;<br>
+   后续将把weights/exp_name.pth和logs/exp_name/added_xxx.index合并打包成weights/exp_name.zip省去填写index的步骤,那么zip文件用来分享,不要分享pth文件,除非是想换机器继续训练;<br>
+   如果你把logs文件夹下的几百MB的pth文件复制/分享到weights文件夹下强行用于推理,可能会出现f0,tgt_sr等各种key不存在的报错。你需要用ckpt选项卡最下面,手工或自动(本地logs下如果能找到相关信息则会自动)选择是否携带音高、目标音频采样率的选项后进行ckpt小模型提取(输入路径填G开头的那个),提取完在weights文件夹下会出现60+MB的pth文件,刷新音色后可以选择使用。<br>
+
+ ## Q5:Connection Error.
+ 也许你关闭了控制台(黑色窗口)。<br>
+
+ ## Q6:WebUI弹出Expecting value: line 1 column 1 (char 0).
+ 请关闭系统局域网代理/全局代理。<br>
+
+ 这个不仅是客户端的代理,也包括服务端的代理(例如你使用autodl设置了http_proxy和https_proxy学术加速,使用时也需要unset关掉)<br>
+
+ ## Q7:不用WebUI如何通过命令训练推理
+ 训练脚本:<br>
+ 可先跑通WebUI,消息窗内会显示数据集处理和训练用命令行;<br>
+
+ 推理脚本:<br>
+ https://huggingface.co/lj1995/VoiceConversionWebUI/blob/main/myinfer.py<br>
+
+ 例子:<br>
+
+ runtime\python.exe myinfer.py 0 "E:\codes\py39\RVC-beta\todo-songs\1111.wav" "E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index" harvest "test.wav" "weights/mi-test.pth" 0.6 cuda:0 True<br>
+
+ f0up_key=sys.argv[1]<br>
+ input_path=sys.argv[2]<br>
+ index_path=sys.argv[3]<br>
+ f0method=sys.argv[4]#harvest or pm<br>
+ opt_path=sys.argv[5]<br>
+ model_path=sys.argv[6]<br>
+ index_rate=float(sys.argv[7])<br>
+ device=sys.argv[8]<br>
+ is_half=bool(sys.argv[9])<br>
+
+ ## Q8:Cuda error/Cuda out of memory.
+ 小概率是cuda配置问题、设备不支持;大概率是显存不够(out of memory);<br>
+
+ 训练的话缩小batch size(如果缩小到1还不够只能更换显卡训练),推理的话酌情缩小config.py结尾的x_pad,x_query,x_center,x_max。4G以下显存(例如1060(3G)和各种2G显卡)可以直接放弃,4G显存显卡还有救。<br>
+
+ ## Q9:total_epoch调多少比较好
+
+ 如果训练集音质差底噪大,20~30足够了,调太高,底模音质无法带高你的低音质训练集;<br>
+ 如果训练集音质高底噪低时长多,可以调高,200是ok的(训练速度很快,既然你有条件准备高音质训练集,显卡想必条件也不错,肯定不在乎多一些训练时间)<br>
+
+ ## Q10:需要多少训练集时长
+   推荐10min至50min;<br>
+   保证音质高底噪低的情况下,如果有个人特色的音色统一,则多多益善;<br>
+   高水平的训练集(精简+音色有特色),5min至10min也是ok的,仓库作者本人就经常这么玩;<br>
+   也有人拿1min至2min的数据来训练并且训练成功的,但是成功经验是其他人不可复现的,不太具备参考价值。这要求训练集音色特色非常明显(比如说高频气声较明显的萝莉少女音),且音质高;<br>
+   1min以下时长数据目前没见有人尝试(成功)过。不建议进行这种鬼畜行为。<br>
+
+ ## Q11:index rate干嘛用的,怎么调(科普)
+   如果底模和推理源的音质高于训练集的音质,它们可以带高推理结果的音质,但代价可能是音色往底模/推理源的音色靠,这种现象叫做"音色泄露";<br>
+   index rate用来削减/解决音色泄露问题。调到1,则理论上不存在推理源的音色泄露问题,但音质更倾向于训练集。如果训练集音质比推理源低,则index rate调高可能降低音质。调到0,则不具备利用检索混合来保护训练集音色的效果;<br>
+   如果训练集优质时长多,可调高total_epoch,此时模型本身不太会引用推理源和底模的音色,很少存在"音色泄露"问题,此时index_rate不重要,你甚至可以不建立/分享index索引文件。<br>
+
+ ## Q12:推理怎么选gpu
+ config.py文件里device cuda:后面选择卡号;<br>
+ 卡号和显卡的映射关系,在训练选项卡的显卡信息栏里能看到。<br>
+
+ ## Q13:如何推理训练中间保存的pth
+ 通过ckpt选项卡最下面提取小模型。<br>
+
+
+ ## Q14:如何中断和继续训练
+ 现阶段只能关闭WebUI控制台双击go-web.bat重启程序。网页参数也要刷新重新填写;<br>
+ 继续训练:相同网页参数点训练模型,就会接着上次的checkpoint继续训练。<br>
+
+ ## Q15:训练时出现文件页面/内存error
+ 进程开太多了,内存炸了。你可能可以通过如下方式解决:<br>
+ 1、"提取音高和处理数据使用的CPU进程数" 酌情拉低;<br>
+ 2、训练集音频手工切一下,不要太长。<br>
+
+
+ ## Q16:如何中途加数据训练
+ 1、所有数据新建一个实验名;<br>
+ 2、拷贝上一次的最新的那个G和D文件(或者你想基于哪个中间ckpt训练,也可以拷贝中间的)到新实验名下;<br>
+ 3、一键训练新实验名,它会继续上一次的最新进度训练。<br>
+
+ ## Q17: error about llvmlite.dll
+
+ OSError: Could not load shared object file: llvmlite.dll
+
+ FileNotFoundError: Could not find module lib\site-packages\llvmlite\binding\llvmlite.dll (or one of its dependencies). Try using the full path with constructor syntax.
+
+ win平台会报这个错,装上https://aka.ms/vs/17/release/vc_redist.x64.exe这个再重启WebUI就好了。
+
+ ## Q18: RuntimeError: The expanded size of the tensor (17280) must match the existing size (0) at non-singleton dimension 1. Target sizes: [1, 17280]. Tensor sizes: [0]
+
+ wavs16k文件夹下,找到文件大小显著比其他都小的一些音频文件,删掉,点击训练模型,就不会报错了,不过由于一键流程中断了,你训练完模型还要点训练索引。
+
+ ## Q19: RuntimeError: The size of tensor a (24) must match the size of tensor b (16) at non-singleton dimension 2
+
+ 不要中途变更采样率继续训练。如果一定要变更,应更换实验名从头训练。当然你也可以把上次提取的音高和特征(0/1/2/2b folders)拷贝过去加速训练流程。
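Editorial note: Q7 above gives the exact positional-argument contract of myinfer.py. A sketch wrapping that same command from Python; the paths are the ones quoted in the FAQ (substitute your own), and note the script reads every argument as a string:

```python
import subprocess

subprocess.run(
    [
        "python", "myinfer.py",
        "0",                                                             # f0up_key
        r"E:\codes\py39\RVC-beta\todo-songs\1111.wav",                   # input_path
        r"E:\codes\py39\logs\mi-test\added_IVF677_Flat_nprobe_7.index",  # index_path
        "harvest",                                                       # f0method (harvest or pm)
        "test.wav",                                                      # opt_path
        "weights/mi-test.pth",                                           # model_path
        "0.6",                                                           # index_rate
        "cuda:0",                                                        # device
        "True",                                                          # is_half
    ],
    check=True,
)
```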
docs/en/Changelog_EN.md ADDED
@@ -0,0 +1,105 @@
+ ### 2023-10-06
+ - Added a GUI for real-time voice changing: go-realtime-gui.bat/gui_v1.py (note that the input and output devices should be of the same type, e.g. both MME).
+ - Trained a better RMVPE pitch extraction model.
+ - Optimized the inference GUI layout.
+
+ ### 2023-08-13
+ 1-Regular bug fixes
+ - Changed the minimum saving frequency to 1 and the minimum total epoch count to 2
+ - Fixed training errors when no pre-trained models are used
+ - Clear GPU memory after vocal/accompaniment separation
+ - Changed the faiss index save path from absolute to relative
+ - Support paths containing spaces (both the training-set path and the experiment name are supported and no longer raise errors)
+ - Filelists no longer force utf8 encoding
+ - Solved the heavy CPU load caused by faiss searching during real-time voice changing
+
+ 2-Key updates
+ - Trained RMVPE, currently the strongest open-source vocal pitch extraction model, and use it for RVC training and offline/real-time inference, supporting PyTorch/Onnx/DirectML
+ - Support for AMD and Intel graphics cards through Pytorch_DML
+
+ (1) Real-time voice changing (2) Inference (3) Vocal/accompaniment separation (4) Training is not yet supported and will fall back to CPU training; RMVPE GPU inference is supported via Onnx_Dml
+
+
+ ### 2023-06-18
+ - New pretrained v2 models: 32k and 48k
+ - Fixed non-f0 model inference errors
+ - For training sets exceeding 1 hour, automatically run minibatch-kmeans to reduce the feature shape, so that index training, adding, and searching are much faster
+ - Provide a toy vocal2guitar huggingface space
+ - Automatically delete outlier short slices from the training set
+ - Onnx export tab
+
+ Failed experiments:
+ - ~~Feature retrieval: add temporal feature retrieval: not effective~~
+ - ~~Feature retrieval: add PCAR dimensionality reduction: searching is even slower~~
+ - ~~Random data augmentation during training: not effective~~
+
+ todolist:
+ - ~~Vocos-RVC (tiny vocoder): not effective~~
+ - ~~Crepe support for training: replaced by RMVPE~~
+ - ~~Half-precision crepe inference: replaced by RMVPE, and hard to achieve~~
+ - F0 editor support
+
+ ### 2023-05-28
+ - Added the v2 jupyter notebook and a Korean changelog, fixed some environment requirements
+ - Added voiceless consonant and breath protection mode
+ - Support crepe-full pitch detection
+ - UVR5 vocal separation: support de-reverb and de-echo models
+ - Added the experiment name and version to index names
+ - Let users manually select the export format of output audios during batch voice conversion and UVR5 vocal separation
+ - v1 32k model training is no longer supported
+
+ ### 2023-05-13
+ - Removed the leftover lib.infer_pack and uvr5_pack code from the old runtime in the one-click package
+ - Fixed a pseudo-multiprocessing bug in training-set preprocessing
+ - Added median-filter radius adjustment for the harvest pitch recognition algorithm
+ - Support post-processing resampling for exported audio
+ - The multiprocessing "n_cpu" setting for training now covers "data preprocessing and f0 extraction" instead of only "f0 extraction"
+ - Automatically detect index paths under the logs folder and provide them in a drop-down list
+ - Added "Frequently Asked Questions and Answers" on the tab page (you can also refer to the github RVC wiki)
+ - During inference, harvest pitch is cached when the same input audio path is used (purpose: with harvest pitch extraction, the entire pipeline goes through a long and repetitive pitch extraction process; without caching, users who experiment with different timbre, index, and pitch median-filter radius settings face a very painful wait after the first inference)
+
+ ### 2023-05-14
+ - Use the volume envelope of the input to mix or replace the volume envelope of the output (this can alleviate the problem of "input muting producing low-amplitude output noise"; if the input audio has loud background noise, it is not recommended to turn it on, and it is off by default (a value of 1 can be considered off))
+ - Support saving extracted small models at a specified frequency (useful if you want to compare performance under different epochs but do not want to save all large checkpoints and manually extract small models via ckpt processing every time)
+ - Resolve the "connection errors" caused by the server's global proxy by setting environment variables
+ - Support pre-trained v2 models (currently only the 40k version is publicly available for testing; the other two sampling rates have not been fully trained yet)
+ - Limit excessive volume exceeding 1 before inference
+ - Slightly adjusted the training-set preprocessing settings
+
+
+ #######################
+
+ History changelogs:
+
+ ### 2023-04-09
+ - Fixed training parameters to improve GPU utilization: A100 increased from 25% to around 90%, V100 from 50% to around 90%, 2060S from 60% to around 85%, P40 from 25% to around 95%; training speed is significantly improved
+ - Changed parameter: total batch_size is now per-GPU batch_size
+ - Changed total_epoch: maximum limit increased from 100 to 1000; default increased from 10 to 20
+ - Fixed ckpt extraction incorrectly recognizing whether a model uses pitch, which caused abnormal inference
+ - Fixed distributed training saving a ckpt for each rank
+ - Applied nan feature filtering in feature extraction
+ - Fixed silent input/output producing random consonants or noise (old models need to be retrained with a new dataset)
+
+ ### 2023-04-16
+ - Added a local real-time voice changing mini-GUI; start it by double-clicking go-realtime-gui.bat
+ - Applied filtering for frequency bands below 50Hz during training and inference
+ - Lowered pyworld's minimum pitch extraction from the default 80Hz to 50Hz for training and inference, so male low-pitched voices between 50-80Hz are no longer muted
+ - WebUI supports changing languages according to the system locale (currently supporting en_US, ja_JP, zh_CN, zh_HK, zh_SG, zh_TW; defaults to en_US if not supported)
+ - Fixed recognition of some GPUs (e.g., V100-16G recognition failure, P4 recognition failure)
+
+ ### 2023-04-28
+ - Upgraded faiss index settings for faster speed and higher quality
+ - Removed the dependency on total_npy; future model sharing will not require total_npy
+ - Unlocked the restrictions on 16-series GPUs, providing 4GB inference settings for 4GB VRAM GPUs
+ - Fixed a bug in UVR5 vocal/accompaniment separation for certain audio formats
+ - The real-time voice changing mini-GUI now supports non-40k and no-pitch (non-f0) models
+
+ ### Future Plans:
+ Features:
+ - Add option: extract small models at each epoch save
+ - Add option: export an additional mp3 to a specified path during inference
+ - Support a multi-person training tab (up to 4 people)
+
+ Base model:
+ - Collect breathing wav files to add to the training dataset to fix the issue of distorted breath sounds
+ - We are currently training a base model with an extended singing dataset, which will be released in the future