diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..c32138339e4a73d00fbc64e90f2ac02ce606bd54 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,296 @@ +# syntax=docker/dockerfile:1.4 + +ARG BASE_IMAGE=ubuntu:20.04 +ARG BASE_RUNTIME_IMAGE=$BASE_IMAGE + +# Download VOICEVOX Core shared object +FROM ${BASE_IMAGE} AS download-core-env +ARG DEBIAN_FRONTEND=noninteractive + +WORKDIR /work + +RUN <= 0.11.0 (ONNX) +ARG TARGETPLATFORM +ARG USE_GPU=false +ARG VOICEVOX_CORE_VERSION=0.14.3 + +RUN < /etc/ld.so.conf.d/voicevox_core.conf + + # Update dynamic library search cache + ldconfig +EOF + + +# Download ONNX Runtime +FROM ${BASE_IMAGE} AS download-onnxruntime-env +ARG DEBIAN_FRONTEND=noninteractive + +WORKDIR /work + +RUN < /etc/ld.so.conf.d/onnxruntime.conf + + # Update dynamic library search cache + ldconfig +EOF + + +# Compile Python (version locked) +FROM ${BASE_IMAGE} AS compile-python-env + +ARG DEBIAN_FRONTEND=noninteractive + +RUN < /etc/profile.d/python-path.sh +# echo "export LD_LIBRARY_PATH=/opt/python/lib:\$LD_LIBRARY_PATH" >> /etc/profile.d/python-path.sh +# echo "export C_INCLUDE_PATH=/opt/python/include:\$C_INCLUDE_PATH" >> /etc/profile.d/python-path.sh +# +# rm -f /etc/ld.so.cache +# ldconfig +# EOF + + +# Runtime +FROM ${BASE_RUNTIME_IMAGE} AS runtime-env +ARG DEBIAN_FRONTEND=noninteractive + +WORKDIR /opt/voicevox_engine + +# libsndfile1: soundfile shared object +# ca-certificates: pyopenjtalk dictionary download +# build-essential: pyopenjtalk local build +RUN < /opt/voicevox_engine/engine_manifest_assets/dependency_licenses.json + cp /opt/voicevox_engine/engine_manifest_assets/dependency_licenses.json /opt/voicevox_engine/licenses.json +EOF + +# Keep this layer separated to use layer cache on download failed in local build +RUN < /dev/stderr + +exec "\$@" +EOF +USER user +ENTRYPOINT [ "/entrypoint.sh" ] +CMD [ "/opt/python/bin/python3", "./run.py", "--voicelib_dir", "/opt/voicevox_core/", "--runtime_dir", 
"/opt/onnxruntime/lib", "--host", "0.0.0.0","--port","7860" ] diff --git a/LGPL_LICENSE b/LGPL_LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..153d416dc8d2d60076698ec3cbfce34d91436a03 --- /dev/null +++ b/LGPL_LICENSE @@ -0,0 +1,165 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + + This version of the GNU Lesser General Public License incorporates +the terms and conditions of version 3 of the GNU General Public +License, supplemented by the additional permissions listed below. + + 0. Additional Definitions. + + As used herein, "this License" refers to version 3 of the GNU Lesser +General Public License, and the "GNU GPL" refers to version 3 of the GNU +General Public License. + + "The Library" refers to a covered work governed by this License, +other than an Application or a Combined Work as defined below. + + An "Application" is any work that makes use of an interface provided +by the Library, but which is not otherwise based on the Library. +Defining a subclass of a class defined by the Library is deemed a mode +of using an interface provided by the Library. + + A "Combined Work" is a work produced by combining or linking an +Application with the Library. The particular version of the Library +with which the Combined Work was made is also called the "Linked +Version". + + The "Minimal Corresponding Source" for a Combined Work means the +Corresponding Source for the Combined Work, excluding any source code +for portions of the Combined Work that, considered in isolation, are +based on the Application, and not on the Linked Version. 
+ + The "Corresponding Application Code" for a Combined Work means the +object code and/or source code for the Application, including any data +and utility programs needed for reproducing the Combined Work from the +Application, but excluding the System Libraries of the Combined Work. + + 1. Exception to Section 3 of the GNU GPL. + + You may convey a covered work under sections 3 and 4 of this License +without being bound by section 3 of the GNU GPL. + + 2. Conveying Modified Versions. + + If you modify a copy of the Library, and, in your modifications, a +facility refers to a function or data to be supplied by an Application +that uses the facility (other than as an argument passed when the +facility is invoked), then you may convey a copy of the modified +version: + + a) under this License, provided that you make a good faith effort to + ensure that, in the event an Application does not supply the + function or data, the facility still operates, and performs + whatever part of its purpose remains meaningful, or + + b) under the GNU GPL, with none of the additional permissions of + this License applicable to that copy. + + 3. Object Code Incorporating Material from Library Header Files. + + The object code form of an Application may incorporate material from +a header file that is part of the Library. You may convey such object +code under terms of your choice, provided that, if the incorporated +material is not limited to numerical parameters, data structure +layouts and accessors, or small macros, inline functions and templates +(ten or fewer lines in length), you do both of the following: + + a) Give prominent notice with each copy of the object code that the + Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the object code with a copy of the GNU GPL and this license + document. + + 4. Combined Works. 
+ + You may convey a Combined Work under terms of your choice that, +taken together, effectively do not restrict modification of the +portions of the Library contained in the Combined Work and reverse +engineering for debugging such modifications, if you also do each of +the following: + + a) Give prominent notice with each copy of the Combined Work that + the Library is used in it and that the Library and its use are + covered by this License. + + b) Accompany the Combined Work with a copy of the GNU GPL and this license + document. + + c) For a Combined Work that displays copyright notices during + execution, include the copyright notice for the Library among + these notices, as well as a reference directing the user to the + copies of the GNU GPL and this license document. + + d) Do one of the following: + + 0) Convey the Minimal Corresponding Source under the terms of this + License, and the Corresponding Application Code in a form + suitable for, and under terms that permit, the user to + recombine or relink the Application with a modified version of + the Linked Version to produce a modified Combined Work, in the + manner specified by section 6 of the GNU GPL for conveying + Corresponding Source. + + 1) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (a) uses at run time + a copy of the Library already present on the user's computer + system, and (b) will operate properly with a modified version + of the Library that is interface-compatible with the Linked + Version. + + e) Provide Installation Information, but only if you would otherwise + be required to provide such information under section 6 of the + GNU GPL, and only to the extent that such information is + necessary to install and execute a modified version of the + Combined Work produced by recombining or relinking the + Application with a modified version of the Linked Version. 
(If + you use option 4d0, the Installation Information must accompany + the Minimal Corresponding Source and Corresponding Application + Code. If you use option 4d1, you must provide the Installation + Information in the manner specified by section 6 of the GNU GPL + for conveying Corresponding Source.) + + 5. Combined Libraries. + + You may place library facilities that are a work based on the +Library side by side in a single library together with other library +facilities that are not Applications and are not covered by this +License, and convey such a combined library under terms of your +choice, if you do both of the following: + + a) Accompany the combined library with a copy of the same work based + on the Library, uncombined with any other library facilities, + conveyed under the terms of this License. + + b) Give prominent notice with the combined library that part of it + is a work based on the Library, and explaining where to find the + accompanying uncombined form of the same work. + + 6. Revised Versions of the GNU Lesser General Public License. + + The Free Software Foundation may publish revised and/or new versions +of the GNU Lesser General Public License from time to time. Such new +versions will be similar in spirit to the present version, but may +differ in detail to address new problems or concerns. + + Each version is given a distinguishing version number. If the +Library as you received it specifies that a certain numbered version +of the GNU Lesser General Public License "or any later version" +applies to it, you have the option of following the terms and +conditions either of that published version or of any later version +published by the Free Software Foundation. If the Library as you +received it does not specify a version number of the GNU Lesser +General Public License, you may choose any version of the GNU Lesser +General Public License ever published by the Free Software Foundation. 
+ + If the Library as you received it specifies that a proxy can decide +whether future versions of the GNU Lesser General Public License shall +apply, that proxy's public statement of acceptance of any version is +permanent authorization for you to choose that version for the +Library. \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2eaac841f2f3bd2b16ef482eb75b14507d050f87 --- /dev/null +++ b/LICENSE @@ -0,0 +1,9 @@ +LGPL v3 と、ソースコードの公開が不要な別ライセンスのデュアルライセンスです。 + +1. LGPL v3 + +LGPL_LICENSEを参照してください。 + +2. ソースコードの公開が不要な別ライセンス + +別ライセンスを取得したい場合は、ヒホ(twitter: @hiho_karuta)に求めてください。 diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..f2c33cc10eb26ea348a235f13bbcea9e665a825a --- /dev/null +++ b/Makefile @@ -0,0 +1,125 @@ +CMD= +NOCACHE= + +ARGS:= +ifeq ($(NOCACHE),1) + ARGS:=$(ARGS) --no-cache +endif + +# Ubuntu 20.04 +.PHONY: build-linux-docker-ubuntu20.04 +build-linux-docker-ubuntu20.04: + docker buildx build . \ + -t voicevox/voicevox_engine:cpu-ubuntu20.04-latest \ + --target runtime-env \ + --progress plain \ + --build-arg BASE_IMAGE=ubuntu:20.04 \ + --build-arg BASE_RUNTIME_IMAGE=ubuntu:20.04 \ + --build-arg ONNXRUNTIME_URL=https://github.com/microsoft/onnxruntime/releases/download/v1.13.1/onnxruntime-linux-x64-1.13.1.tgz \ + --build-arg VOICEVOX_CORE_LIBRARY_NAME=libcore_cpu_x64.so $(ARGS) + +.PHONY: run-linux-docker-ubuntu20.04 +run-linux-docker-ubuntu20.04: + docker run --rm -it \ + -p '127.0.0.1:50021:50021' $(ARGS) \ + voicevox/voicevox_engine:cpu-ubuntu20.04-latest $(CMD) + +.PHONY: build-linux-docker-nvidia-ubuntu20.04 +build-linux-docker-nvidia-ubuntu20.04: + docker buildx build . 
\ + -t voicevox/voicevox_engine:nvidia-ubuntu20.04-latest \ + --target runtime-nvidia-env \ + --progress plain \ + --build-arg BASE_IMAGE=ubuntu:20.04 \ + --build-arg BASE_RUNTIME_IMAGE=nvidia/cuda:11.6.2-cudnn8-runtime-ubuntu20.04 \ + --build-arg ONNXRUNTIME_URL=https://github.com/microsoft/onnxruntime/releases/download/v1.13.1/onnxruntime-linux-x64-gpu-1.13.1.tgz \ + --build-arg VOICEVOX_CORE_LIBRARY_NAME=libcore_gpu_x64_nvidia.so $(ARGS) + +.PHONY: run-linux-docker-nvidia-ubuntu20.04 +run-linux-docker-nvidia-ubuntu20.04: + docker run --rm -it \ + --gpus all \ + -p '127.0.0.1:50021:50021' $(ARGS) \ + voicevox/voicevox_engine:nvidia-ubuntu20.04-latest $(CMD) + + +# Ubuntu 18.04 +.PHONY: build-linux-docker-ubuntu18.04 +build-linux-docker-ubuntu18.04: + docker buildx build . \ + -t voicevox/voicevox_engine:cpu-ubuntu18.04-latest \ + --target runtime-env \ + --progress plain \ + --build-arg BASE_IMAGE=ubuntu:18.04 \ + --build-arg BASE_RUNTIME_IMAGE=ubuntu:18.04 \ + --build-arg ONNXRUNTIME_URL=https://github.com/microsoft/onnxruntime/releases/download/v1.13.1/onnxruntime-linux-x64-1.13.1.tgz \ + --build-arg VOICEVOX_CORE_LIBRARY_NAME=libcore_cpu_x64.so $(ARGS) + +.PHONY: run-linux-docker-ubuntu18.04 +run-linux-docker-ubuntu18.04: + docker run --rm -it \ + -p '127.0.0.1:50021:50021' $(ARGS) \ + voicevox/voicevox_engine:cpu-ubuntu18.04-latest $(CMD) + +.PHONY: build-linux-docker-nvidia-ubuntu18.04 +build-linux-docker-nvidia-ubuntu18.04: + docker buildx build . 
\ + -t voicevox/voicevox_engine:nvidia-ubuntu18.04-latest \ + --target runtime-nvidia-env \ + --progress plain \ + --build-arg BASE_IMAGE=ubuntu:18.04 \ + --build-arg BASE_RUNTIME_IMAGE=nvidia/cuda:11.6.2-cudnn8-runtime-ubuntu18.04 \ + --build-arg ONNXRUNTIME_URL=https://github.com/microsoft/onnxruntime/releases/download/v1.13.1/onnxruntime-linux-x64-gpu-1.13.1.tgz \ + --build-arg VOICEVOX_CORE_LIBRARY_NAME=libcore_gpu_x64_nvidia.so $(ARGS) + +.PHONY: run-linux-docker-nvidia-ubuntu18.04 +run-linux-docker-nvidia-ubuntu18.04: + docker run --rm -it \ + --gpus all \ + -p '127.0.0.1:50021:50021' $(ARGS) \ + voicevox/voicevox_engine:nvidia-ubuntu18.04-latest $(CMD) + + +# VOICEVOX Core env for test +.PHONY: build-linux-docker-download-core-env-ubuntu18.04 +build-linux-docker-download-core-env-ubuntu18.04: + docker buildx build . \ + -t voicevox/voicevox_engine:download-core-env-ubuntu18.04 \ + --target download-core-env \ + --progress plain \ + --build-arg BASE_IMAGE=ubuntu:18.04 $(ARGS) + +.PHONY: run-linux-docker-download-core-env-ubuntu18.04 +run-linux-docker-download-core-env-ubuntu18.04: + docker run --rm -it $(ARGS) \ + voicevox/voicevox_engine:download-core-env-ubuntu18.04 $(CMD) + + +# ONNX Runtime env for test +.PHONY: build-linux-docker-download-onnxruntime-env-ubuntu18.04 +build-linux-docker-download-onnxruntime-env-ubuntu18.04: + docker buildx build . \ + -t voicevox/voicevox_engine:download-onnxruntime-env-ubuntu18.04 \ + --target download-onnxruntime-env \ + --progress plain \ + --build-arg BASE_IMAGE=ubuntu:18.04 $(ARGS) + +.PHONY: run-linux-docker-download-onnxruntime-env-ubuntu18.04 +run-linux-docker-download-onnxruntime-env-ubuntu18.04: + docker run --rm -it $(ARGS) \ + voicevox/voicevox_engine:download-onnxruntime-env-ubuntu18.04 $(CMD) + + +# Python env for test +.PHONY: build-linux-docker-compile-python-env +build-linux-docker-compile-python-env: + docker buildx build . 
\ + -t voicevox/voicevox_engine:compile-python-env \ + --target compile-python-env \ + --progress plain \ + --build-arg BASE_IMAGE=ubuntu:20.04 $(ARGS) + +.PHONY: run-linux-docker-compile-python-env +run-linux-docker-compile-python-env: + docker run --rm -it $(ARGS) \ + voicevox/voicevox_engine:compile-python-env $(CMD) diff --git a/README.md b/README.md index c3cba541c9dda122bdba4ab6fe9579c58a01a1bc..c2424e143d7cac1211f5afa9e006ac69677bf151 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,570 @@ ---- -title: Voicevox -emoji: 💩 -colorFrom: yellow -colorTo: indigo -sdk: docker -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference +# VOICEVOX ENGINE + +[![build](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build.yml) +[![releases](https://img.shields.io/github/v/release/VOICEVOX/voicevox_engine)](https://github.com/VOICEVOX/voicevox_engine/releases) +[![discord](https://img.shields.io/discord/879570910208733277?color=5865f2&label=&logo=discord&logoColor=ffffff)](https://discord.gg/WMwWetrzuh) + +[![test](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/test.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/test.yml) +[![Coverage Status](https://coveralls.io/repos/github/VOICEVOX/voicevox_engine/badge.svg)](https://coveralls.io/github/VOICEVOX/voicevox_engine) + +[![build-docker](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build-docker.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build-docker.yml) +[![docker](https://img.shields.io/docker/pulls/voicevox/voicevox_engine)](https://hub.docker.com/r/voicevox/voicevox_engine) + +[VOICEVOX](https://voicevox.hiroshiba.jp/) のエンジンです。 +実態は HTTP サーバーなので、リクエストを送信すればテキスト音声合成できます。 + +(エディターは [VOICEVOX](https://github.com/VOICEVOX/voicevox/) 、 +コアは [VOICEVOX 
CORE](https://github.com/VOICEVOX/voicevox_core/) 、 +全体構成は [こちら](https://github.com/VOICEVOX/voicevox/blob/main/docs/%E5%85%A8%E4%BD%93%E6%A7%8B%E6%88%90.md) に詳細があります。) + +## ダウンロード + +[こちら](https://github.com/VOICEVOX/voicevox_engine/releases/latest)から対応するエンジンをダウンロードしてください。 + +## API ドキュメント + +[API ドキュメント](https://voicevox.github.io/voicevox_engine/api/)をご参照ください。 + +VOICEVOX エンジンもしくはエディタを起動した状態で http://127.0.0.1:50021/docs にアクセスすると、起動中のエンジンのドキュメントも確認できます。 +今後の方針などについては [VOICEVOX 音声合成エンジンとの連携](./docs/VOICEVOX音声合成エンジンとの連携.md) も参考になるかもしれません。 + +リクエスト・レスポンスの文字コードはすべて UTF-8 です。 + +### HTTP リクエストで音声合成するサンプルコード + +```bash +echo -n "こんにちは、音声合成の世界へようこそ" >text.txt + +curl -s \ + -X POST \ + "127.0.0.1:50021/audio_query?speaker=1"\ + --get --data-urlencode text@text.txt \ + > query.json + +curl -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d @query.json \ + "127.0.0.1:50021/synthesis?speaker=1" \ + > audio.wav +``` + +生成される音声はサンプリングレートが 24000Hz と少し特殊なため、音声プレーヤーによっては再生できない場合があります。 + +`speaker` に指定する値は `/speakers` エンドポイントで得られる `style_id` です。互換性のために `speaker` という名前になっています。 + +### 読み方を AquesTalk 記法で取得・修正するサンプルコード + +`/audio_query`のレスポンスにはエンジンが判断した読み方が AquesTalk ライクな記法([本家の記法](https://www.a-quest.com/archive/manual/siyo_onseikigou.pdf)とは一部異なります)で記録されています。 +記法は次のルールに従います。 + +- 全てのカナはカタカナで記述される +- アクセント句は`/`または`、`で区切る。`、`で区切った場合に限り無音区間が挿入される。 +- カナの手前に`_`を入れるとそのカナは無声化される +- アクセント位置を`'`で指定する。全てのアクセント句にはアクセント位置を 1 つ指定する必要がある。 +- アクセント句末に`?`(全角)を入れることにより疑問文の発音ができる + +```bash +# 読ませたい文章をutf-8でtext.txtに書き出す +echo -n "ディープラーニングは万能薬ではありません" >text.txt + +curl -s \ + -X POST \ + "127.0.0.1:50021/audio_query?speaker=1" \ + --get --data-urlencode text@text.txt \ + > query.json + +cat query.json | grep -o -E "\"kana\":\".*\"" +# 結果... 
"kana":"ディ'イプ/ラ'アニングワ/バンノオヤクデワアリマセ'ン" + +# "ディイプラ'アニングワ/バンノ'オヤクデワ/アリマセ'ン"と読ませたいので、 +# is_kana=trueをつけてイントネーションを取得しnewphrases.jsonに保存 +echo -n "ディイプラ'アニングワ/バンノ'オヤクデワ/アリマセ'ン" > kana.txt +curl -s \ + -X POST \ + "127.0.0.1:50021/accent_phrases?speaker=1&is_kana=true" \ + --get --data-urlencode text@kana.txt \ + > newphrases.json + +# query.jsonの"accent_phrases"の内容をnewphrases.jsonの内容に置き換える +cat query.json | sed -e "s/\[{.*}\]/$(cat newphrases.json)/g" > newquery.json + +curl -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d @newquery.json \ + "127.0.0.1:50021/synthesis?speaker=1" \ + > audio.wav +``` + +### ユーザー辞書機能について + +APIからユーザー辞書の参照、単語の追加、編集、削除を行うことができます。 + +#### 参照 + +`/user_dict`にGETリクエストを投げることでユーザー辞書の一覧を取得することができます。 + +```bash +curl -s -X GET "127.0.0.1:50021/user_dict" +``` + +#### 単語追加 + +`/user_dict_word`にPOSTリクエストを投げる事でユーザー辞書に単語を追加することができます。 +URLパラメータとして、以下が必要です。 +- surface (辞書に登録する単語) +- pronunciation (カタカナでの読み方) +- accent_type (アクセント核位置、整数) + +アクセント核位置については、こちらの文章が参考になるかと思います。 +〇型となっている数字の部分がアクセント核位置になります。 +https://tdmelodic.readthedocs.io/ja/latest/pages/introduction.html + +成功した場合の返り値は単語に割り当てられるUUIDの文字列になります。 + +```bash +surface="test" +pronunciation="テスト" +accent_type="1" + +curl -s -X POST "127.0.0.1:50021/user_dict_word" \ + --get \ + --data-urlencode "surface=$surface" \ + --data-urlencode "pronunciation=$pronunciation" \ + --data-urlencode "accent_type=$accent_type" +``` + +#### 単語修正 + +`/user_dict_word/{word_uuid}`にPUTリクエストを投げる事でユーザー辞書の単語を修正することができます。 +URLパラメータとして、以下が必要です。 +- surface (辞書に登録するワード) +- pronunciation (カタカナでの読み方) +- accent_type (アクセント核位置、整数) + +word_uuidは単語追加時に確認できるほか、ユーザー辞書を参照することでも確認できます。 +成功した場合の返り値は`204 No Content`になります。 + +```bash +surface="test2" +pronunciation="テストツー" +accent_type="2" +# 環境によってword_uuidは適宜書き換えてください +word_uuid="cce59b5f-86ab-42b9-bb75-9fd3407f1e2d" + +curl -s -X PUT "127.0.0.1:50021/user_dict_word/$word_uuid" \ + --get \ + --data-urlencode "surface=$surface" \ + --data-urlencode 
"pronunciation=$pronunciation" \ + --data-urlencode "accent_type=$accent_type" +``` + +#### 単語削除 + +`/user_dict_word/{word_uuid}`にDELETEリクエストを投げる事でユーザー辞書の単語を削除することができます。 + +word_uuidは単語追加時に確認できるほか、ユーザー辞書を参照することでも確認できます。 +成功した場合の返り値は`204 No Content`になります。 + +```bash +# 環境によってword_uuidは適宜書き換えてください +word_uuid="cce59b5f-86ab-42b9-bb75-9fd3407f1e2d" + +curl -s -X DELETE "127.0.0.1:50021/user_dict_word/$word_uuid" +``` + +### プリセット機能について + +`presets.yaml`を編集することで話者や話速などのプリセットを使うことができます。 + +```bash +echo -n "プリセットをうまく活用すれば、サードパーティ間で同じ設定を使うことができます" >text.txt + +# プリセット情報を取得 +curl -s -X GET "127.0.0.1:50021/presets" > presets.json + +preset_id=$(cat presets.json | sed -r 's/^.+"id"\:\s?([0-9]+?).+$/\1/g') +style_id=$(cat presets.json | sed -r 's/^.+"style_id"\:\s?([0-9]+?).+$/\1/g') + +# AudioQueryの取得 +curl -s \ + -X POST \ + "127.0.0.1:50021/audio_query_from_preset?preset_id=$preset_id"\ + --get --data-urlencode text@text.txt \ + > query.json + +# 音声合成 +curl -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d @query.json \ + "127.0.0.1:50021/synthesis?speaker=$style_id" \ + > audio.wav +``` + +- `speaker_uuid`は、`/speakers`で確認できます +- `id`は重複してはいけません +- エンジン起動後にファイルを書き換えるとエンジンに反映されます + +### 2 人の話者でモーフィングするサンプルコード + +`/synthesis_morphing`では、2 人の話者でそれぞれ合成された音声を元に、モーフィングした音声を生成します。 + +```bash +echo -n "モーフィングを利用することで、2つの声を混ぜることができます。" > text.txt + +curl -s \ + -X POST \ + "127.0.0.1:50021/audio_query?speaker=0"\ + --get --data-urlencode text@text.txt \ + > query.json + +# 元の話者での合成結果 +curl -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d @query.json \ + "127.0.0.1:50021/synthesis?speaker=0" \ + > audio.wav + +export MORPH_RATE=0.5 + +# 話者2人分の音声合成+WORLDによる音声分析が入るため時間が掛かるので注意 +curl -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d @query.json \ + "127.0.0.1:50021/synthesis_morphing?base_speaker=0&target_speaker=1&morph_rate=$MORPH_RATE" \ + > audio.wav + +export MORPH_RATE=0.9 + +# query、base_speaker、target_speakerが同じ場合はキャッシュが使用されるため比較的高速に生成される 
+curl -s \ + -H "Content-Type: application/json" \ + -X POST \ + -d @query.json \ + "127.0.0.1:50021/synthesis_morphing?base_speaker=0&target_speaker=1&morph_rate=$MORPH_RATE" \ + > audio.wav +``` + +### 話者の追加情報を取得するサンプルコード + +追加情報の中の portrait.png を取得するコードです。 +([jq](https://stedolan.github.io/jq/)を使用して json をパースしています。) + +```bash +curl -s -X GET "127.0.0.1:50021/speaker_info?speaker_uuid=7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff" \ + | jq -r ".portrait" \ + | base64 -d \ + > portrait.png +``` + +### キャンセル可能な音声合成 + +`/cancellable_synthesis`では通信を切断した場合に即座に計算リソースが開放されます。 +(`/synthesis`では通信を切断しても最後まで音声合成の計算が行われます) +この API は実験的機能であり、エンジン起動時に引数で`--enable_cancellable_synthesis`を指定しないと有効化されません。 +音声合成に必要なパラメータは`/synthesis`と同様です。 + +### CORS設定 + +VOICEVOXではセキュリティ保護のため`localhost`・`127.0.0.1`・`app://`・Originなし以外のOriginからリクエストを受け入れないようになっています。 +そのため、一部のサードパーティアプリからのレスポンスを受け取れない可能性があります。 +これを回避する方法として、エンジンから設定できるUIを用意しています。 + +#### 設定方法 + +1. にアクセスします。 +2. 利用するアプリに合わせて設定を変更、追加してください。 +3. 保存ボタンを押して、変更を確定してください。 +4. 
設定の適用にはエンジンの再起動が必要です。必要に応じて再起動をしてください。 + +## アップデート + +エンジンディレクトリ内にあるファイルを全て消去し、新しいものに置き換えてください。 + +## Docker イメージ + +### CPU + +```bash +docker pull voicevox/voicevox_engine:cpu-ubuntu20.04-latest +docker run --rm -p '127.0.0.1:50021:50021' voicevox/voicevox_engine:cpu-ubuntu20.04-latest +``` + +### GPU + +```bash +docker pull voicevox/voicevox_engine:nvidia-ubuntu20.04-latest +docker run --rm --gpus all -p '127.0.0.1:50021:50021' voicevox/voicevox_engine:nvidia-ubuntu20.04-latest +``` + +#### トラブルシューティング +GPU版を利用する場合、環境によってエラーが発生することがあります。その場合、`--runtime=nvidia`を`docker run`につけて実行すると解決できることがあります。 + +## 貢献者の方へ + +Issue を解決するプルリクエストを作成される際は、別の方と同じ Issue に取り組むことを避けるため、 +Issue 側で取り組み始めたことを伝えるか、最初に Draft プルリクエストを作成してください。 + +[VOICEVOX 非公式 Discord サーバー](https://discord.gg/WMwWetrzuh)にて、開発の議論や雑談を行っています。気軽にご参加ください。 + +## 環境構築 + +`Python 3.11.3` を用いて開発されています。 +インストールするには、各 OS ごとの C/C++ コンパイラ、CMake が必要になります。 + +```bash +# 開発に必要なライブラリのインストール +python -m pip install -r requirements-dev.txt -r requirements-test.txt + +# とりあえず実行したいだけなら代わりにこちら +python -m pip install -r requirements.txt +``` + +## 実行 + +コマンドライン引数の詳細は以下のコマンドで確認してください。 + +```bash +python run.py --help +``` + +```bash +# 製品版 VOICEVOX でサーバーを起動 +VOICEVOX_DIR="C:/path/to/voicevox" # 製品版 VOICEVOX ディレクトリのパス +python run.py --voicevox_dir=$VOICEVOX_DIR +``` + + + +```bash +# モックでサーバー起動 +python run.py --enable_mock +``` + +```bash +# ログをUTF8に変更 +python run.py --output_log_utf8 +# もしくは VV_OUTPUT_LOG_UTF8=1 python run.py +``` + +### CPU スレッド数を指定する + +CPU スレッド数が未指定の場合は、論理コア数の半分か物理コア数が使われます。(殆どの CPU で、これは全体の処理能力の半分です) +もし IaaS 上で実行していたり、専用サーバーで実行している場合など、 +エンジンが使う処理能力を調節したい場合は、CPU スレッド数を指定することで実現できます。 + +- 実行時引数で指定する + + ```bash + python run.py --voicevox_dir=$VOICEVOX_DIR --cpu_num_threads=4 + ``` + +- 環境変数で指定する + ```bash + export VV_CPU_NUM_THREADS=4 + python run.py --voicevox_dir=$VOICEVOX_DIR + ``` + +### 過去のバージョンのコアを使う +VOICEVOX Core 0.5.4以降のコアを使用する事が可能です。 +Macでのlibtorch版コアのサポートはしていません。 + +#### 過去のバイナリを指定する 
+製品版VOICEVOXもしくはコンパイル済みエンジンのディレクトリを`--voicevox_dir`引数で指定すると、そのバージョンのコアが使用されます。 +```bash +python run.py --voicevox_dir="/path/to/voicevox" +``` +Macでは、`DYLD_LIBRARY_PATH`の指定が必要です。 +```bash +DYLD_LIBRARY_PATH="/path/to/voicevox" python run.py --voicevox_dir="/path/to/voicevox" +``` + +#### 音声ライブラリを直接指定する +[VOICEVOX Coreのzipファイル](https://github.com/VOICEVOX/voicevox_core/releases)を解凍したディレクトリを`--voicelib_dir`引数で指定します。 +また、コアのバージョンに合わせて、[libtorch](https://pytorch.org/)や[onnxruntime](https://github.com/microsoft/onnxruntime)のディレクトリを`--runtime_dir`引数で指定します。 +ただし、システムの探索パス上にlibtorch、onnxruntimeがある場合、`--runtime_dir`引数の指定は不要です。 +`--voicelib_dir`引数、`--runtime_dir`引数は複数回使用可能です。 +APIエンドポイントでコアのバージョンを指定する場合は`core_version`引数を指定してください。(未指定の場合は最新のコアが使用されます) +```bash +python run.py --voicelib_dir="/path/to/voicevox_core" --runtime_dir="/path/to/libtorch_or_onnx" +``` +Macでは、`--runtime_dir`引数の代わりに`DYLD_LIBRARY_PATH`の指定が必要です。 +```bash +DYLD_LIBRARY_PATH="/path/to/onnx" python run.py --voicelib_dir="/path/to/voicevox_core" +``` + +## コードフォーマット + +このソフトウェアでは、リモートにプッシュする前にコードフォーマットを確認する仕組み(静的解析ツール)を利用できます。 +利用するには、開発に必要なライブラリのインストールに加えて、以下のコマンドを実行してください。 +プルリクエストを作成する際は、利用することを推奨します。 + +```bash +pre-commit install -t pre-push +``` + +エラーが出た際は、以下のコマンドで修正することが可能です。なお、完全に修正できるわけではないので注意してください。 + +```bash +pysen run format lint +``` + +## タイポチェック + +[typos](https://github.com/crate-ci/typos) を使ってタイポのチェックを行っています。 +[typos をインストール](https://github.com/crate-ci/typos#install) した後 + +```bash +typos +``` + +でタイポチェックを行えます。 +もし誤判定やチェックから除外すべきファイルがあれば +[設定ファイルの説明](https://github.com/crate-ci/typos#false-positives) に従って`_typos.toml`を編集してください。 + +## API ドキュメントの確認 + +[API ドキュメント](https://voicevox.github.io/voicevox_engine/api/)(実体は`docs/api/index.html`)は自動で更新されます。 +次のコマンドで API ドキュメントを手動で作成することができます。 + +```bash +python make_docs.py +``` + +## ビルド + +この方法でビルドしたものは、リリースで公開されているものとは異なります。 +また、GPUで利用するにはcuDNNやCUDA、DirectMLなどのライブラリが追加で必要となります。 + +```bash +python -m pip install -r requirements-dev.txt + 
+OUTPUT_LICENSE_JSON_PATH=licenses.json \ +bash build_util/create_venv_and_generate_licenses.bash + +# ビルド自体はLIBCORE_PATH及びLIBONNXRUNTIME_PATHの指定がなくても可能です +LIBCORE_PATH="/path/to/libcore" \ + LIBONNXRUNTIME_PATH="/path/to/libonnxruntime" \ + pyinstaller --noconfirm run.spec +``` + +## 依存関係 + +### 更新 + +[Poetry](https://python-poetry.org/) を用いて依存ライブラリのバージョンを固定しています。 +以下のコマンドで操作できます: + +```bash +# パッケージを追加する場合 +poetry add `パッケージ名` +poetry add --group dev `パッケージ名` # 開発依存の追加 +poetry add --group test `パッケージ名` # テスト依存の追加 + +# パッケージをアップデートする場合 +poetry update `パッケージ名` +poetry update # 全部更新 + +# requirements.txtの更新 +poetry export --without-hashes -o requirements.txt # こちらを更新する場合は下3つも更新する必要があります。 +poetry export --without-hashes --with dev -o requirements-dev.txt +poetry export --without-hashes --with test -o requirements-test.txt +poetry export --without-hashes --with license -o requirements-license.txt +``` + +### ライセンス + +依存ライブラリは「コアビルド時にリンクして一体化しても、コア部のコード非公開 OK」なライセンスを持つ必要があります。 +主要ライセンスの可否は以下の通りです。 + +- MIT/Apache/BSD-3: OK +- LGPL: OK (コアと動的分離されているため) +- GPL: NG (全関連コードの公開が必要なため) + +## ユーザー辞書の更新について + +以下のコマンドで openjtalk のユーザー辞書をコンパイルできます。 + +```bash +python -c "import pyopenjtalk; pyopenjtalk.create_user_dict('default.csv','user.dic')" +``` + +## マルチエンジン機能に関して + +VOICEVOX エディターでは、複数のエンジンを同時に起動することができます。 +この機能を利用することで、自作の音声合成エンジンや既存の音声合成エンジンを VOICEVOX エディター上で動かすことが可能です。 + + + +
+ +### マルチエンジン機能の仕組み + +VOICEVOX API に準拠した複数のエンジンの Web API をポートを分けて起動し、統一的に扱うことでマルチエンジン機能を実現しています。 +エディターがそれぞれのエンジンを実行バイナリ経由で起動し、EngineID と結びつけて設定や状態を個別管理します。 + +### マルチエンジン機能への対応方法 + +VOICEVOX API 準拠エンジンを起動する実行バイナリを作ることで対応が可能です。 +VOICEVOX ENGINE リポジトリを fork し、一部の機能を改造するのが簡単です。 + +改造すべき点はエンジン情報・キャラクター情報・音声合成の3点です。 + +エンジンの情報はエンジンマニフェスト(`engine_manifest.json`)で管理されています。 +マニフェストファイル内の情報を見て適宜変更してください。 +音声合成手法によっては、例えばモーフィング機能など、VOICEVOX と同じ機能を持つことができない場合があります。 +その場合はマニフェストファイル内の`supported_features`内の情報を適宜変更してください。 + +キャラクター情報は`speaker_info`ディレクトリ内のファイルで管理されています。 +ダミーのアイコンなどが用意されているので適宜変更してください。 + +音声合成は`voicevox_engine/synthesis_engine/synthesis_engine.py`で行われています。 +VOICEVOX API での音声合成は、エンジン側で音声合成クエリ`AudioQuery`の初期値を作成してユーザーに返し、ユーザーが必要に応じてクエリを編集したあと、エンジンがクエリに従って音声合成することで実現しています。 +クエリ作成は`/audio_query`エンドポイントで、音声合成は`/synthesis`エンドポイントで行っており、最低この2つに対応すれば VOICEVOX API に準拠したことになります。 + +### マルチエンジン機能対応エンジンの配布方法 + +VVPP ファイルとして配布するのがおすすめです。 +VVPP は「VOICEVOX プラグインパッケージ」の略で、中身はビルドしたエンジンなどを含んだディレクトリの Zip ファイルです。 +拡張子を`.vvpp`にすると、ダブルクリックで VOICEVOX エディターにインストールできます。 + +エディター側は受け取った VVPP ファイルをローカルディスク上に Zip 展開したあと、ルートの直下にある`engine_manifest.json`に従ってファイルを探査します。 +VOICEVOX エディターにうまく読み込ませられないときは、エディターのエラーログを参照してください。 + +また、`xxx.vvpp`は分割して連番を付けた`xxx.0.vvppp`ファイルとして配布することも可能です。 +これはファイル容量が大きくて配布が困難な場合に有用です。 + +
"""
ビルド結果をテストする
Smoke-test a release build of the engine.

Starts the built binary (unless told to skip), then exercises the HTTP API:
version lookup, audio-query creation, synthesis, and the engine manifest.
"""
import argparse
import json
import time
from io import BytesIO
from pathlib import Path
from subprocess import Popen
from urllib.parse import urlencode
from urllib.request import Request, urlopen

import soundfile

base_url = "http://127.0.0.1:50021/"


def test_release_build(dist_dir: Path, skip_run_process: bool) -> None:
    """Run an end-to-end smoke test against the built engine.

    Args:
        dist_dir: Directory containing the built ``run`` / ``run.exe`` binary.
        skip_run_process: When True, assume an engine is already listening on
            ``base_url`` instead of spawning the binary.

    Raises:
        AssertionError: If the binary is missing, the process dies early, or
            an API response is malformed.
    """
    run_file = dist_dir / "run"
    if not run_file.exists():
        run_file = dist_dir / "run.exe"
    # Fail fast with a clear message instead of an opaque Popen error.
    assert skip_run_process or run_file.exists(), f"run binary not found in {dist_dir}"

    # 起動 (start the engine)
    process = None
    if not skip_run_process:
        process = Popen([run_file.absolute()], cwd=dist_dir)

    try:
        # バージョン取得テスト (version endpoint doubles as a readiness probe)
        # Poll instead of sleeping a fixed 60 seconds; same worst-case wait,
        # but returns as soon as the engine is up.
        deadline = time.monotonic() + 60
        while True:
            try:
                with urlopen(Request(base_url + "version"), timeout=5) as res:
                    assert len(res.read()) > 0
                break
            except Exception:
                if time.monotonic() >= deadline:
                    raise
                time.sleep(1)

        # テキスト -> クエリ (text -> audio query)
        text = "こんにちは、音声合成の世界へようこそ"
        req = Request(
            base_url + "audio_query?" + urlencode({"speaker": "1", "text": text}),
            method="POST",
        )
        with urlopen(req) as res:
            query = json.loads(res.read().decode("utf-8"))

        # クエリ -> 音声 (query -> waveform)
        req = Request(base_url + "synthesis?speaker=1", method="POST")
        req.add_header("Content-Type", "application/json")
        req.data = json.dumps(query).encode("utf-8")
        with urlopen(req) as res:
            wave = res.read()
        # soundfile validates that the response really is decodable audio.
        soundfile.read(BytesIO(wave))

        # エンジンマニフェスト (engine manifest)
        req = Request(base_url + "engine_manifest", method="GET")
        with urlopen(req) as res:
            manifest = json.loads(res.read().decode("utf-8"))
        assert "uuid" in manifest

        if process is not None:
            # プロセスが稼働中であることを確認 (process must still be alive)
            assert process.poll() is None
    finally:
        # 停止 — terminate even if an assertion above failed, so CI does not
        # leak a running engine process (the original skipped this on failure).
        if process is not None:
            process.terminate()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dist_dir", type=Path, default=Path("dist/"))
    parser.add_argument("--skip_run_process", action="store_true")
    args = parser.parse_args()
    test_release_build(dist_dir=args.dist_dir, skip_run_process=args.skip_run_process)
-v CERT_PASSWORD ]; then + echo "CERT_PASSWORDが未定義です" + exit 1 +fi + +if [ $# -ne 1 ]; then + echo "引数の数が一致しません" + exit 1 +fi +target_file_glob="$1" + +# 証明書 +CERT_PATH=cert.pfx +echo -n "$CERT_BASE64" | base64 -d - > $CERT_PATH + +# 指定ファイルに署名する +function codesign() { + TARGET="$1" + SIGNTOOL=$(find "C:/Program Files (x86)/Windows Kits/10/App Certification Kit" -name "signtool.exe" | sort -V | tail -n 1) + powershell "& '$SIGNTOOL' sign /fd SHA256 /td SHA256 /tr http://timestamp.digicert.com /f $CERT_PATH /p $CERT_PASSWORD '$TARGET'" +} + +# 指定ファイルが署名されているか +function is_signed() { + TARGET="$1" + SIGNTOOL=$(find "C:/Program Files (x86)/Windows Kits/10/App Certification Kit" -name "signtool.exe" | sort -V | tail -n 1) + powershell "& '$SIGNTOOL' verify /pa '$TARGET'" || return 1 +} + +# 署名されていなければ署名 +ls $target_file_glob | while read target_file; do + if is_signed "$target_file"; then + echo "署名済み: $target_file" + else + echo "署名: $target_file" + codesign "$target_file" + fi +done + +# 証明書を消去 +rm $CERT_PATH diff --git a/build_util/create_venv_and_generate_licenses.bash b/build_util/create_venv_and_generate_licenses.bash new file mode 100644 index 0000000000000000000000000000000000000000..d2c837dbfad2f66bf1c3d73f19199e6fa93910dd --- /dev/null +++ b/build_util/create_venv_and_generate_licenses.bash @@ -0,0 +1,24 @@ +# 仮想環境を作ってrequirements.txtをインストールし、ライセンス一覧を生成する + +set -eux + +if [ ! 
"""
更新履歴をマージする。
Merge two ``update_infos.json`` documents (lists of per-version change entries).
"""

import argparse
import json
from collections import OrderedDict
from pathlib import Path
from typing import Dict, List, Union


def merge_json_string(src: str, dst: str) -> str:
    """
    バージョンが同じ場合は要素を結合する
    >>> src = '[{"version": "0.0.1", "a": ["a1"], "b": ["b1", "b2"]}]'
    >>> dst = '[{"version": "0.0.1", "a": ["a2"], "b": ["b1", "b3"]}]'
    >>> merge_json_string(src, dst)
    '[{"version": "0.0.1", "a": ["a1", "a2"], "b": ["b1", "b2", "b3"]}]'

    バージョンが無かった場合は無視される
    >>> src = '[{"version": "1"}]'
    >>> dst = '[{"version": "1"}, {"version": "2"}]'
    >>> merge_json_string(src, dst)
    '[{"version": "1"}]'
    """
    src_json: List[Dict[str, Union[str, List[str]]]] = json.loads(src)
    dst_json: List[Dict[str, Union[str, List[str]]]] = json.loads(dst)

    for src_item in src_json:
        for dst_item in dst_json:
            if src_item["version"] == dst_item["version"]:
                for key in src_item:
                    if key == "version":
                        continue

                    # 異なるものがあった場合だけ後ろに付け足す
                    # (append only entries not already present, keeping order;
                    # .get(key, []) avoids a KeyError when dst lacks the key)
                    src_item[key] = list(
                        OrderedDict.fromkeys(src_item[key] + dst_item.get(key, []))
                    )

    return json.dumps(src_json)


def merge_update_infos(src_path: Path, dst_path: Path, output_path: Path) -> None:
    """Merge the JSON documents at src_path and dst_path into output_path."""
    src = src_path.read_text(encoding="utf-8")
    dst = dst_path.read_text(encoding="utf-8")
    merged = merge_json_string(src, dst)
    # Write UTF-8 explicitly: the inputs are read as UTF-8, and relying on the
    # platform default (e.g. cp932 on Windows) can corrupt or reject the output.
    output_path.write_text(merged, encoding="utf-8")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("src_path", type=Path)
    parser.add_argument("dst_path", type=Path)
    parser.add_argument("output_path", type=Path)
    args = parser.parse_args()
    merge_update_infos(args.src_path, args.dst_path, args.output_path)
-v DOWNLOAD_RESOURCE_PATH ]; then + echo "DOWNLOAD_RESOURCE_PATHが未定義です" + exit 1 +fi + +rm -r speaker_info +cp -r $DOWNLOAD_RESOURCE_PATH/character_info speaker_info + +python $DOWNLOAD_RESOURCE_PATH/scripts/clean_character_info.py \ + --character_info_dir speaker_info/ + +# マニフェスト +jq -s '.[0] * .[1]' engine_manifest.json $DOWNLOAD_RESOURCE_PATH/engine/engine_manifest.json \ + > engine_manifest.json.tmp +mv engine_manifest.json.tmp engine_manifest.json + +python build_util/merge_update_infos.py \ + engine_manifest_assets/update_infos.json \ + $DOWNLOAD_RESOURCE_PATH/engine/engine_manifest_assets/update_infos.json \ + engine_manifest_assets/update_infos.json + +for f in $(ls $DOWNLOAD_RESOURCE_PATH/engine/engine_manifest_assets/* | grep -v update_infos.json); do + cp $f ./engine_manifest_assets/ +done diff --git a/default.csv b/default.csv new file mode 100644 index 0000000000000000000000000000000000000000..ec2ab0754de435fb6657cd73186d250132b60792 --- /dev/null +++ b/default.csv @@ -0,0 +1,62 @@ +朱司,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,アカシ,アカシ,1/3,C1 +青山,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,アオヤマ,アオヤマ,2/4,C1 +雨晴,1350,1350,7000,名詞,固有名詞,人名,姓,*,*,*,アメハレ,アメハレ,2/4,C1 +アル,1351,1351,7000,名詞,固有名詞,人名,名,*,*,*,アル,アル,1/2,C1 +櫻歌,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,オウカ,オーカ,1/3,C1 +音街,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,オトマチ,オトマチ,2/4,C1 +春日部,1350,1350,8600,名詞,固有名詞,人名,姓,*,*,*,カスカベ,カスカベ,0/4,C1 +麒ヶ島,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,キガシマ,キガシマ,2/4,C1 +紲星,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,キズナ,キズナ,1/3,C1 +九州,1350,1350,8600,名詞,固有名詞,人名,姓,*,*,*,キュウシュウ,キュウシュウ,1/4,C1 +キョウコ,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,キョオコ,キョオコ,1/3,C1 +玄野,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,クロノ,クロノ,1/3,C1 +剣崎,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,ケンザキ,ケンザキ,1/4,C1 +後鬼,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ゴキ,ゴキ,1/2,C1 +虎太郎,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,コタロウ,コタロー,4/4,C1 +琴葉,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,コトノハ,コトノハ,0/4,C1 +小夜,1351,1351,2200,名詞,固有名詞,人名,名,*,*,*,サヨ,サヨ,1/2,C1 +四国,1350,1350,2200,名詞,固有名詞,人名,姓,*,*,*,シコク,シコク,1/3,C1 
+白上,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,シラカミ,シラカミ,4/4,C1 +ずんだもん,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ズンダモン,ズンダモン,1/5,C1 +そら,1351,1351,7000,名詞,固有名詞,人名,名,*,*,*,ソラ,ソラ,1/2,C1 +宗麟,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ソウリン,ソウリン,1/4,C1 +タイプT,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,タイプティー,タイプティー,4/5,C1 +波音,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ナミネ,ナミネ,0/3,C1 +武宏,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,タケヒロ,タケヒロ,2/4,C1 +ちび式じい,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,チビシキジー,チビシキジー,5/6,C1 +月読,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ツクヨミ,ツクヨミ,0/4,C1 +つむぎ,1351,1351,7450,名詞,固有名詞,人名,名,*,*,*,ツムギ,ツムギ,0/3,C1 +ナースロボ,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ナースロボ,ナースロボ,4/5,C1 +ナナ,1351,1351,8600,名詞,固有名詞,人名,名,*,*,*,ナナ,ナナ,1/2,C1 +No.7,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ナンバーセブン,ナンバーセブン,5/7,C1 +猫使,1350,1350,2200,名詞,固有名詞,人名,姓,*,*,*,ネコツカ,ネコツカ,2/4,C1 +はう,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,ハウ,ハウ,1/2,C1 +春歌,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ハルカ,ハルカ,1/3,C1 +桜乃,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ハルノ,ハルノ,1/3,C1 +ビィ,1351,1351,7000,名詞,固有名詞,人名,名,*,*,*,ビー,ビー,1/2,C1 +ひまり,1351,1351,7000,名詞,固有名詞,人名,名,*,*,*,ヒマリ,ヒマリ,0/3,C1 +紅桜,1351,1351,7000,名詞,固有名詞,人名,名,*,*,*,ベニザクラ,ベニザクラ,3/5,C1 +聖騎士,1350,1350,8600,名詞,固有名詞,人名,姓,*,*,*,ホーリーナイト,ホーリーナイト,5/7,C1 +WhiteCUL,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ホワイトカル,ホワイトカル,5/6,C1 +ミコ,1351,1351,3900,名詞,固有名詞,人名,名,*,*,*,ミコ,ミコ,1/2,C1 +水奈瀬,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ミナセ,ミナセ,2/3,C1 +冥鳴,1350,1350,5000,名詞,固有名詞,人名,姓,*,*,*,メイメイ,メイメイ,1/4,C1 +鳴花,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,メイカ,メイカ,1/3,C1 +めたん,1351,1351,7000,名詞,固有名詞,人名,名,*,*,*,メタン,メタン,1/3,C1 +雌雄,1351,1351,8600,名詞,固有名詞,人名,名,*,*,*,メスオ,メスオ,0/3,C1 +もち子さん,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,モチコサン,モチコサン,1/5,C1 +モチノ,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,モチノ,モチノ,0/3,C1 +結月,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ユヅキ,ユヅキ,1/3,C1 +弓鶴,1351,1351,0,名詞,固有名詞,人名,名,*,*,*,ユヅル,ユヅル,0/3,C1 +リツ,1351,1351,3900,名詞,固有名詞,人名,名,*,*,*,リツ,リツ,1/2,C1 +六花,1351,1351,4900,名詞,固有名詞,人名,名,*,*,*,リッカ,リッカ,1/3,C1 +龍星,1351,1351,5000,名詞,固有名詞,人名,名,*,*,*,リュウセイ,リュウセイ,1/4,C1 +雀松,1350,1350,0,名詞,固有名詞,人名,姓,*,*,*,ワカマツ,ワカマツ,2/4,C1 
+COEIROINK,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,コエイロインク,コエイロインク,5/7,C1 +coeiroink,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,コエイロインク,コエイロインク,5/7,C1 +CoeFont,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,コエフォント,コエフォント,3/5,C1 +coefont,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,コエフォント,コエフォント,3/5,C1 +TALQu,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,トーク,トーク,0/3,C1 +talqu,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,トーク,トーク,0/3,C1 +VOICEVOX,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,ボイスボックス,ボイスボックス,4/7,C1 +voicevox,1348,1348,0,名詞,固有名詞,一般,*,*,*,*,ボイスボックス,ボイスボックス,4/7,C1 \ No newline at end of file diff --git a/default_setting.yml b/default_setting.yml new file mode 100644 index 0000000000000000000000000000000000000000..3421e7a6a32073e3413444495b3bb37d80d4d351 --- /dev/null +++ b/default_setting.yml @@ -0,0 +1,2 @@ +allow_origin: null +cors_policy_mode: localapps diff --git "a/docs/VOICEVOX\351\237\263\345\243\260\345\220\210\346\210\220\343\202\250\343\203\263\343\202\270\343\203\263\343\201\250\343\201\256\351\200\243\346\220\272.md" "b/docs/VOICEVOX\351\237\263\345\243\260\345\220\210\346\210\220\343\202\250\343\203\263\343\202\270\343\203\263\343\201\250\343\201\256\351\200\243\346\220\272.md" new file mode 100644 index 0000000000000000000000000000000000000000..540173be1b280ce5c3593b8aed02fd42ef633f65 --- /dev/null +++ "b/docs/VOICEVOX\351\237\263\345\243\260\345\220\210\346\210\220\343\202\250\343\203\263\343\202\270\343\203\263\343\201\250\343\201\256\351\200\243\346\220\272.md" @@ -0,0 +1,7 @@ +メモ書き程度ですが、どういう方針で開発を進めているかを紹介します。 + +- バージョンが上がっても、`/audio_query`で返ってくる値をそのまま`/synthesis`に POST すれば音声合成できるようにする予定です + - `AudioQuery`のパラメータは増えますが、なるべくデフォルト値で以前と変わらない音声が生成されるようにします +- バージョン 0.7 から音声スタイルが実装されました。スタイルの情報は`/speakers`から取得できます + - スタイルの情報にある`style_id`を`speaker`に指定することで、今まで通り音声合成ができます + - style_id の指定先が speaker なのは互換性のためです diff --git a/docs/api/.gitkeep b/docs/api/.gitkeep new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/docs/licenses/cuda/EULA.txt 
b/docs/licenses/cuda/EULA.txt new file mode 100755 index 0000000000000000000000000000000000000000..b1c1f891b53aebccfb4bae04dbcd53c048a7ce85 --- /dev/null +++ b/docs/licenses/cuda/EULA.txt @@ -0,0 +1,1598 @@ +End User License Agreement +-------------------------- + +NVIDIA Software License Agreement and CUDA Supplement to +Software License Agreement. Last updated: October 8, 2021 + +The CUDA Toolkit End User License Agreement applies to the +NVIDIA CUDA Toolkit, the NVIDIA CUDA Samples, the NVIDIA +Display Driver, NVIDIA Nsight tools (Visual Studio Edition), +and the associated documentation on CUDA APIs, programming +model and development tools. If you do not agree with the +terms and conditions of the license agreement, then do not +download or use the software. + +Last updated: October 8, 2021. + + +Preface +------- + +The Software License Agreement in Chapter 1 and the Supplement +in Chapter 2 contain license terms and conditions that govern +the use of NVIDIA software. By accepting this agreement, you +agree to comply with all the terms and conditions applicable +to the product(s) included herein. + + +NVIDIA Driver + + +Description + +This package contains the operating system driver and +fundamental system software components for NVIDIA GPUs. + + +NVIDIA CUDA Toolkit + + +Description + +The NVIDIA CUDA Toolkit provides command-line and graphical +tools for building, debugging and optimizing the performance +of applications accelerated by NVIDIA GPUs, runtime and math +libraries, and documentation including programming guides, +user manuals, and API references. 
+ + +Default Install Location of CUDA Toolkit + +Windows platform: + +%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.# + +Linux platform: + +/usr/local/cuda-#.# + +Mac platform: + +/Developer/NVIDIA/CUDA-#.# + + +NVIDIA CUDA Samples + + +Description + +CUDA Samples are now located in +https://github.com/nvidia/cuda-samples, which includes +instructions for obtaining, building, and running the samples. +They are no longer included in the CUDA toolkit. + + +NVIDIA Nsight Visual Studio Edition (Windows only) + + +Description + +NVIDIA Nsight Development Platform, Visual Studio Edition is a +development environment integrated into Microsoft Visual +Studio that provides tools for debugging, profiling, analyzing +and optimizing your GPU computing and graphics applications. + + +Default Install Location of Nsight Visual Studio Edition + +Windows platform: + +%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.# + + +1. License Agreement for NVIDIA Software Development Kits +--------------------------------------------------------- + + +Important Notice—Read before downloading, installing, +copying or using the licensed software: +------------------------------------------------------- + +This license agreement, including exhibits attached +("Agreement”) is a legal agreement between you and NVIDIA +Corporation ("NVIDIA") and governs your use of a NVIDIA +software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here +is a description of the types of items that may be included in +a SDK: source code, header files, APIs, data sets and assets +(examples include images, textures, models, scenes, videos, +native API input/output files), binary software, sample code, +libraries, utility programs, programming code and +documentation. + +This Agreement can be accepted only by an adult of legal age +of majority in the country in which the SDK is used. 
+ +If you are entering into this Agreement on behalf of a company +or other legal entity, you represent that you have the legal +authority to bind the entity to this Agreement, in which case +“you” will mean the entity you represent. + +If you don’t have the required age or authority to accept +this Agreement, or if you don’t accept all the terms and +conditions of this Agreement, do not download, install or use +the SDK. + +You agree to use the SDK only for purposes that are permitted +by (a) this Agreement, and (b) any applicable law, regulation +or generally accepted practices or guidelines in the relevant +jurisdictions. + + +1.1. License + + +1.1.1. License Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants +you a non-exclusive, non-transferable license, without the +right to sublicense (except as expressly provided in this +Agreement) to: + + 1. Install and use the SDK, + + 2. Modify and create derivative works of sample source code + delivered in the SDK, and + + 3. Distribute those portions of the SDK that are identified + in this Agreement as distributable, as incorporated in + object code format into a software application that meets + the distribution requirements indicated in this Agreement. + + +1.1.2. Distribution Requirements + +These are the distribution requirements for you to exercise +the distribution grant: + + 1. Your application must have material additional + functionality, beyond the included portions of the SDK. + + 2. The distributable portions of the SDK shall only be + accessed by your application. + + 3. The following notice shall be included in modifications + and derivative works of sample source code distributed: + “This software contains source code provided by NVIDIA + Corporation.” + + 4. Unless a developer tool is identified in this Agreement + as distributable, it is delivered for your internal use + only. + + 5. 
The terms under which you distribute your application + must be consistent with the terms of this Agreement, + including (without limitation) terms relating to the + license grant and license restrictions and protection of + NVIDIA’s intellectual property rights. Additionally, you + agree that you will protect the privacy, security and + legal rights of your application users. + + 6. You agree to notify NVIDIA in writing of any known or + suspected distribution or use of the SDK not in compliance + with the requirements of this Agreement, and to enforce + the terms of your agreements with respect to distributed + SDK. + + +1.1.3. Authorized Users + +You may allow employees and contractors of your entity or of +your subsidiary(ies) to access and use the SDK from your +secure network to perform work on your behalf. + +If you are an academic institution you may allow users +enrolled or employed by the academic institution to access and +use the SDK from your secure network. + +You are responsible for the compliance with the terms of this +Agreement by your authorized users. If you become aware that +your authorized users didn’t follow the terms of this +Agreement, you agree to take reasonable steps to resolve the +non-compliance and prevent new occurrences. + + +1.1.4. Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or +otherwise as pre-release, may not be fully functional, may +contain errors or design flaws, and may have reduced or +different security, privacy, accessibility, availability, and +reliability standards relative to commercial versions of +NVIDIA software and materials. Use of a pre-release SDK may +result in unexpected results, loss of data, project delays or +other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding +that pre-release SDKs are not intended for use in production +or business-critical systems. 
+ +NVIDIA may choose not to make available a commercial version +of any pre-release SDK. NVIDIA may also choose to abandon +development and terminate the availability of a pre-release +SDK at any time without liability. + + +1.1.5. Updates + +NVIDIA may, at its option, make available patches, workarounds +or other updates to this SDK. Unless the updates are provided +with their separate governing terms, they are deemed part of +the SDK licensed to you as provided in this Agreement. You +agree that the form and content of the SDK that NVIDIA +provides may change without prior notice to you. While NVIDIA +generally maintains compatibility between versions, NVIDIA may +in some cases make changes that introduce incompatibilities in +future versions of the SDK. + + +1.1.6. Components Under Other Licenses + +The SDK may come bundled with, or otherwise include or be +distributed with, NVIDIA or third-party components with +separate legal notices or terms as may be described in +proprietary notices accompanying the SDK. If and to the extent +there is a conflict between the terms in this Agreement and +the license terms associated with the component, the license +terms associated with the components control only to the +extent necessary to resolve the conflict. + +Subject to the other terms of this Agreement, you may use the +SDK to develop and test applications released under Open +Source Initiative (OSI) approved open source software +licenses. + + +1.1.7. Reservation of Rights + +NVIDIA reserves all rights, title, and interest in and to the +SDK, not expressly granted to you under this Agreement. + + +1.2. Limitations + +The following license limitations apply to your use of the +SDK: + + 1. You may not reverse engineer, decompile or disassemble, + or remove copyright or other proprietary notices from any + portion of the SDK or copies of the SDK. + + 2. 
Except as expressly provided in this Agreement, you may + not copy, sell, rent, sublicense, transfer, distribute, + modify, or create derivative works of any portion of the + SDK. For clarity, you may not distribute or sublicense the + SDK as a stand-alone product. + + 3. Unless you have an agreement with NVIDIA for this + purpose, you may not indicate that an application created + with the SDK is sponsored or endorsed by NVIDIA. + + 4. You may not bypass, disable, or circumvent any + encryption, security, digital rights management or + authentication mechanism in the SDK. + + 5. You may not use the SDK in any manner that would cause it + to become subject to an open source software license. As + examples, licenses that require as a condition of use, + modification, and/or distribution that the SDK be: + + a. Disclosed or distributed in source code form; + + b. Licensed for the purpose of making derivative works; + or + + c. Redistributable at no charge. + + 6. You acknowledge that the SDK as delivered is not tested + or certified by NVIDIA for use in connection with the + design, construction, maintenance, and/or operation of any + system where the use or failure of such system could + result in a situation that threatens the safety of human + life or results in catastrophic damages (each, a "Critical + Application"). Examples of Critical Applications include + use in avionics, navigation, autonomous vehicle + applications, ai solutions for automotive products, + military, medical, life support or other life critical + applications. NVIDIA shall not be liable to you or any + third party, in whole or in part, for any claims or + damages arising from such uses. You are solely responsible + for ensuring that any product or service developed with + the SDK as a whole includes sufficient features to comply + with all applicable legal and regulatory standards and + requirements. + + 7. 
You agree to defend, indemnify and hold harmless NVIDIA + and its affiliates, and their respective employees, + contractors, agents, officers and directors, from and + against any and all claims, damages, obligations, losses, + liabilities, costs or debt, fines, restitutions and + expenses (including but not limited to attorney’s fees + and costs incident to establishing the right of + indemnification) arising out of or related to products or + services that use the SDK in or for Critical Applications, + and for use of the SDK outside of the scope of this + Agreement or not in compliance with its terms. + + 8. You may not reverse engineer, decompile or disassemble + any portion of the output generated using SDK elements for + the purpose of translating such output artifacts to target + a non-NVIDIA platform. + + +1.3. Ownership + + 1. NVIDIA or its licensors hold all rights, title and + interest in and to the SDK and its modifications and + derivative works, including their respective intellectual + property rights, subject to your rights under Section + 1.3.2. This SDK may include software and materials from + NVIDIA’s licensors, and these licensors are intended + third party beneficiaries that may enforce this Agreement + with respect to their intellectual property rights. + + 2. You hold all rights, title and interest in and to your + applications and your derivative works of the sample + source code delivered in the SDK, including their + respective intellectual property rights, subject to + NVIDIA’s rights under Section 1.3.1. + + 3. You may, but don’t have to, provide to NVIDIA + suggestions, feature requests or other feedback regarding + the SDK, including possible enhancements or modifications + to the SDK. 
For any feedback that you voluntarily provide, + you hereby grant NVIDIA and its affiliates a perpetual, + non-exclusive, worldwide, irrevocable license to use, + reproduce, modify, license, sublicense (through multiple + tiers of sublicensees), and distribute (through multiple + tiers of distributors) it without the payment of any + royalties or fees to you. NVIDIA will use feedback at its + choice. NVIDIA is constantly looking for ways to improve + its products, so you may send feedback to NVIDIA through + the developer portal at https://developer.nvidia.com. + + +1.4. No Warranties + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL +FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND +ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND +OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, +BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE +ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO +WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF +DEALING OR COURSE OF TRADE. + + +1.5. Limitation of Liability + +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS +AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, +PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS +OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF +PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION +WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, +WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH +OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), +PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF +LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES +TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS +AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE +NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS +LIMIT. 
+ +These exclusions and limitations of liability shall apply +regardless if NVIDIA or its affiliates have been advised of +the possibility of such damages, and regardless of whether a +remedy fails its essential purpose. These exclusions and +limitations of liability form an essential basis of the +bargain between the parties, and, absent any of these +exclusions or limitations of liability, the provisions of this +Agreement, including, without limitation, the economic terms, +would be substantially different. + + +1.6. Termination + + 1. This Agreement will continue to apply until terminated by + either you or NVIDIA as described below. + + 2. If you want to terminate this Agreement, you may do so by + stopping to use the SDK. + + 3. NVIDIA may, at any time, terminate this Agreement if: + + a. (i) you fail to comply with any term of this + Agreement and the non-compliance is not fixed within + thirty (30) days following notice from NVIDIA (or + immediately if you violate NVIDIA’s intellectual + property rights); + + b. (ii) you commence or participate in any legal + proceeding against NVIDIA with respect to the SDK; or + + c. (iii) NVIDIA decides to no longer provide the SDK in + a country or, in NVIDIA’s sole discretion, the + continued use of it is no longer commercially viable. + + 4. Upon any termination of this Agreement, you agree to + promptly discontinue use of the SDK and destroy all copies + in your possession or control. Your prior distributions in + accordance with this Agreement are not affected by the + termination of this Agreement. Upon written request, you + will certify in writing that you have complied with your + commitments under this section. Upon any termination of + this Agreement all provisions survive except for the + license grant provisions. + + +1.7. General + +If you wish to assign this Agreement or your rights and +obligations, including by merger, consolidation, dissolution +or operation of law, contact NVIDIA to ask for permission. 
Any +attempted assignment not approved by NVIDIA in writing shall +be void and of no effect. NVIDIA may assign, delegate or +transfer this Agreement and its rights and obligations, and if +to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably +requested information to verify your compliance with this +Agreement. + +This Agreement will be governed in all respects by the laws of +the United States and of the State of Delaware as those laws +are applied to contracts entered into and performed entirely +within Delaware by Delaware residents, without regard to the +conflicts of laws principles. The United Nations Convention on +Contracts for the International Sale of Goods is specifically +disclaimed. You agree to all terms of this Agreement in the +English language. + +The state or federal courts residing in Santa Clara County, +California shall have exclusive jurisdiction over any dispute +or claim arising out of this Agreement. Notwithstanding this, +you agree that NVIDIA shall still be allowed to apply for +injunctive remedies or an equivalent type of urgent legal +relief in any jurisdiction. + +If any court of competent jurisdiction determines that any +provision of this Agreement is illegal, invalid or +unenforceable, such provision will be construed as limited to +the extent necessary to be consistent with and fully +enforceable under the law and the remaining provisions will +remain in full force and effect. Unless otherwise specified, +remedies are cumulative. + +Each party acknowledges and agrees that the other is an +independent contractor in the performance of this Agreement. + +The SDK has been developed entirely at private expense and is +“commercial items” consisting of “commercial computer +software” and “commercial computer software +documentation” provided with RESTRICTED RIGHTS. Use, +duplication or disclosure by the U.S. Government or a U.S. 
+Government subcontractor is subject to the restrictions in +this Agreement pursuant to DFARS 227.7202-3(a) or as set forth +in subparagraphs (c)(1) and (2) of the Commercial Computer +Software - Restricted Rights clause at FAR 52.227-19, as +applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas +Expressway, Santa Clara, CA 95051. + +The SDK is subject to United States export laws and +regulations. You agree that you will not ship, transfer or +export the SDK into any country, or use the SDK in any manner, +prohibited by the United States Bureau of Industry and +Security or economic sanctions regulations administered by the +U.S. Department of Treasury’s Office of Foreign Assets +Control (OFAC), or any applicable export laws, restrictions or +regulations. These laws include restrictions on destinations, +end users and end use. By accepting this Agreement, you +confirm that you are not a resident or citizen of any country +currently embargoed by the U.S. and that you are not otherwise +prohibited from receiving the SDK. + +Any notice delivered by NVIDIA to you under this Agreement +will be delivered via mail, email or fax. You agree that any +notices that NVIDIA sends you electronically will satisfy any +legal communication requirements. Please direct your legal +notices or other correspondence to NVIDIA Corporation, 2788 +San Tomas Expressway, Santa Clara, California 95051, United +States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this +Agreement constitute the entire agreement of the parties with +respect to the subject matter of this Agreement and supersede +all prior negotiations or documentation exchanged between the +parties relating to this SDK license. Any additional and/or +conflicting terms on documents issued by you are null, void, +and invalid. Any amendment or waiver under this Agreement +shall be in writing and signed by representatives of both +parties. + + +2. 
CUDA Toolkit Supplement to Software License Agreement for +NVIDIA Software Development Kits +------------------------------------------------------------ + +The terms in this supplement govern your use of the NVIDIA +CUDA Toolkit SDK under the terms of your license agreement +(“Agreement”) as modified by this supplement. Capitalized +terms used but not defined below have the meaning assigned to +them in the Agreement. + +This supplement is an exhibit to the Agreement and is +incorporated as an integral part of the Agreement. In the +event of conflict between the terms in this supplement and the +terms in the Agreement, the terms in this supplement govern. + + +2.1. License Scope + +The SDK is licensed for you to develop applications only for +use in systems with NVIDIA GPUs. + + +2.2. Distribution + +The portions of the SDK that are distributable under the +Agreement are listed in Attachment A. + + +2.3. Operating Systems + +Those portions of the SDK designed exclusively for use on the +Linux or FreeBSD operating systems, or other operating systems +derived from the source code to these operating systems, may +be copied and redistributed for use in accordance with this +Agreement, provided that the object code files are not +modified in any way (except for unzipping of compressed +files). + + +2.4. Audio and Video Encoders and Decoders + +You acknowledge and agree that it is your sole responsibility +to obtain any additional third-party licenses required to +make, have made, use, have used, sell, import, and offer for +sale your products or services that include or incorporate any +third-party software and content relating to audio and/or +video encoders and decoders from, including but not limited +to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A., +MPEG-LA, and Coding Technologies. NVIDIA does not grant to you +under this Agreement any necessary patent or other rights with +respect to any audio and/or video encoders and decoders. + + +2.5. 
Licensing + +If the distribution terms in this Agreement are not suitable +for your organization, or for any questions regarding this +Agreement, please contact NVIDIA at +nvidia-compute-license-questions@nvidia.com. + + +2.6. Attachment A + +The following CUDA Toolkit files may be distributed with +Licensee Applications developed by you, including certain +variations of these files that have version number or +architecture specific information embedded in the file name - +as an example only, for release version 9.0 of the 64-bit +Windows software, the file cudart64_90.dll is redistributable. + +Component + +CUDA Runtime + +Windows + +cudart.dll, cudart_static.lib, cudadevrt.lib + +Mac OSX + +libcudart.dylib, libcudart_static.a, libcudadevrt.a + +Linux + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Android + +libcudart.so, libcudart_static.a, libcudadevrt.a + +Component + +CUDA FFT Library + +Windows + +cufft.dll, cufftw.dll, cufft.lib, cufftw.lib + +Mac OSX + +libcufft.dylib, libcufft_static.a, libcufftw.dylib, +libcufftw_static.a + +Linux + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Android + +libcufft.so, libcufft_static.a, libcufftw.so, +libcufftw_static.a + +Component + +CUDA BLAS Library + +Windows + +cublas.dll, cublasLt.dll + +Mac OSX + +libcublas.dylib, libcublasLt.dylib, libcublas_static.a, +libcublasLt_static.a + +Linux + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Android + +libcublas.so, libcublasLt.so, libcublas_static.a, +libcublasLt_static.a + +Component + +NVIDIA "Drop-in" BLAS Library + +Windows + +nvblas.dll + +Mac OSX + +libnvblas.dylib + +Linux + +libnvblas.so + +Component + +CUDA Sparse Matrix Library + +Windows + +cusparse.dll, cusparse.lib + +Mac OSX + +libcusparse.dylib, libcusparse_static.a + +Linux + +libcusparse.so, libcusparse_static.a + +Android + +libcusparse.so, libcusparse_static.a + +Component + +CUDA Linear Solver Library + +Windows + +cusolver.dll, cusolver.lib + 
+Mac OSX + +libcusolver.dylib, libcusolver_static.a + +Linux + +libcusolver.so, libcusolver_static.a + +Android + +libcusolver.so, libcusolver_static.a + +Component + +CUDA Random Number Generation Library + +Windows + +curand.dll, curand.lib + +Mac OSX + +libcurand.dylib, libcurand_static.a + +Linux + +libcurand.so, libcurand_static.a + +Android + +libcurand.so, libcurand_static.a + +Component + +NVIDIA Performance Primitives Library + +Windows + +nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll, +nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll, +nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib, +nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll, +nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib + +Mac OSX + +libnppc.dylib, libnppc_static.a, libnppial.dylib, +libnppial_static.a, libnppicc.dylib, libnppicc_static.a, +libnppicom.dylib, libnppicom_static.a, libnppidei.dylib, +libnppidei_static.a, libnppif.dylib, libnppif_static.a, +libnppig.dylib, libnppig_static.a, libnppim.dylib, +libnppisu_static.a, libnppitc.dylib, libnppitc_static.a, +libnpps.dylib, libnpps_static.a + +Linux + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Android + +libnppc.so, libnppc_static.a, libnppial.so, +libnppial_static.a, libnppicc.so, libnppicc_static.a, +libnppicom.so, libnppicom_static.a, libnppidei.so, +libnppidei_static.a, libnppif.so, libnppif_static.a +libnppig.so, libnppig_static.a, libnppim.so, +libnppim_static.a, libnppist.so, libnppist_static.a, +libnppisu.so, libnppisu_static.a, libnppitc.so +libnppitc_static.a, libnpps.so, libnpps_static.a + +Component + +NVIDIA JPEG Library + +Windows + 
+nvjpeg.lib, nvjpeg.dll + +Linux + +libnvjpeg.so, libnvjpeg_static.a + +Component + +Internal common library required for statically linking to +cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP + +Mac OSX + +libculibos.a + +Linux + +libculibos.a + +Component + +NVIDIA Runtime Compilation Library and Header + +All + +nvrtc.h + +Windows + +nvrtc.dll, nvrtc-builtins.dll + +Mac OSX + +libnvrtc.dylib, libnvrtc-builtins.dylib + +Linux + +libnvrtc.so, libnvrtc-builtins.so + +Component + +NVIDIA Optimizing Compiler Library + +Windows + +nvvm.dll + +Mac OSX + +libnvvm.dylib + +Linux + +libnvvm.so + +Component + +NVIDIA Common Device Math Functions Library + +Windows + +libdevice.10.bc + +Mac OSX + +libdevice.10.bc + +Linux + +libdevice.10.bc + +Component + +CUDA Occupancy Calculation Header Library + +All + +cuda_occupancy.h + +Component + +CUDA Half Precision Headers + +All + +cuda_fp16.h, cuda_fp16.hpp + +Component + +CUDA Profiling Tools Interface (CUPTI) Library + +Windows + +cupti.dll + +Mac OSX + +libcupti.dylib + +Linux + +libcupti.so + +Component + +NVIDIA Tools Extension Library + +Windows + +nvToolsExt.dll, nvToolsExt.lib + +Mac OSX + +libnvToolsExt.dylib + +Linux + +libnvToolsExt.so + +Component + +NVIDIA CUDA Driver Libraries + +Linux + +libcuda.so, libnvidia-ptxjitcompiler.so + +Component + +NVIDIA CUDA File IO Libraries and Header + +All + +cufile.h + +Linux + +libcufile.so, libcufile_rdma.so, libcufile_static.a, +libcufile_rdma_static.a + +The NVIDIA CUDA Driver Libraries are only distributable in +applications that meet this criteria: + + 1. The application was developed starting from a NVIDIA CUDA + container obtained from Docker Hub or the NVIDIA GPU + Cloud, and + + 2. The resulting application is packaged as a Docker + container and distributed to users on Docker Hub or the + NVIDIA GPU Cloud only. 
+ +In addition to the rights above, for parties that are +developing software intended solely for use on Jetson +development kits or Jetson modules, and running Linux for +Tegra software, the following shall apply: + + * The SDK may be distributed in its entirety, as provided by + NVIDIA, and without separation of its components, for you + and/or your licensees to create software development kits + for use only on the Jetson platform and running Linux for + Tegra software. + + +2.7. Attachment B + + +Additional Licensing Obligations + +The following third party components included in the SOFTWARE +are licensed to Licensee pursuant to the following terms and +conditions: + + 1. Licensee's use of the GDB third party component is + subject to the terms and conditions of GNU GPL v3: + + This product includes copyrighted third-party software licensed + under the terms of the GNU General Public License v3 ("GPL v3"). + All third-party software packages are copyright by their respective + authors. GPL v3 terms and conditions are hereby incorporated into + the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt + + Consistent with these licensing requirements, the software + listed below is provided under the terms of the specified + open source software licenses. To obtain source code for + software provided under licenses that require + redistribution of source code, including the GNU General + Public License (GPL) and GNU Lesser General Public License + (LGPL), contact oss-requests@nvidia.com. This offer is + valid for a period of three (3) years from the date of the + distribution of this product by NVIDIA CORPORATION. + + Component License + CUDA-GDB GPL v3 + + 2. Licensee represents and warrants that any and all third + party licensing and/or royalty payment obligations in + connection with Licensee's use of the H.264 video codecs + are solely the responsibility of Licensee. + + 3. 
Licensee's use of the Thrust library is subject to the + terms and conditions of the Apache License Version 2.0. + All third-party software packages are copyright by their + respective authors. Apache License Version 2.0 terms and + conditions are hereby incorporated into the Agreement by + this reference. + http://www.apache.org/licenses/LICENSE-2.0.html + + In addition, Licensee acknowledges the following notice: + Thrust includes source code from the Boost Iterator, + Tuple, System, and Random Number libraries. + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 4. 
Licensee's use of the LLVM third party component is + subject to the following terms and conditions: + + ====================================================== + LLVM Release License + ====================================================== + University of Illinois/NCSA + Open Source License + + Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign. + All rights reserved. + + Developed by: + + LLVM Team + + University of Illinois at Urbana-Champaign + + http://llvm.org + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to + deal with the Software without restriction, including without limitation the + rights to use, copy, modify, merge, publish, distribute, sublicense, and/or + sell copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimers. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimers in the + documentation and/or other materials provided with the distribution. + + * Neither the names of the LLVM Team, University of Illinois at Urbana- + Champaign, nor the names of its contributors may be used to endorse or + promote products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + DEALINGS WITH THE SOFTWARE. + + 5. Licensee's use of the PCRE third party component is + subject to the following terms and conditions: + + ------------ + PCRE LICENCE + ------------ + PCRE is a library of functions to support regular expressions whose syntax + and semantics are as close as possible to those of the Perl 5 language. + Release 8 of PCRE is distributed under the terms of the "BSD" licence, as + specified below. The documentation for PCRE, supplied in the "doc" + directory, is distributed under the same terms as the software itself. The + basic library functions are written in C and are freestanding. Also + included in the distribution is a set of C++ wrapper functions, and a just- + in-time compiler that can be used to optimize pattern matching. These are + both optional features that can be omitted when the library is built. + + THE BASIC LIBRARY FUNCTIONS + --------------------------- + Written by: Philip Hazel + Email local part: ph10 + Email domain: cam.ac.uk + University of Cambridge Computing Service, + Cambridge, England. + Copyright (c) 1997-2012 University of Cambridge + All rights reserved. + + PCRE JUST-IN-TIME COMPILATION SUPPORT + ------------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2010-2012 Zoltan Herczeg + All rights reserved. + + STACK-LESS JUST-IN-TIME COMPILER + -------------------------------- + Written by: Zoltan Herczeg + Email local part: hzmester + Emain domain: freemail.hu + Copyright(c) 2009-2012 Zoltan Herczeg + All rights reserved. + + THE C++ WRAPPER FUNCTIONS + ------------------------- + Contributed by: Google Inc. + Copyright (c) 2007-2012, Google Inc. + All rights reserved. 
+ + THE "BSD" LICENCE + ----------------- + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of the University of Cambridge nor the name of Google + Inc. nor the names of their contributors may be used to endorse or + promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 6. Some of the cuBLAS library routines were written by or + derived from code written by Vasily Volkov and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2007-2009, Regents of the University of California + + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the University of California, Berkeley nor + the names of its contributors may be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 7. Some of the cuBLAS library routines were written by or + derived from code written by Davide Barbieri and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata. + + All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * The name of the author may not be used to endorse or promote + products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR + IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, + INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, + STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING + IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + + 8. Some of the cuBLAS library routines were derived from + code developed by the University of Tennessee and are + subject to the Modified Berkeley Software Distribution + License as follows: + + Copyright (c) 2010 The University of Tennessee. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer listed in this license in the documentation and/or + other materials provided with the distribution. + * Neither the name of the copyright holders nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 9. Some of the cuBLAS library routines were written by or + derived from code written by Jonathan Hogg and are subject + to the Modified Berkeley Software Distribution License as + follows: + + Copyright (c) 2012, The Science and Technology Facilities Council (STFC). + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the STFC nor the names of its contributors + may be used to endorse or promote products derived from this + software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE + LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 10. Some of the cuBLAS library routines were written by or + derived from code written by Ahmad M. Abdelfattah, David + Keyes, and Hatem Ltaief, and are subject to the Apache + License, Version 2.0, as follows: + + -- (C) Copyright 2013 King Abdullah University of Science and Technology + Authors: + Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa) + David Keyes (david.keyes@kaust.edu.sa) + Hatem Ltaief (hatem.ltaief@kaust.edu.sa) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the King Abdullah University of Science and + Technology nor the names of its contributors may be used to endorse + or promote products derived from this software without specific prior + written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE + + 11. Some of the cuSPARSE library routines were written by or + derived from code written by Li-Wen Chang and are subject + to the NCSA Open Source License as follows: + + Copyright (c) 2012, University of Illinois. + + All rights reserved. 
+ + Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal with the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimers in the documentation and/or other materials provided + with the distribution. + * Neither the names of IMPACT Group, University of Illinois, nor + the names of its contributors may be used to endorse or promote + products derived from this Software without specific prior + written permission. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR + IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE + SOFTWARE. + + 12. Some of the cuRAND library routines were written by or + derived from code written by Mutsuo Saito and Makoto + Matsumoto and are subject to the following license: + + Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima + University. All rights reserved. + + Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima + University and University of Tokyo. All rights reserved. 
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of the Hiroshima University nor the names of + its contributors may be used to endorse or promote products + derived from this software without specific prior written + permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 13. Some of the cuRAND library routines were derived from + code developed by D. E. Shaw Research and are subject to + the following license: + + Copyright 2010-2011, D. E. Shaw Research. + + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions, and the following disclaimer. 
+ * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions, and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + * Neither the name of D. E. Shaw Research nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 14. Some of the Math library routines were written by or + derived from code developed by Norbert Juffa and are + subject to the following license: + + Copyright (c) 2015-2017, Norbert Juffa + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. 
+ + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 15. Licensee's use of the lz4 third party component is + subject to the following terms and conditions: + + Copyright (C) 2011-2013, Yann Collet. + BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following disclaimer + in the documentation and/or other materials provided with the + distribution. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + 16. The NPP library uses code from the Boost Math Toolkit, + and is subject to the following license: + + Boost Software License - Version 1.0 - August 17th, 2003 + . . . . + + Permission is hereby granted, free of charge, to any person or + organization obtaining a copy of the software and accompanying + documentation covered by this license (the "Software") to use, + reproduce, display, distribute, execute, and transmit the Software, + and to prepare derivative works of the Software, and to permit + third-parties to whom the Software is furnished to do so, all + subject to the following: + + The copyright notices in the Software and this entire statement, + including the above license grant, this restriction and the following + disclaimer, must be included in all copies of the Software, in whole + or in part, and all derivative works of the Software, unless such + copies or derivative works are solely in the form of machine-executable + object code generated by a source language processor. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND + NON-INFRINGEMENT. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR + ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR + OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + 17. Portions of the Nsight Eclipse Edition is subject to the + following license: + + The Eclipse Foundation makes available all content in this plug-in + ("Content"). Unless otherwise indicated below, the Content is provided + to you under the terms and conditions of the Eclipse Public License + Version 1.0 ("EPL"). A copy of the EPL is available at http:// + www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program" + will mean the Content. + + If you did not receive this Content directly from the Eclipse + Foundation, the Content is being redistributed by another party + ("Redistributor") and different terms and conditions may apply to your + use of any object code in the Content. Check the Redistributor's + license that was provided with the Content. If no such license exists, + contact the Redistributor. Unless otherwise indicated below, the terms + and conditions of the EPL still apply to any source code in the + Content and such source code may be obtained at http://www.eclipse.org. + + 18. Some of the cuBLAS library routines uses code from + OpenAI, which is subject to the following license: + + License URL + https://github.com/openai/openai-gemm/blob/master/LICENSE + + License Text + The MIT License + + Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc. 
+ + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. + + 19. Licensee's use of the Visual Studio Setup Configuration + Samples is subject to the following license: + + The MIT License (MIT) + Copyright (C) Microsoft Corporation. All rights reserved. + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without restriction, + including without limitation the rights to use, copy, modify, merge, + publish, distribute, sublicense, and/or sell copies of the Software, + and to permit persons to whom the Software is furnished to do so, + subject to the following conditions: + + The above copyright notice and this permission notice shall be included + in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + 20. Licensee's use of linmath.h header for CPU functions for + GL vector/matrix operations from lunarG is subject to the + Apache License Version 2.0. + + 21. The DX12-CUDA sample uses the d3dx12.h header, which is + subject to the MIT license . + +----------------- diff --git a/docs/licenses/cudnn/LICENSE b/docs/licenses/cudnn/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..2fa778e2aa3e8154da5857635f1bd4f2b5357038 --- /dev/null +++ b/docs/licenses/cudnn/LICENSE @@ -0,0 +1,291 @@ +LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS + +This license agreement, including exhibits attached ("Agreement”) is a legal agreement between you and NVIDIA Corporation ("NVIDIA") and governs your use of a NVIDIA software development kit (“SDK”). + +Each SDK has its own set of software and materials, but here is a description of the types of items that may be included in a SDK: source code, header files, APIs, data sets and assets (examples include images, textures, models, scenes, videos, native API input/output files), binary software, sample code, libraries, utility programs, programming code and documentation. + +This Agreement can be accepted only by an adult of legal age of majority in the country in which the SDK is used. + +If you are entering into this Agreement on behalf of a company or other legal entity, you represent that you have the legal authority to bind the entity to this Agreement, in which case “you” will mean the entity you represent. 
+ +If you don’t have the required age or authority to accept this Agreement, or if you don’t accept all the terms and conditions of this Agreement, do not download, install or use the SDK. + +You agree to use the SDK only for purposes that are permitted by (a) this Agreement, and (b) any applicable law, regulation or generally accepted practices or guidelines in the relevant jurisdictions. + +Chapter 1. License. + +1.1. Grant + +Subject to the terms of this Agreement, NVIDIA hereby grants you a non-exclusive, non-transferable license, without the right to sublicense (except as expressly provided in this Agreement) to: + +(i) Install and use the SDK, + +(ii) Modify and create derivative works of sample source code delivered in the SDK, and + +(iii) Distribute those portions of the SDK that are identified in this Agreement as distributable, as incorporated in object code format into a software application that meets the distribution requirements indicated in this Agreement. + +1.2. Distribution Requirements + +These are the distribution requirements for you to exercise the distribution grant: + +(i) Your application must have material additional functionality, beyond the included portions of the SDK. + +(ii) The distributable portions of the SDK shall only be accessed by your application. + +(iii) The following notice shall be included in modifications and derivative works of sample source code distributed: “This software contains source code provided by NVIDIA Corporation.” + +(iv) Unless a developer tool is identified in this Agreement as distributable, it is delivered for your internal use only. + +(v) The terms under which you distribute your application must be consistent with the terms of this Agreement, including (without limitation) terms relating to the license grant and license restrictions and protection of NVIDIA’s intellectual property rights. Additionally, you agree that you will protect the privacy, security and legal rights of your application users. 
+ +(vi) You agree to notify NVIDIA in writing of any known or suspected distribution or use of the SDK not in compliance with the requirements of this Agreement, and to enforce the terms of your agreements with respect to distributed SDK. + +1.3 Authorized Users + +You may allow employees and contractors of your entity or of your subsidiary(ies) to access and use the SDK from your secure network to perform work on your behalf. + +If you are an academic institution you may allow users enrolled or employed by the academic institution to access and use the SDK from your secure network. + +You are responsible for the compliance with the terms of this Agreement by your authorized users. If you become aware that your authorized users didn’t follow the terms of this Agreement, you agree to take reasonable steps to resolve the non-compliance and prevent new occurrences. + +1.4 Pre-Release SDK + +The SDK versions identified as alpha, beta, preview or otherwise as pre-release, may not be fully functional, may contain errors or design flaws, and may have reduced or different security, privacy, accessibility, availability, and reliability standards relative to commercial versions of NVIDIA software and materials. Use of a pre-release SDK may result in unexpected results, loss of data, project delays or other unpredictable damage or loss. + +You may use a pre-release SDK at your own risk, understanding that pre-release SDKs are not intended for use in production or business-critical systems. + +NVIDIA may choose not to make available a commercial version of any pre-release SDK. NVIDIA may also choose to abandon development and terminate the availability of a pre-release SDK at any time without liability. + +1.5 Updates + +NVIDIA may, at its option, make available patches, workarounds or other updates to this SDK. Unless the updates are provided with their separate governing terms, they are deemed part of the SDK licensed to you as provided in this Agreement. 
+ +You agree that the form and content of the SDK that NVIDIA provides may change without prior notice to you. While NVIDIA generally maintains compatibility between versions, NVIDIA may in some cases make changes that introduce incompatibilities in future versions of the SDK. + +1.6 Components Under Other Licenses + +The SDK may come bundled with, or otherwise include or be distributed with, NVIDIA or third party software licensed with separate legal notices or terms as may be described in proprietary notices accompanying the SDK. If and to the extent there is a conflict between the terms in this Agreement and the license terms associated with the component, the license terms associated with the components control only to the extent necessary to resolve the conflict. + +1.7 Reservation of Rights + +NVIDIA reserves all rights, title and interest in and to the SDK not expressly granted to you under this Agreement. + +Chapter 2. Limitations. + +The following license limitations apply to your use of the SDK: + +2.1 You may not reverse engineer, decompile or disassemble, or remove copyright or other proprietary notices from any portion of the SDK or copies of the SDK. + +2.2 Except as expressly provided in this Agreement, you may not copy, sell, rent, sublicense, transfer, distribute, modify, or create derivative works of any portion of the SDK. + +2.3 Unless you have an agreement with NVIDIA for this purpose, you may not indicate that an application created with the SDK is sponsored or endorsed by NVIDIA. + +2.4 You may not bypass, disable, or circumvent any encryption, security, digital rights management or authentication mechanism in the SDK. + +2.5 You may not use the SDK in any manner that would cause it to become subject to an open source software license. 
As examples, licenses that require as a condition of use, modification, and/or distribution that the SDK be (i) disclosed or distributed in source code form; (ii) licensed for the purpose of making derivative works; or (iii) redistributable at no charge. + +2.6 You acknowledge that the SDK as delivered is not tested or certified by NVIDIA for use in connection with the design, construction, maintenance, and/or operation of any system where the use or failure of such system could result in a situation that threatens the safety of human life or results in catastrophic damages (each, a “Critical Application”). Examples of Critical Applications include use in avionics, navigation, autonomous vehicle applications, ai solutions for automotive products, military, medical, life support or other life critical applications. NVIDIA shall not be liable to you or any third party, in whole or in part, for any claims or damages arising from such uses. You are solely responsible for ensuring that any product or service developed with the SDK as a whole includes sufficient features to comply with all applicable legal and regulatory standards and requirements. + +2.7 You agree to defend, indemnify and hold harmless NVIDIA and its affiliates, and their respective employees, contractors, agents, officers and directors, from and against any and all claims, damages, obligations, losses, liabilities, costs or debt, fines, restitutions and expenses (including but not limited to attorney’s fees and costs incident to establishing the right of indemnification) arising out of or related to products or services that use the SDK in or for Critical Applications, and for use of the SDK, outside of the scope of this Agreement or not in compliance with its terms. + +Chapter 3. Ownership. 
+ +3.1 NVIDIA or its licensors hold all rights, title and interest in and to the SDK and its modifications and derivative works, including their respective intellectual property rights, subject to your rights under Section 3.2. This SDK may include software and materials from NVIDIA’s licensors, and these licensors are intended third party beneficiaries that may enforce this Agreement with respect to their intellectual property rights. + +3.2 You hold all rights, title and interest in and to your applications and your derivative works of the sample source code delivered in the SDK, including their respective intellectual property rights, subject to NVIDIA’s rights under section 3.1. + +3.3 You may, but don’t have to, provide to NVIDIA suggestions, feature requests or other feedback regarding the SDK, including possible enhancements or modifications to the SDK. For any feedback that you voluntarily provide, you hereby grant NVIDIA and its affiliates a perpetual, non-exclusive, worldwide, irrevocable license to use, reproduce, modify, license, sublicense (through multiple tiers of sublicensees), and distribute (through multiple tiers of distributors) it without the payment of any royalties or fees to you. NVIDIA will use feedback at its choice. NVIDIA is constantly looking for ways to improve its products, so you may send feedback to NVIDIA through the developer portal at https://developer.nvidia.com. + +Chapter 4. No Warranties. + +THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF DEALING OR COURSE OF TRADE. + +Chapter 5. Limitations of Liability. 
+ +TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL, PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK, WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE), PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS LIMIT. + +These exclusions and limitations of liability shall apply regardless if NVIDIA or its affiliates have been advised of the possibility of such damages, and regardless of whether a remedy fails its essential purpose. These exclusions and limitations of liability form an essential basis of the bargain between the parties, and, absent any of these exclusions or limitations of liability, the provisions of this Agreement, including, without limitation, the economic terms, would be substantially different. + +Chapter 6. Termination. + +6.1 This Agreement will continue to apply until terminated by either you or NVIDIA as described below. + +6.2 If you want to terminate this Agreement, you may do so by stopping to use the SDK. 
+ +6.3 NVIDIA may, at any time, terminate this Agreement if: (i) you fail to comply with any term of this Agreement and the non-compliance is not fixed within thirty (30) days following notice from NVIDIA (or immediately if you violate NVIDIA’s intellectual property rights); (ii) you commence or participate in any legal proceeding against NVIDIA with respect to the SDK; or (iii) NVIDIA decides to no longer provide the SDK in a country or, in NVIDIA’s sole discretion, the continued use of it is no longer commercially viable. + +6.4 Upon any termination of this Agreement, you agree to promptly discontinue use of the SDK and destroy all copies in your possession or control. Your prior distributions in accordance with this Agreement are not affected by the termination of this Agreement. Upon written request, you will certify in writing that you have complied with your commitments under this section. Upon any termination of this Agreement all provisions survive except for the licenses granted to you. + +Chapter 7. General. + +If you wish to assign this Agreement or your rights and obligations, including by merger, consolidation, dissolution or operation of law, contact NVIDIA to ask for permission. Any attempted assignment not approved by NVIDIA in writing shall be void and of no effect. NVIDIA may assign, delegate or transfer this Agreement and its rights and obligations, and if to a non-affiliate you will be notified. + +You agree to cooperate with NVIDIA and provide reasonably requested information to verify your compliance with this Agreement. + +This Agreement will be governed in all respects by the laws of the United States and of the State of Delaware as those laws are applied to contracts entered into and performed entirely within Delaware by Delaware residents, without regard to the conflicts of laws principles. The United Nations Convention on Contracts for the International Sale of Goods is specifically disclaimed. 
You agree to all terms of this Agreement in the English language. + +The state or federal courts residing in Santa Clara County, California shall have exclusive jurisdiction over any dispute or claim arising out of this Agreement. Notwithstanding this, you agree that NVIDIA shall still be allowed to apply for injunctive remedies or an equivalent type of urgent legal relief in any jurisdiction. + +If any court of competent jurisdiction determines that any provision of this Agreement is illegal, invalid or unenforceable, such provision will be construed as limited to the extent necessary to be consistent with and fully enforceable under the law and the remaining provisions will remain in full force and effect. Unless otherwise specified, remedies are cumulative. + +Each party acknowledges and agrees that the other is an independent contractor in the performance of this Agreement + +The SDK has been developed entirely at private expense and is “commercial items” consisting of “commercial computer software” and “commercial computer software documentation” provided with RESTRICTED RIGHTS. Use, duplication or disclosure by the U.S. Government or a U.S. Government subcontractor is subject to the restrictions in this Agreement pursuant to DFARS 227.7202-3(a) or as set forth in subparagraphs (b)(1) and (2) of the Commercial Computer Software - Restricted Rights clause at FAR 52.227-19, as applicable. Contractor/ manufacturer is NVIDIA, 2788 San Tomas Expressway, Santa Clara, CA 95051 + +The SDK is subject to United States export laws and regulations. You agree that you will not ship, transfer or export the SDK into any country, or use the SDK in any manner, prohibited by the United States Bureau of Industry and Security or economic sanctions regulations administered by the U.S. Department of Treasury’s Office of Foreign Assets Control (OFAC), or any applicable export laws, restrictions or regulations. These laws include restrictions on destinations, end users and end use. 
By accepting this Agreement, you confirm that you are not a resident or citizen of any country currently embargoed by the U.S. and that you are not otherwise prohibited from receiving the SDK + +Any notice delivered by NVIDIA to you under this Agreement will be delivered via mail, email or fax. You agree that any notices that NVIDIA sends you electronically will satisfy any legal communication requirements. Please direct your legal notices or other correspondence to NVIDIA Corporation, 2788 San Tomas Expressway, Santa Clara, California 95051, United States of America, Attention: Legal Department. + +This Agreement and any exhibits incorporated into this Agreement constitute the entire agreement of the parties with respect to the subject matter of this Agreement and supersede all prior negotiations or documentation exchanged between the parties relating to this SDK license. Any additional and/or conflicting terms on documents issued by you are null, void, and invalid. Any amendment or waiver under this Agreement shall be in writing and signed by representatives of both parties. + +(v. February 22, 2022) + +Chapter 8. cuDNN SUPPLEMENT TO SOFTWARE LICENSE AGREEMENT FOR NVIDIA SOFTWARE DEVELOPMENT KITS + +The terms in this supplement govern your use of the NVIDIA cuDNN SDK under the terms of your license agreement (“Agreement”) as modified by this supplement. Capitalized terms used but not defined below have the meaning assigned to them in the Agreement. + +This supplement is an exhibit to the Agreement and is incorporated as an integral part of the Agreement. In the event of conflict between the terms in this supplement and the terms in the Agreement, the terms in this supplement govern. + +4.1 License Scope. The SDK is licensed for you to develop applications only for use in systems with NVIDIA GPUs. + +2. Distribution. The following portions of the SDK are distributable under the Agreement: the runtime files .so and .dll. 
+ +In addition to the rights above, for parties that are developing software intended solely for use on Jetson development kits or Jetson modules and running Linux for Tegra software the following shall apply: the SDK may be distributed in its entirety, as provided by NVIDIA and without separation of its components, for you and/or your licensees to create software development kits for use only on the Jetson platform and running Linux for Tegra software. + +3. Licensing. If the distribution terms in this Agreement are not suitable for your organization, or for any questions regarding this Agreement, please contact NVIDIA at nvidia-compute-license-questions@nvidia.com + +(v. February 22, 2022) + +Notice + +This document is provided for information purposes only and shall not be regarded as a warranty of a certain functionality, condition, or quality of a product. NVIDIA Corporation (“NVIDIA”) makes no representations or warranties, expressed or implied, as to the accuracy or completeness of the information contained in this document and assumes no responsibility for any errors contained herein. NVIDIA shall have no liability for the consequences or use of such information or for any infringement of patents or other rights of third parties that may result from its use. This document is not a commitment to develop, release, or deliver any Material (defined below), code, or functionality. + +NVIDIA reserves the right to make corrections, modifications, enhancements, improvements, and any other changes to this document, at any time without notice. + +Customer should obtain the latest relevant information before placing orders and should verify that such information is current and complete. + +NVIDIA products are sold subject to the NVIDIA standard terms and conditions of sale supplied at the time of order acknowledgement, unless otherwise agreed in an individual sales agreement signed by authorized representatives of NVIDIA and customer (“Terms of Sale”). 
NVIDIA hereby expressly objects to applying any customer general terms and conditions with regards to the purchase of the NVIDIA product referenced in this document. No contractual obligations are formed either directly or indirectly by this document. + +NVIDIA products are not designed, authorized, or warranted to be suitable for use in medical, military, aircraft, space, or life support equipment, nor in applications where failure or malfunction of the NVIDIA product can reasonably be expected to result in personal injury, death, or property or environmental damage. NVIDIA accepts no liability for inclusion and/or use of NVIDIA products in such equipment or applications and therefore such inclusion and/or use is at customer’s own risk. + +NVIDIA makes no representation or warranty that products based on this document will be suitable for any specified use. Testing of all parameters of each product is not necessarily performed by NVIDIA. It is customer’s sole responsibility to evaluate and determine the applicability of any information contained in this document, ensure the product is suitable and fit for the application planned by customer, and perform the necessary testing for the application in order to avoid a default of the application or the product. Weaknesses in customer’s product designs may affect the quality and reliability of the NVIDIA product and may result in additional or different conditions and/or requirements beyond those contained in this document. NVIDIA accepts no liability related to any default, damage, costs, or problem which may be based on or attributable to: (i) the use of the NVIDIA product in any manner that is contrary to this document or (ii) customer product designs. + +No license, either expressed or implied, is granted under any NVIDIA patent right, copyright, or other NVIDIA intellectual property right under this document. 
Information published by NVIDIA regarding third-party products or services does not constitute a license from NVIDIA to use such products or services or a warranty or endorsement thereof. Use of such information may require a license from a third party under the patents or other intellectual property rights of the third party, or a license from NVIDIA under the patents or other intellectual property rights of NVIDIA. + +Reproduction of information in this document is permissible only if approved in advance by NVIDIA in writing, reproduced without alteration and in full compliance with all applicable export laws and regulations, and accompanied by all associated conditions, limitations, and notices. + +THIS DOCUMENT AND ALL NVIDIA DESIGN SPECIFICATIONS, REFERENCE BOARDS, FILES, DRAWINGS, DIAGNOSTICS, LISTS, AND OTHER DOCUMENTS (TOGETHER AND SEPARATELY, “MATERIALS”) ARE BEING PROVIDED “AS IS.” NVIDIA MAKES NO WARRANTIES, EXPRESSED, IMPLIED, STATUTORY, OR OTHERWISE WITH RESPECT TO THE MATERIALS, AND EXPRESSLY DISCLAIMS ALL IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A PARTICULAR PURPOSE. TO THE EXTENT NOT PROHIBITED BY LAW, IN NO EVENT WILL NVIDIA BE LIABLE FOR ANY DAMAGES, INCLUDING WITHOUT LIMITATION ANY DIRECT, INDIRECT, SPECIAL, INCIDENTAL, PUNITIVE, OR CONSEQUENTIAL DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF ANY USE OF THIS DOCUMENT, EVEN IF NVIDIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. Notwithstanding any damages that customer might incur for any reason whatsoever, NVIDIA’s aggregate and cumulative liability towards customer for the products described herein shall be limited in accordance with the Terms of Sale for the product. + +ARM + +ARM, AMBA and ARM Powered are registered trademarks of ARM Limited. Cortex, MPCore and Mali are trademarks of ARM Limited. 
"ARM" is used to represent ARM Holdings plc; its operating company ARM Limited; and the regional subsidiaries ARM Inc.; ARM KK; ARM Korea Limited.; ARM Taiwan Limited; ARM France SAS; ARM Consulting (Shanghai) Co. Ltd.; ARM Germany GmbH; ARM Embedded Technologies Pvt. Ltd.; ARM Norway, AS and ARM Sweden AB. + +Trademarks + +NVIDIA, the NVIDIA logo, and CUDA, DRIVE, JetPack, Kepler, Maxwell, Pascal, Turing, Volta and Xavier are trademarks and/or registered trademarks of NVIDIA Corporation in the United States and other countries. Other company and product names may be trademarks of the respective companies with which they are associated. + +Copyright + +© 2017-2022 NVIDIA Corporation & affiliates. All rights reserved. + +NVIDIA Corporation | 2788 San Tomas Expressway, Santa Clara, CA 95051 + +www.nvidia.com + +THIRD PARTY LICENSES + +zlib + +/* zlib.h -- interface of the 'zlib' general purpose compression library + version 1.2.11, January 15th, 2017 + + Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. 
+ + Jean-loup Gailly Mark Adler + jloup@gzip.org madler@alumni.caltech.edu + +*/ + + + +mateidavid/zstr + +The MIT License (MIT) + +Copyright (c) 2015 Matei David, Ontario Institute for Cancer Research + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + +RapidJSON + +Tencent is pleased to support the open source community by making RapidJSON available. + +Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. + +Licensed under the MIT License (the "License"); you may not use this file except +in compliance with the License. You may obtain a copy of the License at + +http://opensource.org/licenses/MIT + +Unless required by applicable law or agreed to in writing, software distributed +under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+ + + +{fmt} + +Copyright (c) 2012 - present, Victor Zverovich + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +--- Optional exception to the license --- + +As an exception, if, as a result of your compiling your source code, portions of this Software are embedded into a machine-executable object form of such source code, you may redistribute such embedded portions in such object form without including the above copyright and permission notices. + + + +Sleef + +Copyright Naoki Shibata and contributors 2010 - 2021. +Distributed under the Boost Software License, Version 1.0. 
+(See accompanying file LICENSE.txt or copy at http://www.boost.org/LICENSE_1_0.txt) diff --git a/docs/licenses/open_jtalk/COPYING b/docs/licenses/open_jtalk/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..495268369d51f7794083769e3305ef108593ab94 --- /dev/null +++ b/docs/licenses/open_jtalk/COPYING @@ -0,0 +1,39 @@ +/* ----------------------------------------------------------------- */ +/* The Japanese TTS System "Open JTalk" */ +/* developed by HTS Working Group */ +/* http://open-jtalk.sourceforge.net/ */ +/* ----------------------------------------------------------------- */ +/* */ +/* Copyright (c) 2008-2016 Nagoya Institute of Technology */ +/* Department of Computer Science */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* - Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* - Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials provided */ +/* with the distribution. */ +/* - Neither the name of the HTS working group nor the names of its */ +/* contributors may be used to endorse or promote products derived */ +/* from this software without specific prior written permission. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */ +/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */ +/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */ +/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */ +/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */ +/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */ +/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */ +/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* ----------------------------------------------------------------- */ diff --git a/docs/licenses/open_jtalk/mecab-naist-jdic/COPYING b/docs/licenses/open_jtalk/mecab-naist-jdic/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..40bdeecb430c04ac9b463c27e7fda85699206e6c --- /dev/null +++ b/docs/licenses/open_jtalk/mecab-naist-jdic/COPYING @@ -0,0 +1,69 @@ +Copyright (c) 2009, Nara Institute of Science and Technology, Japan. + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + +Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. +Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. +Neither the name of the Nara Institute of Science and Technology +(NAIST) nor the names of its contributors may be used to endorse or +promote products derived from this software without specific prior +written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* ----------------------------------------------------------------- */ +/* The Japanese TTS System "Open JTalk" */ +/* developed by HTS Working Group */ +/* http://open-jtalk.sourceforge.net/ */ +/* ----------------------------------------------------------------- */ +/* */ +/* Copyright (c) 2008-2016 Nagoya Institute of Technology */ +/* Department of Computer Science */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* - Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* - Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials provided */ +/* with the distribution. */ +/* - Neither the name of the HTS working group nor the names of its */ +/* contributors may be used to endorse or promote products derived */ +/* from this software without specific prior written permission. 
*/ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */ +/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */ +/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */ +/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */ +/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */ +/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */ +/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */ +/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* ----------------------------------------------------------------- */ diff --git a/docs/licenses/open_jtalk/mecab/COPYING b/docs/licenses/open_jtalk/mecab/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..8c50c6c47472d3b190177ce754c5227091040856 --- /dev/null +++ b/docs/licenses/open_jtalk/mecab/COPYING @@ -0,0 +1,69 @@ +Copyright (c) 2001-2008, Taku Kudo +Copyright (c) 2004-2008, Nippon Telegraph and Telephone Corporation +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are +permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above + copyright notice, this list of conditions and the + following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the + following disclaimer in the documentation and/or other + materials provided with the distribution. 
+ + * Neither the name of the Nippon Telegraph and Telegraph Corporation + nor the names of its contributors may be used to endorse or + promote products derived from this software without specific + prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR +TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* ----------------------------------------------------------------- */ +/* The Japanese TTS System "Open JTalk" */ +/* developed by HTS Working Group */ +/* http://open-jtalk.sourceforge.net/ */ +/* ----------------------------------------------------------------- */ +/* */ +/* Copyright (c) 2008-2016 Nagoya Institute of Technology */ +/* Department of Computer Science */ +/* */ +/* All rights reserved. */ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* - Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* - Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials provided */ +/* with the distribution. 
*/ +/* - Neither the name of the HTS working group nor the names of its */ +/* contributors may be used to endorse or promote products derived */ +/* from this software without specific prior written permission. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */ +/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */ +/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */ +/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */ +/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */ +/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */ +/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */ +/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. */ +/* ----------------------------------------------------------------- */ diff --git a/docs/licenses/world/LICENSE.txt b/docs/licenses/world/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..1c12e7699545c3f2a4340f339664c6925de4d634 --- /dev/null +++ b/docs/licenses/world/LICENSE.txt @@ -0,0 +1,39 @@ +/* ----------------------------------------------------------------- */ +/* WORLD: High-quality speech analysis, */ +/* manipulation and synthesis system */ +/* developed by M. Morise */ +/* http://www.kisc.meiji.ac.jp/~mmorise/world/english/ */ +/* ----------------------------------------------------------------- */ +/* */ +/* Copyright (c) 2010 M. Morise */ +/* */ +/* All rights reserved. 
*/ +/* */ +/* Redistribution and use in source and binary forms, with or */ +/* without modification, are permitted provided that the following */ +/* conditions are met: */ +/* */ +/* - Redistributions of source code must retain the above copyright */ +/* notice, this list of conditions and the following disclaimer. */ +/* - Redistributions in binary form must reproduce the above */ +/* copyright notice, this list of conditions and the following */ +/* disclaimer in the documentation and/or other materials provided */ +/* with the distribution. */ +/* - Neither the name of the M. Morise nor the names of its */ +/* contributors may be used to endorse or promote products derived */ +/* from this software without specific prior written permission. */ +/* */ +/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND */ +/* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, */ +/* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF */ +/* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE */ +/* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS */ +/* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, */ +/* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED */ +/* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, */ +/* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON */ +/* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, */ +/* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY */ +/* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */ +/* POSSIBILITY OF SUCH DAMAGE. 
*/ +/* ----------------------------------------------------------------- */ diff --git "a/docs/res/\343\203\236\343\203\253\343\203\201\343\202\250\343\203\263\343\202\270\343\203\263\346\246\202\345\277\265\345\233\263.svg" "b/docs/res/\343\203\236\343\203\253\343\203\201\343\202\250\343\203\263\343\202\270\343\203\263\346\246\202\345\277\265\345\233\263.svg" new file mode 100644 index 0000000000000000000000000000000000000000..479b701f0653439dbd334f252e027b18e9e4cc11 --- /dev/null +++ "b/docs/res/\343\203\236\343\203\253\343\203\201\343\202\250\343\203\263\343\202\270\343\203\263\346\246\202\345\277\265\345\233\263.svg" @@ -0,0 +1,35 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/engine_manifest.json b/engine_manifest.json new file mode 100644 index 0000000000000000000000000000000000000000..73782a833c720c54e1deb8b22e5e590483eba96e --- /dev/null +++ b/engine_manifest.json @@ -0,0 +1,62 @@ +{ + "manifest_version": "0.13.1", + "name": "DUMMY Engine", + "brand_name": "DUMMY", + "uuid": "c7b58856-bd56-4aa1-afb7-b8415f824b06", + "version": "999.999.999", + "url": "https://github.com/VOICEVOX/voicevox_engine", + "command": "run", + "port": 50021, + "icon": "engine_manifest_assets/icon.png", + "default_sampling_rate": 24000, + "terms_of_service": "engine_manifest_assets/terms_of_service.md", + "update_infos": "engine_manifest_assets/update_infos.json", + "dependency_licenses": "engine_manifest_assets/dependency_licenses.json", + "supported_features": { + "adjust_mora_pitch": { + "type": "bool", + "value": true, + "name": "モーラごとの音高の調整" + }, + "adjust_phoneme_length": { + "type": "bool", + "value": true, + "name": "音素ごとの長さの調整" + }, + "adjust_speed_scale": { + "type": "bool", + "value": true, + "name": "全体の話速の調整" + }, + "adjust_pitch_scale": { + "type": "bool", + "value": true, + "name": "全体の音高の調整" + }, + "adjust_intonation_scale": { + "type": "bool", + "value": true, + "name": "全体の抑揚の調整" + }, + "adjust_volume_scale": { + "type": 
"bool", + "value": true, + "name": "全体の音量の調整" + }, + "interrogative_upspeak": { + "type": "bool", + "value": true, + "name": "疑問文の自動調整" + }, + "synthesis_morphing" : { + "type": "bool", + "value": true, + "name": "2人の話者でモーフィングした音声を合成" + }, + "manage_library": { + "type": "bool", + "value": true, + "name": "音声ライブラリのインストール・アンインストール" + } + } +} diff --git a/engine_manifest_assets/dependency_licenses.json b/engine_manifest_assets/dependency_licenses.json new file mode 100644 index 0000000000000000000000000000000000000000..875de0f80d5cd0986e56248dba2a011702628c14 --- /dev/null +++ b/engine_manifest_assets/dependency_licenses.json @@ -0,0 +1,8 @@ +[ + { + "name": "dummy library", + "version": "0.0.1", + "license": "dummy license", + "text": "dummy license text" + } +] \ No newline at end of file diff --git a/engine_manifest_assets/downloadable_libraries.json b/engine_manifest_assets/downloadable_libraries.json new file mode 100644 index 0000000000000000000000000000000000000000..954786200326db91b9bbcebc794b66d993a5b2e9 --- /dev/null +++ b/engine_manifest_assets/downloadable_libraries.json @@ -0,0 +1,44 @@ +[ + { + "name": "Dummy Library", + "uuid": "2bb8bccf-1c3f-4bc9-959a-f388e37af3ad", + "version": "0.0.1", + "download_url": "https://github.com/VOICEVOX/voicevox_engine/archive/d7cf31c058bc83e1abf8e14d4231a06409c4cc2d.zip", + "bytes": 1000, + "speakers": [ + { + "speaker": { + "name": "dummy1", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "styles": [ + { + "name": "style1", + "id": 0 + }, + { + "name": "style2", + "id": 2 + } + ], + "version": "0.0.1" + }, + "speaker_info": { + "policy": "", + "portrait": "", + "style_infos": [ + { + "id": 0, + "icon": "", + "voice_samples": ["", "", ""] + }, + { + "id": 2, + "icon": "", + "voice_samples": ["", "", ""] + } + ] + } + } + ] + } +] diff --git a/engine_manifest_assets/icon.png b/engine_manifest_assets/icon.png new file mode 100644 index 
0000000000000000000000000000000000000000..98064f347fdf8bd4fe42098c79ad293f291d40c8 Binary files /dev/null and b/engine_manifest_assets/icon.png differ diff --git a/engine_manifest_assets/terms_of_service.md b/engine_manifest_assets/terms_of_service.md new file mode 100644 index 0000000000000000000000000000000000000000..c97bb512c679e98b7685a3b3d7013497ca280954 --- /dev/null +++ b/engine_manifest_assets/terms_of_service.md @@ -0,0 +1 @@ +dummy teams of service \ No newline at end of file diff --git a/engine_manifest_assets/update_infos.json b/engine_manifest_assets/update_infos.json new file mode 100644 index 0000000000000000000000000000000000000000..41d8183e85f2b82819c7039f1048f2b766ed7e4b --- /dev/null +++ b/engine_manifest_assets/update_infos.json @@ -0,0 +1,67 @@ +[ + { + "version": "0.14.4", + "descriptions": [ + "キャラクター「春歌ナナ」「猫使アル」「猫使ビィ」を追加", + "バグ修正" + ], + "contributors": ["Hiroshiba"] + }, + { + "version": "0.14.3", + "descriptions": [ + "キャラクター「†聖騎士 紅桜†」「雀松朱司」「麒ヶ島宗麟」を追加", + "同時書き込みで辞書が破損する問題を修正" + ], + "contributors": ["Hiroshiba"] + }, + { + "version": "0.14.2", + "descriptions": ["DirectML版の生成が遅い問題を修正"], + "contributors": [] + }, + { + "version": "0.14.1", + "descriptions": ["AquesTalkライクな記法で生成した音声のバグを修正"], + "contributors": [] + }, + { + "version": "0.14.0", + "descriptions": [ + "コアをRust言語に移行", + "セキュリティアップデート", + "スタイルごとに異なる立ち絵の提供を可能に", + "VVPPファイルの提供", + "設定GUIの提供", + "プリセットの保存", + "モーフィングAPIの仕様変更", + "DirectML利用時に適したGPUを自動選択", + "開発環境の向上", + "バグ修正" + ], + "contributors": [ + "aoirint", + "Appletigerv", + "haru3me", + "Hiroshiba", + "ksk001100", + "masinc", + "misogihagi", + "My-MC", + "nebocco", + "PickledChair", + "qryxip", + "qwerty2501", + "sabonerune", + "sarisia", + "Segu-g", + "sevenc-nanashi", + "shigobu", + "smly", + "takana-v", + "ts-klassen", + "whiteball", + "y-chan" + ] + } +] diff --git a/generate_licenses.py b/generate_licenses.py new file mode 100644 index 
0000000000000000000000000000000000000000..da41db0c01e20dc8cf935418bb59a5c4923c56ae --- /dev/null +++ b/generate_licenses.py @@ -0,0 +1,337 @@ +import json +import os +import subprocess +import urllib.request +from dataclasses import asdict, dataclass +from pathlib import Path +from typing import List, Optional + + +@dataclass +class License: + name: str + version: Optional[str] + license: Optional[str] + text: str + + +def generate_licenses() -> List[License]: + licenses: List[License] = [] + + # openjtalk + # https://sourceforge.net/projects/open-jtalk/files/Open%20JTalk/open_jtalk-1.11/ + licenses.append( + License( + name="Open JTalk", + version="1.11", + license="Modified BSD license", + text=Path("docs/licenses/open_jtalk/COPYING").read_text(), + ) + ) + licenses.append( + License( + name="MeCab", + version=None, + license="Modified BSD license", + text=Path("docs/licenses/open_jtalk/mecab/COPYING").read_text(), + ) + ) + licenses.append( + License( + name="NAIST Japanese Dictionary", + version=None, + license="Modified BSD license", + text=Path("docs/licenses//open_jtalk/mecab-naist-jdic/COPYING").read_text(), + ) + ) + with urllib.request.urlopen( + "https://raw.githubusercontent.com/r9y9/pyopenjtalk/master/pyopenjtalk/htsvoice/LICENSE_mei_normal.htsvoice" # noqa: B950 + ) as res: + licenses.append( + License( + name='HTS Voice "Mei"', + version=None, + license="Creative Commons Attribution 3.0 license", + text=res.read().decode(), + ) + ) + + # VOICEVOX CORE + with urllib.request.urlopen( + "https://raw.githubusercontent.com/VOICEVOX/voicevox_core/main/LICENSE" + ) as res: + licenses.append( + License( + name="VOICEVOX CORE", + version=None, + license="MIT license", + text=res.read().decode(), + ) + ) + + # VOICEVOX ENGINE + with urllib.request.urlopen( + "https://raw.githubusercontent.com/VOICEVOX/voicevox_engine/master/LGPL_LICENSE" + ) as res: + licenses.append( + License( + name="VOICEVOX ENGINE", + version=None, + license="LGPL license", + 
text=res.read().decode(), + ) + ) + + # world + with urllib.request.urlopen( + "https://raw.githubusercontent.com/mmorise/World/master/LICENSE.txt" + ) as res: + licenses.append( + License( + name="world", + version=None, + license="Modified BSD license", + text=res.read().decode(), + ) + ) + + # pytorch + with urllib.request.urlopen( + "https://raw.githubusercontent.com/pytorch/pytorch/master/LICENSE" + ) as res: + licenses.append( + License( + name="PyTorch", + version="1.9.0", + license="BSD-style license", + text=res.read().decode(), + ) + ) + + # onnxruntime + with urllib.request.urlopen( + "https://raw.githubusercontent.com/microsoft/onnxruntime/master/LICENSE" + ) as res: + licenses.append( + License( + name="ONNX Runtime", + version="1.13.1", + license="MIT license", + text=res.read().decode(), + ) + ) + + # Python + python_version = "3.11.3" + with urllib.request.urlopen( + f"https://raw.githubusercontent.com/python/cpython/v{python_version}/LICENSE" + ) as res: + licenses.append( + License( + name="Python", + version=python_version, + license="Python Software Foundation License", + text=res.read().decode(), + ) + ) + + # pip + try: + pip_licenses_output = subprocess.run( + "pip-licenses " + "--from=mixed " + "--format=json " + "--with-urls " + "--with-license-file " + "--no-license-path ", + shell=True, + capture_output=True, + check=True, + env=os.environ, + ).stdout.decode() + except subprocess.CalledProcessError as err: + raise Exception( + f"command output:\n{err.stderr and err.stderr.decode()}" + ) from err + + licenses_json = json.loads(pip_licenses_output) + for license_json in licenses_json: + license = License( + name=license_json["Name"], + version=license_json["Version"], + license=license_json["License"], + text=license_json["LicenseText"], + ) + # FIXME: assert license type + if license.text == "UNKNOWN": + if license.name.lower() == "core" and license.version == "0.0.0": + continue + elif license.name.lower() == "future": + with 
urllib.request.urlopen( + "https://raw.githubusercontent.com/PythonCharmers/python-future/master/LICENSE.txt" # noqa: B950 + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "pefile": + with urllib.request.urlopen( + "https://raw.githubusercontent.com/erocarrera/pefile/master/LICENSE" # noqa: B950 + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "pyopenjtalk": + with urllib.request.urlopen( + "https://raw.githubusercontent.com/r9y9/pyopenjtalk/master/LICENSE.md" + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "python-multipart": + with urllib.request.urlopen( + "https://raw.githubusercontent.com/andrew-d/python-multipart/master/LICENSE.txt" # noqa: B950 + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "romkan": + with urllib.request.urlopen( + "https://raw.githubusercontent.com/soimort/python-romkan/master/LICENSE" + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "distlib": + with urllib.request.urlopen( + "https://bitbucket.org/pypa/distlib/raw/7d93712134b28401407da27382f2b6236c87623a/LICENSE.txt" # noqa: B950 + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "jsonschema": + with urllib.request.urlopen( + "https://raw.githubusercontent.com/python-jsonschema/jsonschema/dbc398245a583cb2366795dc529ae042d10c1577/COPYING" + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "lockfile": + with urllib.request.urlopen( + "https://opendev.org/openstack/pylockfile/raw/tag/0.12.2/LICENSE" + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "platformdirs": + with urllib.request.urlopen( + "https://raw.githubusercontent.com/platformdirs/platformdirs/aa671aaa97913c7b948567f4d9c77d4f98bfa134/LICENSE" + ) as res: + license.text = res.read().decode() + elif license.name.lower() == "webencodings": + with urllib.request.urlopen( + 
"https://raw.githubusercontent.com/gsnedders/python-webencodings/fa2cb5d75ab41e63ace691bc0825d3432ba7d694/LICENSE" + ) as res: + license.text = res.read().decode() + else: + # ライセンスがpypiに無い + raise Exception(f"No License info provided for {license.name}") + licenses.append(license) + + # OpenBLAS + with urllib.request.urlopen( + "https://raw.githubusercontent.com/xianyi/OpenBLAS/develop/LICENSE" + ) as res: + licenses.append( + License( + name="OpenBLAS", + version=None, + license="BSD 3-clause license", + text=res.read().decode(), + ) + ) + + # libsndfile-binaries + with urllib.request.urlopen( + "https://raw.githubusercontent.com/bastibe/libsndfile-binaries/84cb164928f17c7ca0c1e5c40342c20ce2b90e8c/COPYING" # noqa: B950 + ) as res: + licenses.append( + License( + name="libsndfile-binaries", + version="1.0.28", + license="LGPL-2.1 license", + text=res.read().decode(), + ) + ) + + # libogg + with urllib.request.urlopen( + "https://raw.githubusercontent.com/xiph/ogg/v1.3.2/COPYING" + ) as res: + licenses.append( + License( + name="libogg", + version="1.3.2", + license="BSD 3-clause license", + text=res.read().decode(), + ) + ) + + # libvorbis + with urllib.request.urlopen( + "https://raw.githubusercontent.com/xiph/vorbis/v1.3.5/COPYING" + ) as res: + licenses.append( + License( + name="libvorbis", + version="1.3.5", + license="BSD 3-clause license", + text=res.read().decode(), + ) + ) + + # libflac + with urllib.request.urlopen( + "https://raw.githubusercontent.com/xiph/flac/1.3.2/COPYING.Xiph" + ) as res: + licenses.append( + License( + name="FLAC", + version="1.3.2", + license="Xiph.org's BSD-like license", + text=res.read().decode(), + ) + ) + + # cuda + # license text from CUDA 11.6.2 + # https://developer.nvidia.com/cuda-11-6-2-download-archive?target_os=Windows&target_arch=x86_64&target_version=10&target_type=exe_local # noqa: B950 + # https://developer.download.nvidia.com/compute/cuda/11.6.2/local_installers/cuda_11.6.2_511.65_windows.exe # noqa: B950 + # 
cuda_11.6.2_511.65_windows.exe (cuda_documentation/Doc/EULA.txt) + licenses.append( + License( + name="CUDA Toolkit", + version="11.6.2", + license=None, + text=Path("docs/licenses/cuda/EULA.txt").read_text(encoding="utf8"), + ) + ) + # cudnn + # license text from + # cuDNN v8.4.1 (May 27th, 2022), for CUDA 11.x, cuDNN Library for Windows + # https://developer.nvidia.com/rdp/cudnn-archive # noqa: B950 + # https://developer.download.nvidia.com/compute/redist/cudnn/v8.4.1/local_installers/11.6/cudnn-windows-x86_64-8.4.1.50_cuda11.6-archive.zip # noqa: B950 + # cudnn-windows-x86_64-8.4.1.50_cuda11.6-archive.zip (cudnn-windows-x86_64-8.4.1.50_cuda11.6-archive/LICENSE) # noqa: B950 + licenses.append( + License( + name="cuDNN", + version="8.4.1", + license=None, + text=Path("docs/licenses/cudnn/LICENSE").read_text(encoding="utf8"), + ) + ) + + return licenses + + +if __name__ == "__main__": + import argparse + import sys + + parser = argparse.ArgumentParser() + parser.add_argument("-o", "--output_path", type=str) + args = parser.parse_args() + + output_path = args.output_path + + licenses = generate_licenses() + + # dump + out = Path(output_path).open("w") if output_path else sys.stdout + json.dump( + [asdict(license) for license in licenses], + out, + ) diff --git a/get_cost_candidates.py b/get_cost_candidates.py new file mode 100644 index 0000000000000000000000000000000000000000..072c4b4d57a757c957a0a1e9ab0afb0c5c989cb0 --- /dev/null +++ b/get_cost_candidates.py @@ -0,0 +1,91 @@ +""" +voicevox_engine/part_of_speech_data.pyのcost_candidatesを計算するプログラムです。 +引数のnaist_jdic_pathには、open_jtalkのsrc/mecab-naist-jdic/naist-jdic.csvを指定してください。 + +実行例: +python get_cost_candidates.py --naist_jdic_path=/path/to/naist-jdic.csv \ + --pos=名詞 \ + --pos_detail_1=固有名詞 \ + --pos_detail_2=一般 \ + --pos_detail_3=* + +cost_candidatesの値の詳細は以下の通りです。 +- 1番目の値はnaist_jdic内の同一品詞の最小コストから1を引いたもの、11番目の値は最大コストに1を足したものです。 +- 2番目の値はnaist_jdic内の同一品詞のコストの下位1%、10番目の値は99%の値です。 +- 6番目の値はnaist_jdic内の同一品詞のコストの最頻値です。 
+- 2番目から6番目、6番目から10番目までの値は一定割合で増加するようになっています。 +""" + +import argparse +import statistics +from pathlib import Path +from typing import List + +import numpy as np + + +def get_candidates( + naist_jdic_path: Path, + pos: str, + pos_detail_1: str, + pos_detail_2: str, + pos_detail_3: str, +) -> List[int]: + costs = [] + with naist_jdic_path.open(encoding="utf-8") as f: + for line in f: + ( + _, + _, + _, + _cost, + _pos, + _pos_detail_1, + _pos_detail_2, + _pos_detail_3, + _, + _, + _, + _, + _, + _, + _, + ) = line.split(",") + if (_pos, _pos_detail_1, _pos_detail_2, _pos_detail_3) == ( + pos, + pos_detail_1, + pos_detail_2, + pos_detail_3, + ): + costs.append(int(_cost)) + assert len(costs) > 0 + cost_min = min(costs) - 1 + cost_1per = np.quantile(costs, 0.01).astype(np.int64) + cost_mode = statistics.mode(costs) + cost_99per = np.quantile(costs, 0.99).astype(np.int64) + cost_max = max(costs) + 1 + return ( + [cost_min] + + [int(cost_1per + (cost_mode - cost_1per) * i / 4) for i in range(5)] + + [int(cost_mode + (cost_99per - cost_mode) * i / 4) for i in range(1, 5)] + + [cost_max] + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--naist_jdic_path", type=Path) + parser.add_argument("--pos", type=str) + parser.add_argument("--pos_detail_1", type=str) + parser.add_argument("--pos_detail_2", type=str) + parser.add_argument("--pos_detail_3", type=str) + args = parser.parse_args() + print( + get_candidates( + naist_jdic_path=args.naist_jdic_path, + pos=args.pos, + pos_detail_1=args.pos_detail_1, + pos_detail_2=args.pos_detail_2, + pos_detail_3=args.pos_detail_3, + ) + ) diff --git a/make_docs.py b/make_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..d10bd1aa40887783ba8cb90dabda031dce213be0 --- /dev/null +++ b/make_docs.py @@ -0,0 +1,33 @@ +import json + +from voicevox_engine.dev.core import mock as core +from voicevox_engine.dev.synthesis_engine.mock import MockSynthesisEngine +from 
import json

from voicevox_engine.dev.core import mock as core
from voicevox_engine.dev.synthesis_engine.mock import MockSynthesisEngine
from voicevox_engine.setting import USER_SETTING_PATH, SettingLoader

if __name__ == "__main__":
    import run

    # Build the FastAPI app with the mock synthesis engine so the docs can be
    # generated without the real VOICEVOX core shared library.
    app = run.generate_app(
        synthesis_engines={"mock": MockSynthesisEngine(speakers=core.metas())},
        latest_core_version="mock",
        setting_loader=SettingLoader(USER_SETTING_PATH),
    )
    # Render a ReDoc page with the OpenAPI schema embedded inline.
    # Write with an explicit UTF-8 encoding: the schema contains Japanese
    # text, and the platform default encoding (e.g. cp932 on Windows) would
    # corrupt it or raise UnicodeEncodeError.
    # NOTE(review): the HTML template below was reconstructed from a garbled
    # extraction — confirm it matches the intended ReDoc page markup.
    with open("docs/api/index.html", "w", encoding="utf-8") as f:
        f.write(
            """<!DOCTYPE html>
<html>
<head>
    <title>voicevox_engine API Document</title>
    <meta charset="utf-8">
</head>
<body>
    <div id="redoc-container"></div>
    <script src="https://cdn.jsdelivr.net/npm/redoc/bundles/redoc.standalone.js"></script>
    <script>
        Redoc.init(%s, {"hideHover": true}, document.getElementById("redoc-container"))
    </script>
</body>
</html>"""
            % json.dumps(app.openapi())
        )
["trio (>=0.16,<0.22)"] + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + +[[package]] +name = "asgiref" +version = "3.6.0" +description = "ASGI specs, helper code, and adapters" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "asgiref-3.6.0-py3-none-any.whl", hash = "sha256:71e68008da809b957b7ee4b43dbccff33d1b23519fb8344e33f049897077afac"}, + {file = "asgiref-3.6.0.tar.gz", hash = "sha256:9567dfe7bd8d3c8c892227827c41cce860b368104c3431da67a0c5a65a949506"}, +] + +[package.extras] +tests = ["mypy (>=0.800)", "pytest", "pytest-asyncio"] + +[[package]] +name = "atomicwrites" +version = "1.4.0" +description = "Atomic file writes." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] + +[[package]] +name = "attrs" +version = "22.2.0" +description = "Classes Without Boilerplate" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "attrs-22.2.0-py3-none-any.whl", hash = "sha256:29e95c7f6778868dbd49170f98f8818f78f3dc5e0e37c0b1f474e3561b240836"}, + {file = "attrs-22.2.0.tar.gz", hash = "sha256:c9227bfc2f01993c03f68db37d1d15c9690188323c067c641f1a35ca58185f99"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage-enable-subprocess", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope.interface"] +tests = ["attrs[tests-no-zope]", "zope.interface"] +tests-no-zope = ["cloudpickle", "cloudpickle", "hypothesis", "hypothesis", "mypy (>=0.971,<0.990)", "mypy (>=0.971,<0.990)", "pympler", "pympler", "pytest (>=4.3.0)", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-mypy-plugins", "pytest-xdist[psutil]", "pytest-xdist[psutil]"] + +[[package]] +name = "black" +version = "22.12.0" +description = "The uncompromising code formatter." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "black-22.12.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9eedd20838bd5d75b80c9f5487dbcb06836a43833a37846cf1d8c1cc01cef59d"}, + {file = "black-22.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:159a46a4947f73387b4d83e87ea006dbb2337eab6c879620a3ba52699b1f4351"}, + {file = "black-22.12.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d30b212bffeb1e252b31dd269dfae69dd17e06d92b87ad26e23890f3efea366f"}, + {file = "black-22.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:7412e75863aa5c5411886804678b7d083c7c28421210180d67dfd8cf1221e1f4"}, + {file = "black-22.12.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c116eed0efb9ff870ded8b62fe9f28dd61ef6e9ddd28d83d7d264a38417dcee2"}, + {file = "black-22.12.0-cp37-cp37m-win_amd64.whl", hash = "sha256:1f58cbe16dfe8c12b7434e50ff889fa479072096d79f0a7f25e4ab8e94cd8350"}, + {file = "black-22.12.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:77d86c9f3db9b1bf6761244bc0b3572a546f5fe37917a044e02f3166d5aafa7d"}, + {file = "black-22.12.0-cp38-cp38-win_amd64.whl", hash = "sha256:82d9fe8fee3401e02e79767016b4907820a7dc28d70d137eb397b92ef3cc5bfc"}, + {file = "black-22.12.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:101c69b23df9b44247bd88e1d7e90154336ac4992502d4197bdac35dd7ee3320"}, + {file = "black-22.12.0-cp39-cp39-win_amd64.whl", hash = "sha256:559c7a1ba9a006226f09e4916060982fd27334ae1998e7a38b3f33a37f7a2148"}, + {file = "black-22.12.0-py3-none-any.whl", hash = "sha256:436cc9167dd28040ad90d3b404aec22cedf24a6e4d7de221bec2730ec0c97bcf"}, + {file = "black-22.12.0.tar.gz", hash = "sha256:229351e5a18ca30f447bf724d007f890f97e13af070bb6ad4c0a441cd7596a2f"}, +] + +[package.dependencies] +click = ">=8.0.0" +mypy-extensions = ">=0.4.3" +pathspec = ">=0.9.0" +platformdirs = ">=2" + +[package.extras] +colorama = ["colorama 
(>=0.4.3)"] +d = ["aiohttp (>=3.7.4)"] +jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] +uvloop = ["uvloop (>=0.15.2)"] + +[[package]] +name = "cachecontrol" +version = "0.12.11" +description = "httplib2 caching for requests" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "CacheControl-0.12.11-py2.py3-none-any.whl", hash = "sha256:2c75d6a8938cb1933c75c50184549ad42728a27e9f6b92fd677c3151aa72555b"}, + {file = "CacheControl-0.12.11.tar.gz", hash = "sha256:a5b9fcc986b184db101aa280b42ecdcdfc524892596f606858e0b7a8b4d9e144"}, +] + +[package.dependencies] +lockfile = {version = ">=0.9", optional = true, markers = "extra == \"filecache\""} +msgpack = ">=0.5.2" +requests = "*" + +[package.extras] +filecache = ["lockfile (>=0.9)"] +redis = ["redis (>=2.10.5)"] + +[[package]] +name = "certifi" +version = "2022.12.7" +description = "Python package for providing Mozilla's CA Bundle." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"}, + {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"}, +] + +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." 
+category = "main" +optional = false +python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = 
"cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "cfgv" 
+version = "3.3.1" +description = "Validate configuration and produce human readable error messages." +category = "dev" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"}, + {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"}, +] + +[[package]] +name = "charset-normalizer" +version = "2.1.1" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +category = "main" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "charset-normalizer-2.1.1.tar.gz", hash = "sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845"}, + {file = "charset_normalizer-2.1.1-py3-none-any.whl", hash = "sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f"}, +] + +[package.extras] +unicode-backport = ["unicodedata2"] + +[[package]] +name = "cleo" +version = "2.0.1" +description = "Cleo allows you to create beautiful and testable command-line interfaces." 
+category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "cleo-2.0.1-py3-none-any.whl", hash = "sha256:6eb133670a3ed1f3b052d53789017b6e50fca66d1287e6e6696285f4cb8ea448"}, + {file = "cleo-2.0.1.tar.gz", hash = "sha256:eb4b2e1f3063c11085cebe489a6e9124163c226575a3c3be69b2e51af4a15ec5"}, +] + +[package.dependencies] +crashtest = ">=0.4.1,<0.5.0" +rapidfuzz = ">=2.2.0,<3.0.0" + +[[package]] +name = "click" +version = "8.0.4" +description = "Composable command line interface toolkit" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "click-8.0.4-py3-none-any.whl", hash = "sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1"}, + {file = "click-8.0.4.tar.gz", hash = "sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.4" +description = "Cross-platform colored terminal text." +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, + {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, +] + +[[package]] +name = "colorlog" +version = "4.8.0" +description = "Log formatting with colors!" 
+category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "colorlog-4.8.0-py2.py3-none-any.whl", hash = "sha256:3dd15cb27e8119a24c1a7b5c93f9f3b455855e0f73993b1c25921b2f646f1dcd"}, + {file = "colorlog-4.8.0.tar.gz", hash = "sha256:59b53160c60902c405cdec28d38356e09d40686659048893e026ecbd589516b1"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} + +[[package]] +name = "coverage" +version = "5.5" +description = "Code coverage measurement for Python" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" +files = [ + {file = "coverage-5.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:b6d534e4b2ab35c9f93f46229363e17f63c53ad01330df9f2d6bd1187e5eaacf"}, + {file = "coverage-5.5-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:b7895207b4c843c76a25ab8c1e866261bcfe27bfaa20c192de5190121770672b"}, + {file = "coverage-5.5-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:c2723d347ab06e7ddad1a58b2a821218239249a9e4365eaff6649d31180c1669"}, + {file = "coverage-5.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:900fbf7759501bc7807fd6638c947d7a831fc9fdf742dc10f02956ff7220fa90"}, + {file = "coverage-5.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:004d1880bed2d97151facef49f08e255a20ceb6f9432df75f4eef018fdd5a78c"}, + {file = "coverage-5.5-cp27-cp27m-win32.whl", hash = "sha256:06191eb60f8d8a5bc046f3799f8a07a2d7aefb9504b0209aff0b47298333302a"}, + {file = "coverage-5.5-cp27-cp27m-win_amd64.whl", hash = "sha256:7501140f755b725495941b43347ba8a2777407fc7f250d4f5a7d2a1050ba8e82"}, + {file = "coverage-5.5-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:372da284cfd642d8e08ef606917846fa2ee350f64994bebfbd3afb0040436905"}, + {file = "coverage-5.5-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:8963a499849a1fc54b35b1c9f162f4108017b2e6db2c46c1bed93a72262ed083"}, + {file = "coverage-5.5-cp27-cp27mu-manylinux2010_i686.whl", hash = 
"sha256:869a64f53488f40fa5b5b9dcb9e9b2962a66a87dab37790f3fcfb5144b996ef5"}, + {file = "coverage-5.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:4a7697d8cb0f27399b0e393c0b90f0f1e40c82023ea4d45d22bce7032a5d7b81"}, + {file = "coverage-5.5-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:8d0a0725ad7c1a0bcd8d1b437e191107d457e2ec1084b9f190630a4fb1af78e6"}, + {file = "coverage-5.5-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:51cb9476a3987c8967ebab3f0fe144819781fca264f57f89760037a2ea191cb0"}, + {file = "coverage-5.5-cp310-cp310-win_amd64.whl", hash = "sha256:c0891a6a97b09c1f3e073a890514d5012eb256845c451bd48f7968ef939bf4ae"}, + {file = "coverage-5.5-cp35-cp35m-macosx_10_9_x86_64.whl", hash = "sha256:3487286bc29a5aa4b93a072e9592f22254291ce96a9fbc5251f566b6b7343cdb"}, + {file = "coverage-5.5-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:deee1077aae10d8fa88cb02c845cfba9b62c55e1183f52f6ae6a2df6a2187160"}, + {file = "coverage-5.5-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:f11642dddbb0253cc8853254301b51390ba0081750a8ac03f20ea8103f0c56b6"}, + {file = "coverage-5.5-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:6c90e11318f0d3c436a42409f2749ee1a115cd8b067d7f14c148f1ce5574d701"}, + {file = "coverage-5.5-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:30c77c1dc9f253283e34c27935fded5015f7d1abe83bc7821680ac444eaf7793"}, + {file = "coverage-5.5-cp35-cp35m-win32.whl", hash = "sha256:9a1ef3b66e38ef8618ce5fdc7bea3d9f45f3624e2a66295eea5e57966c85909e"}, + {file = "coverage-5.5-cp35-cp35m-win_amd64.whl", hash = "sha256:972c85d205b51e30e59525694670de6a8a89691186012535f9d7dbaa230e42c3"}, + {file = "coverage-5.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:af0e781009aaf59e25c5a678122391cb0f345ac0ec272c7961dc5455e1c40066"}, + {file = "coverage-5.5-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:74d881fc777ebb11c63736622b60cb9e4aee5cace591ce274fb69e582a12a61a"}, + {file = "coverage-5.5-cp36-cp36m-manylinux1_x86_64.whl", hash = 
"sha256:92b017ce34b68a7d67bd6d117e6d443a9bf63a2ecf8567bb3d8c6c7bc5014465"}, + {file = "coverage-5.5-cp36-cp36m-manylinux2010_i686.whl", hash = "sha256:d636598c8305e1f90b439dbf4f66437de4a5e3c31fdf47ad29542478c8508bbb"}, + {file = "coverage-5.5-cp36-cp36m-manylinux2010_x86_64.whl", hash = "sha256:41179b8a845742d1eb60449bdb2992196e211341818565abded11cfa90efb821"}, + {file = "coverage-5.5-cp36-cp36m-win32.whl", hash = "sha256:040af6c32813fa3eae5305d53f18875bedd079960822ef8ec067a66dd8afcd45"}, + {file = "coverage-5.5-cp36-cp36m-win_amd64.whl", hash = "sha256:5fec2d43a2cc6965edc0bb9e83e1e4b557f76f843a77a2496cbe719583ce8184"}, + {file = "coverage-5.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:18ba8bbede96a2c3dde7b868de9dcbd55670690af0988713f0603f037848418a"}, + {file = "coverage-5.5-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:2910f4d36a6a9b4214bb7038d537f015346f413a975d57ca6b43bf23d6563b53"}, + {file = "coverage-5.5-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:f0b278ce10936db1a37e6954e15a3730bea96a0997c26d7fee88e6c396c2086d"}, + {file = "coverage-5.5-cp37-cp37m-manylinux2010_i686.whl", hash = "sha256:796c9c3c79747146ebd278dbe1e5c5c05dd6b10cc3bcb8389dfdf844f3ead638"}, + {file = "coverage-5.5-cp37-cp37m-manylinux2010_x86_64.whl", hash = "sha256:53194af30d5bad77fcba80e23a1441c71abfb3e01192034f8246e0d8f99528f3"}, + {file = "coverage-5.5-cp37-cp37m-win32.whl", hash = "sha256:184a47bbe0aa6400ed2d41d8e9ed868b8205046518c52464fde713ea06e3a74a"}, + {file = "coverage-5.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2949cad1c5208b8298d5686d5a85b66aae46d73eec2c3e08c817dd3513e5848a"}, + {file = "coverage-5.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:217658ec7187497e3f3ebd901afdca1af062b42cfe3e0dafea4cced3983739f6"}, + {file = "coverage-5.5-cp38-cp38-manylinux1_i686.whl", hash = "sha256:1aa846f56c3d49205c952d8318e76ccc2ae23303351d9270ab220004c580cfe2"}, + {file = "coverage-5.5-cp38-cp38-manylinux1_x86_64.whl", hash = 
"sha256:24d4a7de75446be83244eabbff746d66b9240ae020ced65d060815fac3423759"}, + {file = "coverage-5.5-cp38-cp38-manylinux2010_i686.whl", hash = "sha256:d1f8bf7b90ba55699b3a5e44930e93ff0189aa27186e96071fac7dd0d06a1873"}, + {file = "coverage-5.5-cp38-cp38-manylinux2010_x86_64.whl", hash = "sha256:970284a88b99673ccb2e4e334cfb38a10aab7cd44f7457564d11898a74b62d0a"}, + {file = "coverage-5.5-cp38-cp38-win32.whl", hash = "sha256:01d84219b5cdbfc8122223b39a954820929497a1cb1422824bb86b07b74594b6"}, + {file = "coverage-5.5-cp38-cp38-win_amd64.whl", hash = "sha256:2e0d881ad471768bf6e6c2bf905d183543f10098e3b3640fc029509530091502"}, + {file = "coverage-5.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d1f9ce122f83b2305592c11d64f181b87153fc2c2bbd3bb4a3dde8303cfb1a6b"}, + {file = "coverage-5.5-cp39-cp39-manylinux1_i686.whl", hash = "sha256:13c4ee887eca0f4c5a247b75398d4114c37882658300e153113dafb1d76de529"}, + {file = "coverage-5.5-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:52596d3d0e8bdf3af43db3e9ba8dcdaac724ba7b5ca3f6358529d56f7a166f8b"}, + {file = "coverage-5.5-cp39-cp39-manylinux2010_i686.whl", hash = "sha256:2cafbbb3af0733db200c9b5f798d18953b1a304d3f86a938367de1567f4b5bff"}, + {file = "coverage-5.5-cp39-cp39-manylinux2010_x86_64.whl", hash = "sha256:44d654437b8ddd9eee7d1eaee28b7219bec228520ff809af170488fd2fed3e2b"}, + {file = "coverage-5.5-cp39-cp39-win32.whl", hash = "sha256:d314ed732c25d29775e84a960c3c60808b682c08d86602ec2c3008e1202e3bb6"}, + {file = "coverage-5.5-cp39-cp39-win_amd64.whl", hash = "sha256:13034c4409db851670bc9acd836243aeee299949bd5673e11844befcb0149f03"}, + {file = "coverage-5.5-pp36-none-any.whl", hash = "sha256:f030f8873312a16414c0d8e1a1ddff2d3235655a2174e3648b4fa66b3f2f1079"}, + {file = "coverage-5.5-pp37-none-any.whl", hash = "sha256:2a3859cb82dcbda1cfd3e6f71c27081d18aa251d20a17d87d26d4cd216fb0af4"}, + {file = "coverage-5.5.tar.gz", hash = "sha256:ebe78fe9a0e874362175b02371bdfbee64d8edc42a044253ddf4ee7d3c15212c"}, +] + +[package.extras] +toml = 
["toml"] + +[[package]] +name = "coveralls" +version = "3.2.0" +description = "Show coverage stats online via coveralls.io" +category = "dev" +optional = false +python-versions = ">= 3.5" +files = [ + {file = "coveralls-3.2.0-py2.py3-none-any.whl", hash = "sha256:aedfcc5296b788ebaf8ace8029376e5f102f67c53d1373f2e821515c15b36527"}, + {file = "coveralls-3.2.0.tar.gz", hash = "sha256:15a987d9df877fff44cd81948c5806ffb6eafb757b3443f737888358e96156ee"}, +] + +[package.dependencies] +coverage = ">=4.1,<6.0" +docopt = ">=0.6.1" +requests = ">=1.0.0" + +[package.extras] +yaml = ["PyYAML (>=3.10)"] + +[[package]] +name = "crashtest" +version = "0.4.1" +description = "Manage Python errors with ease" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "crashtest-0.4.1-py3-none-any.whl", hash = "sha256:8d23eac5fa660409f57472e3851dab7ac18aba459a8d19cbbba86d3d5aecd2a5"}, + {file = "crashtest-0.4.1.tar.gz", hash = "sha256:80d7b1f316ebfbd429f648076d6275c877ba30ba48979de4191714a75266f0ce"}, +] + +[[package]] +name = "cryptography" +version = "39.0.0" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
+category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "cryptography-39.0.0-cp36-abi3-macosx_10_12_universal2.whl", hash = "sha256:c52a1a6f81e738d07f43dab57831c29e57d21c81a942f4602fac7ee21b27f288"}, + {file = "cryptography-39.0.0-cp36-abi3-macosx_10_12_x86_64.whl", hash = "sha256:80ee674c08aaef194bc4627b7f2956e5ba7ef29c3cc3ca488cf15854838a8f72"}, + {file = "cryptography-39.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:887cbc1ea60786e534b00ba8b04d1095f4272d380ebd5f7a7eb4cc274710fad9"}, + {file = "cryptography-39.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f97109336df5c178ee7c9c711b264c502b905c2d2a29ace99ed761533a3460f"}, + {file = "cryptography-39.0.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a6915075c6d3a5e1215eab5d99bcec0da26036ff2102a1038401d6ef5bef25b"}, + {file = "cryptography-39.0.0-cp36-abi3-manylinux_2_24_x86_64.whl", hash = "sha256:76c24dd4fd196a80f9f2f5405a778a8ca132f16b10af113474005635fe7e066c"}, + {file = "cryptography-39.0.0-cp36-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:bae6c7f4a36a25291b619ad064a30a07110a805d08dc89984f4f441f6c1f3f96"}, + {file = "cryptography-39.0.0-cp36-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:875aea1039d78557c7c6b4db2fe0e9d2413439f4676310a5f269dd342ca7a717"}, + {file = "cryptography-39.0.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:f6c0db08d81ead9576c4d94bbb27aed8d7a430fa27890f39084c2d0e2ec6b0df"}, + {file = "cryptography-39.0.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f3ed2d864a2fa1666e749fe52fb8e23d8e06b8012e8bd8147c73797c506e86f1"}, + {file = "cryptography-39.0.0-cp36-abi3-win32.whl", hash = "sha256:f671c1bb0d6088e94d61d80c606d65baacc0d374e67bf895148883461cd848de"}, + {file = "cryptography-39.0.0-cp36-abi3-win_amd64.whl", hash = "sha256:e324de6972b151f99dc078defe8fb1b0a82c6498e37bff335f5bc6b1e3ab5a1e"}, + {file = 
"cryptography-39.0.0-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:754978da4d0457e7ca176f58c57b1f9de6556591c19b25b8bcce3c77d314f5eb"}, + {file = "cryptography-39.0.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ee1fd0de9851ff32dbbb9362a4d833b579b4a6cc96883e8e6d2ff2a6bc7104f"}, + {file = "cryptography-39.0.0-pp38-pypy38_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:fec8b932f51ae245121c4671b4bbc030880f363354b2f0e0bd1366017d891458"}, + {file = "cryptography-39.0.0-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:407cec680e811b4fc829de966f88a7c62a596faa250fc1a4b520a0355b9bc190"}, + {file = "cryptography-39.0.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7dacfdeee048814563eaaec7c4743c8aea529fe3dd53127313a792f0dadc1773"}, + {file = "cryptography-39.0.0-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:ad04f413436b0781f20c52a661660f1e23bcd89a0e9bb1d6d20822d048cf2856"}, + {file = "cryptography-39.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50386acb40fbabbceeb2986332f0287f50f29ccf1497bae31cf5c3e7b4f4b34f"}, + {file = "cryptography-39.0.0-pp39-pypy39_pp73-manylinux_2_24_x86_64.whl", hash = "sha256:e5d71c5d5bd5b5c3eebcf7c5c2bb332d62ec68921a8c593bea8c394911a005ce"}, + {file = "cryptography-39.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:844ad4d7c3850081dffba91cdd91950038ee4ac525c575509a42d3fc806b83c8"}, + {file = "cryptography-39.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e0a05aee6a82d944f9b4edd6a001178787d1546ec7c6223ee9a848a7ade92e39"}, + {file = "cryptography-39.0.0.tar.gz", hash = "sha256:f964c7dcf7802d133e8dbd1565914fa0194f9d683d82411989889ecd701e8adf"}, +] + +[package.dependencies] +cffi = ">=1.12" + +[package.extras] +docs = ["sphinx (>=1.6.5,!=1.8.0,!=3.1.0,!=3.1.1,!=5.2.0,!=5.2.0.post0)", "sphinx-rtd-theme"] +docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"] +pep8test = ["black", 
"ruff"] +sdist = ["setuptools-rust (>=0.11.4)"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["hypothesis (>=1.11.4,!=3.79.2)", "iso8601", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-subtests", "pytest-xdist", "pytz"] + +[[package]] +name = "cython" +version = "0.29.34" +description = "The Cython compiler for writing C extensions for the Python language." +category = "main" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "Cython-0.29.34-cp27-cp27m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:742544024ddb74314e2d597accdb747ed76bd126e61fcf49940a5b5be0a8f381"}, + {file = "Cython-0.29.34-cp27-cp27m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:03daae07f8cbf797506446adae512c3dd86e7f27a62a541fa1ee254baf43e32c"}, + {file = "Cython-0.29.34-cp27-cp27mu-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5a8de3e793a576e40ca9b4f5518610cd416273c7dc5e254115656b6e4ec70663"}, + {file = "Cython-0.29.34-cp27-cp27mu-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:60969d38e6a456a67e7ef8ae20668eff54e32ba439d4068ccf2854a44275a30f"}, + {file = "Cython-0.29.34-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:21b88200620d80cfe193d199b259cdad2b9af56f916f0f7f474b5a3631ca0caa"}, + {file = "Cython-0.29.34-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:308c8f1e58bf5e6e8a1c4dcf8abbd2d13d0f9b1e582f4d9ae8b89857342d8bb5"}, + {file = "Cython-0.29.34-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:d8f822fb6ecd5d88c42136561f82960612421154fc5bf23c57103a367bb91356"}, + {file = "Cython-0.29.34-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56866323f1660cecb4d5ff3a1fba92a56b91b7cfae0a8253777aa4bdb3bdf9a8"}, + {file = "Cython-0.29.34-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = 
"sha256:e971db8aeb12e7c0697cefafe65eefcc33ff1224ae3d8c7f83346cbc42c6c270"}, + {file = "Cython-0.29.34-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:e4401270b0dc464c23671e2e9d52a60985f988318febaf51b047190e855bbe7d"}, + {file = "Cython-0.29.34-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:dce0a36d163c05ae8b21200059511217d79b47baf2b7b0f926e8367bd7a3cc24"}, + {file = "Cython-0.29.34-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dbd79221869ee9a6ccc4953b2c8838bb6ae08ab4d50ea4b60d7894f03739417b"}, + {file = "Cython-0.29.34-cp35-cp35m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a0f4229df10bc4545ebbeaaf96ebb706011d8b333e54ed202beb03f2bee0a50e"}, + {file = "Cython-0.29.34-cp35-cp35m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:fd1ea21f1cebf33ae288caa0f3e9b5563a709f4df8925d53bad99be693fc0d9b"}, + {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:d7ef5f68f4c5baa93349ea54a352f8716d18bee9a37f3e93eff38a5d4e9b7262"}, + {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:459994d1de0f99bb18fad9f2325f760c4b392b1324aef37bcc1cd94922dfce41"}, + {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:1d6c809e2f9ce5950bbc52a1d2352ef3d4fc56186b64cb0d50c8c5a3c1d17661"}, + {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f674ceb5f722d364395f180fbac273072fc1a266aab924acc9cfd5afc645aae1"}, + {file = "Cython-0.29.34-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:9489de5b2044dcdfd9d6ca8242a02d560137b3c41b1f5ae1c4f6707d66d6e44d"}, + {file = "Cython-0.29.34-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:5c121dc185040f4333bfded68963b4529698e1b6d994da56be32c97a90c896b6"}, + {file = 
"Cython-0.29.34-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:b6149f7cc5b31bccb158c5b968e5a8d374fdc629792e7b928a9b66e08b03fca5"}, + {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0ab3cbf3d62b0354631a45dc93cfcdf79098663b1c65a6033af4a452b52217a7"}, + {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:4a2723447d1334484681d5aede34184f2da66317891f94b80e693a2f96a8f1a7"}, + {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e40cf86aadc29ecd1cb6de67b0d9488705865deea4fc185c7ad56d7a6fc78703"}, + {file = "Cython-0.29.34-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:8c3cd8bb8e880a3346f5685601004d96e0a2221e73edcaeea57ea848618b4ac6"}, + {file = "Cython-0.29.34-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0e9032cd650b0cb1d2c2ef2623f5714c14d14c28d7647d589c3eeed0baf7428e"}, + {file = "Cython-0.29.34-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:bdb3285660e3068438791ace7dd7b1efd6b442a10b5c8d7a4f0c9d184d08c8ed"}, + {file = "Cython-0.29.34-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:a8ad755f9364e720f10a36734a1c7a5ced5c679446718b589259261438a517c9"}, + {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:7595d29eaee95633dd8060f50f0e54b27472d01587659557ebcfe39da3ea946b"}, + {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e6ef7879668214d80ea3914c17e7d4e1ebf4242e0dd4dabe95ca5ccbe75589a5"}, + {file = "Cython-0.29.34-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:ccb223b5f0fd95d8d27561efc0c14502c0945f1a32274835831efa5d5baddfc1"}, + {file = "Cython-0.29.34-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:11b1b278b8edef215caaa5250ad65a10023bfa0b5a93c776552248fc6f60098d"}, + {file = "Cython-0.29.34-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:5718319a01489688fdd22ddebb8e2fcbbd60be5f30de4336ea7063c3ae29fbe5"}, + {file = "Cython-0.29.34-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:cfb2302ef617d647ee590a4c0a00ba3c2da05f301dcefe7721125565d2e51351"}, + {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_24_i686.whl", hash = "sha256:67b850cf46b861bc27226d31e1d87c0e69869a02f8d3cc5d5bef549764029879"}, + {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:0963266dad685812c1dbb758fcd4de78290e3adc7db271c8664dcde27380b13e"}, + {file = "Cython-0.29.34-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7879992487d9060a61393eeefe00d299210256928dce44d887b6be313d342bac"}, + {file = "Cython-0.29.34-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:44733366f1604b0c327613b6918469284878d2f5084297d10d26072fc6948d51"}, + {file = "Cython-0.29.34-py2.py3-none-any.whl", hash = "sha256:be4f6b7be75a201c290c8611c0978549c60353890204573078e865423dbe3c83"}, + {file = "Cython-0.29.34.tar.gz", hash = "sha256:1909688f5d7b521a60c396d20bba9e47a1b2d2784bfb085401e1e1e7d29a29a8"}, +] + +[[package]] +name = "dacite" +version = "1.7.0" +description = "Simple creation of data classes from dictionaries." 
+category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "dacite-1.7.0-py3-none-any.whl", hash = "sha256:1cf5f7f548aecaa80c800441f8b9f4d656f7601ccc06c9c8c96ee40016a021af"}, + {file = "dacite-1.7.0.tar.gz", hash = "sha256:683a1d72c8fa0c0341034ddabc7a5c6f6ddc188033a26784417a9429dd3ffa51"}, +] + +[package.extras] +dev = ["black", "coveralls", "mypy", "pylint", "pytest (>=5)", "pytest-cov"] + +[[package]] +name = "distlib" +version = "0.3.6" +description = "Distribution utilities" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "distlib-0.3.6-py2.py3-none-any.whl", hash = "sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e"}, + {file = "distlib-0.3.6.tar.gz", hash = "sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46"}, +] + +[[package]] +name = "docopt" +version = "0.6.2" +description = "Pythonic argument parser, that will make you smile" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "docopt-0.6.2.tar.gz", hash = "sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"}, +] + +[[package]] +name = "dulwich" +version = "0.20.50" +description = "Python Git Library" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "dulwich-0.20.50-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:97f02f8d500d4af08dc022d697c56e8539171acc3f575c2fe9acf3b078e5c8c9"}, + {file = "dulwich-0.20.50-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7301773e5cc16d521bc6490e73772a86a4d1d0263de506f08b54678cc4e2f061"}, + {file = "dulwich-0.20.50-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b70106580ed11f45f4c32d2831d0c9c9f359bc2415fff4a6be443e3a36811398"}, + {file = "dulwich-0.20.50-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0f9c4f2455f966cad94648278fa9972e4695b35d04f82792fa58e1ea15dd83f0"}, + {file = 
"dulwich-0.20.50-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9163fbb021a8ad9c35a0814a5eedf45a8eb3a0b764b865d7016d901fc5a947fc"}, + {file = "dulwich-0.20.50-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:322ff8ff6aa4d6d36294cd36de1c84767eb1903c7db3e7b4475ad091febf5363"}, + {file = "dulwich-0.20.50-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5d3290a45651c8e534f8e83ae2e30322aefdd162f0f338bae2e79a6ee5a87513"}, + {file = "dulwich-0.20.50-cp310-cp310-win32.whl", hash = "sha256:80ab07131a6e68594441f5c4767e9e44e87fceafc3e347e541c928a18c679bd8"}, + {file = "dulwich-0.20.50-cp310-cp310-win_amd64.whl", hash = "sha256:eefe786a6010f8546baac4912113eeed4e397ddb8c433a345b548a04d4176496"}, + {file = "dulwich-0.20.50-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:df3562dde3079d57287c233d45b790bc967c5aae975c9a7b07ca30e60e055512"}, + {file = "dulwich-0.20.50-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:e1ae18d5805f0c0c5dac65795f8d48660437166b12ee2c0ffea95bfdbf9c1051"}, + {file = "dulwich-0.20.50-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d2f7df39bd1378d3b0bfb3e7fc930fd0191924af1f0ef587bcd9946afe076c06"}, + {file = "dulwich-0.20.50-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:731e7f319b34251fadeb362ada1d52cc932369d9cdfa25c0e41150cda28773d0"}, + {file = "dulwich-0.20.50-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b4d11d44176e5d2fa8271fc86ad1e0a8731b9ad8f77df64c12846b30e16135eb"}, + {file = "dulwich-0.20.50-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:7aaabb8e4beadd53f75f853a981caaadef3ef130e5645c902705704eaf136daa"}, + {file = "dulwich-0.20.50-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c3dc9f97ec8d3db08d9723b9fd06f3e52c15b84c800d153cfb59b0a3dc8b8d40"}, + {file = "dulwich-0.20.50-cp311-cp311-win32.whl", hash = "sha256:3b1964fa80cafd5a1fd71615b0313daf6f3295c6ab05656ea0c1d2423539904a"}, + {file = 
"dulwich-0.20.50-cp311-cp311-win_amd64.whl", hash = "sha256:a24a3893108f3b97beb958670d5f3f2a3bec73a1fe18637a572a85abd949a1c4"}, + {file = "dulwich-0.20.50-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:6d409a282f8848fd6c8d7c7545ad2f75c16de5d5977de202642f1d50fdaac554"}, + {file = "dulwich-0.20.50-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5411d0f1092152e1c0bb916ae490fe181953ae1b8d13f4e68661253e10b78dbb"}, + {file = "dulwich-0.20.50-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6343569f998ce429e2a5d813c56768ac51b496522401db950f0aa44240bfa901"}, + {file = "dulwich-0.20.50-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a405cd236766060894411614a272cfb86fe86cde5ca73ef264fc4fa5a715fff4"}, + {file = "dulwich-0.20.50-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:ee0f9b02019c0ea84cdd31c00a0c283669b771c85612997a911715cf84e33d99"}, + {file = "dulwich-0.20.50-cp36-cp36m-win32.whl", hash = "sha256:2644466270267270f2157ea6f1c0aa224f6f3bf06a307fc39954e6b4b3d82bae"}, + {file = "dulwich-0.20.50-cp36-cp36m-win_amd64.whl", hash = "sha256:d4629635a97e3af1b5da48071e00c8e70fad85f3266fadabe1f5a8f49172c507"}, + {file = "dulwich-0.20.50-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0e4862f318d99cc8a500e3622a89613a88c07d957a0f628cdc2ed86addff790f"}, + {file = "dulwich-0.20.50-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c96e3fb9d48c0454dc242c7accc7819780c9a7f29e441a9eff12361ed0fa35f9"}, + {file = "dulwich-0.20.50-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cc6092a4f0bbbff2e553e87a9c6325955b64ea43fca21297c8182e19ae8a43c"}, + {file = "dulwich-0.20.50-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:519b627d49d273e2fd01c79d09e578675ca6cd05193c1787e9ef165c9a1d66ea"}, + {file = "dulwich-0.20.50-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:6a75cab01b909c4c683c2083e060e378bc01701b7366b5a7d9846ef6d3b9e3d5"}, + {file = 
"dulwich-0.20.50-cp37-cp37m-win32.whl", hash = "sha256:ea8ffe26d91dbcd5580dbd5a07270a12ea57b091604d77184da0a0d9fad50ed3"}, + {file = "dulwich-0.20.50-cp37-cp37m-win_amd64.whl", hash = "sha256:8f3af857f94021cae1322d86925bfc0dd31e501e885ab5db275473bfac0bb39d"}, + {file = "dulwich-0.20.50-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:3fb35cedb1243bc420d885ef5b4afd642c6ac8f07ddfc7fdbca1becf9948bf7e"}, + {file = "dulwich-0.20.50-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4bb23a9cec63e16c0e432335f068169b73dd44fa9318dd7cd7a4ca83607ff367"}, + {file = "dulwich-0.20.50-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5267619b34ddaf8d9a6b841492cd17a971fd25bf9a5657f2de928385c3a08b94"}, + {file = "dulwich-0.20.50-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9091f1d53a3c0747cbf0bd127c64e7f09b770264d8fb53e284383fcdf69154e7"}, + {file = "dulwich-0.20.50-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6ec7c8fea2b44187a3b545e6c11ab9947ffb122647b07abcdb7cc3aaa770c0e"}, + {file = "dulwich-0.20.50-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:11b180b80363b4fc70664197028181a17ae4c52df9965a29b62a6c52e40c2dbe"}, + {file = "dulwich-0.20.50-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:c83e7840d9d0a94d7033bc109efe0c22dfcdcd816bcd4469085e42809e3bf5ba"}, + {file = "dulwich-0.20.50-cp38-cp38-win32.whl", hash = "sha256:c075f69c2de19d9fd97e3b70832d2b42c6a4a5d909b3ffd1963b67d86029f95f"}, + {file = "dulwich-0.20.50-cp38-cp38-win_amd64.whl", hash = "sha256:06775c5713cfeda778c7c67d4422b5e7554d3a7f644f1dde646cdf486a30285a"}, + {file = "dulwich-0.20.50-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:49f66f1c057c18d7d60363f461f4ab8329320fbe1f02a7a33c255864a7d3c942"}, + {file = "dulwich-0.20.50-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4e541cd690a5e3d55082ed51732d755917e933cddeb4b0204f2a5ec5d5d7b60b"}, + {file = "dulwich-0.20.50-cp39-cp39-macosx_11_0_arm64.whl", hash = 
"sha256:80e8750ee2fa0ab2784a095956077758e5f6107de27f637c4b9d18406652c22c"}, + {file = "dulwich-0.20.50-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fbb6368f18451dc44c95c55e1a609d1a01d3821f7ed480b22b2aea1baca0f4a7"}, + {file = "dulwich-0.20.50-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3ee45001411b638641819b7b3b33f31f13467c84066e432256580fcab7d8815"}, + {file = "dulwich-0.20.50-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4842e22ed863a776b36ef8ffe9ed7b772eb452b42c8d02975c29d27e3bc50ab4"}, + {file = "dulwich-0.20.50-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:790e4a641284a7fb4d56ebdaf8b324a5826fbbb9c54307c06f586f9f6a5e56db"}, + {file = "dulwich-0.20.50-cp39-cp39-win32.whl", hash = "sha256:f08406b6b789dea5c95ba1130a0801d8748a67f18be940fe7486a8b481fde875"}, + {file = "dulwich-0.20.50-cp39-cp39-win_amd64.whl", hash = "sha256:78c388ad421199000fb7b5ed5f0c7b509b3e31bd7cad303786a4d0bf89b82f60"}, + {file = "dulwich-0.20.50-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:cb194c53109131bcbcd1ca430fcd437cdaf2d33e204e45fbe121c47eaa43e9af"}, + {file = "dulwich-0.20.50-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7542a72c5640dd0620862d6df8688f02a6c336359b5af9b3fcfe11b7fa6652f"}, + {file = "dulwich-0.20.50-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4aa1d0861517ebbbe0e0084cc9ab4f7ab720624a3eda2bd10e45f774ab858db8"}, + {file = "dulwich-0.20.50-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:583c6bbc27f13fe2e41a19f6987a42681c6e4f6959beae0a6e5bb033b8b081a8"}, + {file = "dulwich-0.20.50-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0c61c193d02c0e1e0d758cdd57ae76685c368d09a01f00d704ba88bd96767cfe"}, + {file = "dulwich-0.20.50-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2edbff3053251985f10702adfafbee118298d383ef5b5b432a5f22d1f1915df"}, + {file = 
"dulwich-0.20.50-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a344230cadfc5d315752add6ce9d4cfcfc6c85e36bbf57fce9444bcc7c6ea8fb"}, + {file = "dulwich-0.20.50-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:57bff9bde0b6b05b00c6acbb1a94357caddb2908ed7026a48c715ff50d220335"}, + {file = "dulwich-0.20.50-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e29a3c2037761fa816aa556e78364dfc8e3f44b873db2d17aed96f9b06ac83a3"}, + {file = "dulwich-0.20.50-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2aa2a4a84029625bf9c63771f8a628db1f3be2d2ea3cb8b17942cd4317797152"}, + {file = "dulwich-0.20.50-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd9fa00971ecf059bb358085a942ecac5be4ff71acdf299f44c8cbc45c18659f"}, + {file = "dulwich-0.20.50-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:af4adac92fb95671ea3a24f2f8e5e5e8f638711ce9c33a3ca6cd68bf1ff7d99f"}, + {file = "dulwich-0.20.50.tar.gz", hash = "sha256:50a941796b2c675be39be728d540c16b5b7ce77eb9e1b3f855650ece6832d2be"}, +] + +[package.dependencies] +urllib3 = ">=1.25" + +[package.extras] +fastimport = ["fastimport"] +https = ["urllib3 (>=1.24.1)"] +paramiko = ["paramiko"] +pgp = ["gpg"] + +[[package]] +name = "fastapi" +version = "0.70.0" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +category = "main" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "fastapi-0.70.0-py3-none-any.whl", hash = "sha256:a36d5f2fad931aa3575c07a3472c784e81f3e664e3bb5c8b9c88d0ec1104f59c"}, + {file = "fastapi-0.70.0.tar.gz", hash = "sha256:66da43cfe5185ea1df99552acffd201f1832c6b364e0f4136c0a99f933466ced"}, +] + +[package.dependencies] +pydantic = ">=1.6.2,<1.7 || >1.7,<1.7.1 || >1.7.1,<1.7.2 || >1.7.2,<1.7.3 || >1.7.3,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0" +starlette = "0.16.0" + +[package.extras] +all = ["email_validator (>=1.1.1,<2.0.0)", "itsdangerous (>=1.1.0,<3.0.0)", 
"jinja2 (>=2.11.2,<4.0.0)", "orjson (>=3.2.1,<4.0.0)", "python-multipart (>=0.0.5,<0.0.6)", "pyyaml (>=5.3.1,<6.0.0)", "requests (>=2.24.0,<3.0.0)", "ujson (>=4.0.1,<5.0.0)", "uvicorn[standard] (>=0.12.0,<0.16.0)"] +dev = ["autoflake (>=1.4.0,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "passlib[bcrypt] (>=1.7.2,<2.0.0)", "python-jose[cryptography] (>=3.3.0,<4.0.0)", "uvicorn[standard] (>=0.12.0,<0.16.0)"] +doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-markdownextradata-plugin (>=0.1.7,<0.3.0)", "mkdocs-material (>=7.1.9,<8.0.0)", "pyyaml (>=5.3.1,<6.0.0)", "typer-cli (>=0.0.12,<0.0.13)"] +test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==21.9b0)", "databases[sqlite] (>=0.3.2,<0.6.0)", "email_validator (>=1.1.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "flask (>=1.1.2,<3.0.0)", "httpx (>=0.14.0,<0.19.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "orjson (>=3.2.1,<4.0.0)", "peewee (>=3.13.3,<4.0.0)", "pytest (>=6.2.4,<7.0.0)", "pytest-cov (>=2.12.0,<4.0.0)", "python-multipart (>=0.0.5,<0.0.6)", "requests (>=2.24.0,<3.0.0)", "sqlalchemy (>=1.3.18,<1.5.0)", "types-dataclasses (==0.1.7)", "types-orjson (==3.6.0)", "types-ujson (==0.1.1)", "ujson (>=4.0.1,<5.0.0)"] + +[[package]] +name = "filelock" +version = "3.8.2" +description = "A platform independent file lock." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "filelock-3.8.2-py3-none-any.whl", hash = "sha256:8df285554452285f79c035efb0c861eb33a4bcfa5b7a137016e32e6a90f9792c"}, + {file = "filelock-3.8.2.tar.gz", hash = "sha256:7565f628ea56bfcd8e54e42bdc55da899c85c1abfe1b5bcfd147e9188cebb3b2"}, +] + +[package.extras] +docs = ["furo (>=2022.9.29)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.5)"] +testing = ["covdefaults (>=2.2.2)", "coverage (>=6.5)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-timeout (>=2.1)"] + +[[package]] +name = "flake8" +version = "6.0.0" +description = "the modular source code checker: pep8 pyflakes and co" +category = "dev" +optional = false +python-versions = ">=3.8.1" +files = [ + {file = "flake8-6.0.0-py2.py3-none-any.whl", hash = "sha256:3833794e27ff64ea4e9cf5d410082a8b97ff1a06c16aa3d2027339cd0f1195c7"}, + {file = "flake8-6.0.0.tar.gz", hash = "sha256:c61007e76655af75e6785a931f452915b371dc48f56efd765247c8fe68f2b181"}, +] + +[package.dependencies] +mccabe = ">=0.7.0,<0.8.0" +pycodestyle = ">=2.10.0,<2.11.0" +pyflakes = ">=3.0.0,<3.1.0" + +[[package]] +name = "flake8-bugbear" +version = "23.1.20" +description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "flake8-bugbear-23.1.20.tar.gz", hash = "sha256:55902ab5a48c5ea53d8689ecd146eda548e72f2724192b9c1d68f6d975d13c06"}, + {file = "flake8_bugbear-23.1.20-py3-none-any.whl", hash = "sha256:04a115e5f9c8e87c38bdbbcdf9f58223ffe05469c07c9a7bd8633330bc4d078b"}, +] + +[package.dependencies] +attrs = ">=19.2.0" +flake8 = ">=3.0.0" + +[package.extras] +dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "pytest", "tox"] + +[[package]] +name = "future" +version = "0.18.2" +description = "Clean single-source support for Python 3 and 2" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "future-0.18.2.tar.gz", hash = "sha256:b1bead90b70cf6ec3f0710ae53a525360fa360d306a86583adc6bf83a4db537d"}, +] + +[[package]] +name = "gitdb" +version = "4.0.10" +description = "Git Object Database" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "gitdb-4.0.10-py3-none-any.whl", hash = "sha256:c286cf298426064079ed96a9e4a9d39e7f3e9bf15ba60701e95f5492f28415c7"}, + {file = "gitdb-4.0.10.tar.gz", hash = "sha256:6eb990b69df4e15bad899ea868dc46572c3f75339735663b81de79b06f17eb9a"}, +] + +[package.dependencies] +smmap = ">=3.0.1,<6" + +[[package]] +name = "gitpython" +version = "3.1.29" +description = "GitPython is a python library used to interact with Git repositories" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "GitPython-3.1.29-py3-none-any.whl", hash = "sha256:41eea0deec2deea139b459ac03656f0dd28fc4a3387240ec1d3c259a2c47850f"}, + {file = "GitPython-3.1.29.tar.gz", hash = "sha256:cc36bfc4a3f913e66805a28e84703e419d9c264c1077e537b54f0e1af85dbefd"}, +] + +[package.dependencies] +gitdb = ">=4.0.1,<5" + +[[package]] +name = "h11" +version = "0.14.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +category = "main" +optional = false 
+python-versions = ">=3.7" +files = [ + {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"}, + {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, +] + +[[package]] +name = "html5lib" +version = "1.1" +description = "HTML parser based on the WHATWG HTML specification" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d"}, + {file = "html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f"}, +] + +[package.dependencies] +six = ">=1.9" +webencodings = "*" + +[package.extras] +all = ["chardet (>=2.2)", "genshi", "lxml"] +chardet = ["chardet (>=2.2)"] +genshi = ["genshi"] +lxml = ["lxml"] + +[[package]] +name = "identify" +version = "2.5.11" +description = "File identification library for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "identify-2.5.11-py2.py3-none-any.whl", hash = "sha256:e7db36b772b188099616aaf2accbee122949d1c6a1bac4f38196720d6f9f06db"}, + {file = "identify-2.5.11.tar.gz", hash = "sha256:14b7076b29c99b1b0b8b08e96d448c7b877a9b07683cd8cfda2ea06af85ffa1c"}, +] + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "main" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "importlib-metadata" +version = "4.13.0" +description = "Read metadata from Python packages" +category 
= "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-4.13.0-py3-none-any.whl", hash = "sha256:8a8a81bcf996e74fee46f0d16bd3eaa382a7eb20fd82445c3ad11f4090334116"}, + {file = "importlib_metadata-4.13.0.tar.gz", hash = "sha256:dd0173e8f150d6815e098fd354f6414b0f079af4644ddfe90c71e2fc6174346d"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +perf = ["ipython"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] + +[[package]] +name = "iniconfig" +version = "1.1.1" +description = "iniconfig: brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, + {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, +] + +[[package]] +name = "isort" +version = "5.12.0" +description = "A Python utility / library to sort Python imports." 
+category = "dev" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"}, + {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"}, +] + +[package.extras] +colors = ["colorama (>=0.4.3)"] +pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"] +plugins = ["setuptools"] +requirements-deprecated-finder = ["pip-api", "pipreqs"] + +[[package]] +name = "jaraco-classes" +version = "3.2.3" +description = "Utility functions for Python class constructs" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jaraco.classes-3.2.3-py3-none-any.whl", hash = "sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158"}, + {file = "jaraco.classes-3.2.3.tar.gz", hash = "sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a"}, +] + +[package.dependencies] +more-itertools = "*" + +[package.extras] +docs = ["jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[[package]] +name = "jeepney" +version = "0.8.0" +description = "Low-level, pure Python DBus protocol wrapper." 
+category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"}, + {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"}, +] + +[package.extras] +test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] +trio = ["async_generator", "trio"] + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "jsonschema" +version = "4.17.3" +description = "An implementation of JSON Schema validation for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, + {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, +] + +[package.dependencies] +attrs = ">=17.4.0" +pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "keyring" +version = "23.13.1" +description = "Store and access your 
passwords safely." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "keyring-23.13.1-py3-none-any.whl", hash = "sha256:771ed2a91909389ed6148631de678f82ddc73737d85a927f382a8a1b157898cd"}, + {file = "keyring-23.13.1.tar.gz", hash = "sha256:ba2e15a9b35e21908d0aaf4e0a47acc52d6ae33444df0da2b49d41a46ef6d678"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} +"jaraco.classes" = "*" +jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} +pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} +SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} + +[package.extras] +completion = ["shtab"] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[[package]] +name = "lockfile" +version = "0.12.2" +description = "Platform-independent file locking module" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "lockfile-0.12.2-py2.py3-none-any.whl", hash = "sha256:6c3cb24f344923d30b2785d5ad75182c8ea7ac1b6171b08657258ec7429d50fa"}, + {file = "lockfile-0.12.2.tar.gz", hash = "sha256:6aed02de03cba24efabcd600b30540140634fc06cfa603822d508d5361e9f799"}, +] + +[[package]] +name = "macholib" +version = "1.16.2" +description = "Mach-O header analysis and editing" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "macholib-1.16.2-py2.py3-none-any.whl", hash = "sha256:44c40f2cd7d6726af8fa6fe22549178d3a4dfecc35a9cd15ea916d9c83a688e0"}, + {file = "macholib-1.16.2.tar.gz", hash = "sha256:557bbfa1bb255c20e9abafe7ed6cd8046b48d9525db2f9b77d3122a63a2a8bf8"}, +] + +[package.dependencies] +altgraph = ">=0.17" + +[[package]] +name = "markupsafe" +version = 
"2.1.2" +description = "Safely add untrusted strings to HTML/XML markup." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:665a36ae6f8f20a4676b53224e33d456a6f5a72657d9c83c2aa00765072f31f7"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:340bea174e9761308703ae988e982005aedf427de816d1afe98147668cc03036"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22152d00bf4a9c7c83960521fc558f55a1adbc0631fbb00a9471e097b19d72e1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:28057e985dace2f478e042eaa15606c7efccb700797660629da387eb289b9323"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca244fa73f50a800cf8c3ebf7fd93149ec37f5cb9596aa8873ae2c1d23498601"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d9d971ec1e79906046aa3ca266de79eac42f1dbf3612a05dc9368125952bd1a1"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7e007132af78ea9df29495dbf7b5824cb71648d7133cf7848a2a5dd00d36f9ff"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7313ce6a199651c4ed9d7e4cfb4aa56fe923b1adf9af3b420ee14e6d9a73df65"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win32.whl", hash = "sha256:c4a549890a45f57f1ebf99c067a4ad0cb423a05544accaf2b065246827ed9603"}, + {file = "MarkupSafe-2.1.2-cp310-cp310-win_amd64.whl", hash = "sha256:835fb5e38fd89328e9c81067fd642b3593c33e1e17e2fdbf77f5676abb14a156"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:2ec4f2d48ae59bbb9d1f9d7efb9236ab81429a764dedca114f5fdabbc3788013"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:608e7073dfa9e38a85d38474c082d4281f4ce276ac0010224eaba11e929dd53a"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:65608c35bfb8a76763f37036547f7adfd09270fbdbf96608be2bead319728fcd"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2bfb563d0211ce16b63c7cb9395d2c682a23187f54c3d79bfec33e6705473c6"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:da25303d91526aac3672ee6d49a2f3db2d9502a4a60b55519feb1a4c7714e07d"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:9cad97ab29dfc3f0249b483412c85c8ef4766d96cdf9dcf5a1e3caa3f3661cf1"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:085fd3201e7b12809f9e6e9bc1e5c96a368c8523fad5afb02afe3c051ae4afcc"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1bea30e9bf331f3fef67e0a3877b2288593c98a21ccb2cf29b74c581a4eb3af0"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win32.whl", hash = "sha256:7df70907e00c970c60b9ef2938d894a9381f38e6b9db73c5be35e59d92e06625"}, + {file = "MarkupSafe-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:e55e40ff0cc8cc5c07996915ad367fa47da6b3fc091fdadca7f5403239c5fec3"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a6e40afa7f45939ca356f348c8e23048e02cb109ced1eb8420961b2f40fb373a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf877ab4ed6e302ec1d04952ca358b381a882fbd9d1b07cccbfd61783561f98a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:63ba06c9941e46fa389d389644e2d8225e0e3e5ebcc4ff1ea8506dce646f8c8a"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:f1cd098434e83e656abf198f103a8207a8187c0fc110306691a2e94a78d0abb2"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:55f44b440d491028addb3b88f72207d71eeebfb7b5dbf0643f7c023ae1fba619"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:a6f2fcca746e8d5910e18782f976489939d54a91f9411c32051b4aab2bd7c513"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:0b462104ba25f1ac006fdab8b6a01ebbfbce9ed37fd37fd4acd70c67c973e460"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win32.whl", hash = "sha256:7668b52e102d0ed87cb082380a7e2e1e78737ddecdde129acadb0eccc5423859"}, + {file = "MarkupSafe-2.1.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6d6607f98fcf17e534162f0709aaad3ab7a96032723d8ac8750ffe17ae5a0666"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a806db027852538d2ad7555b203300173dd1b77ba116de92da9afbc3a3be3eed"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a4abaec6ca3ad8660690236d11bfe28dfd707778e2442b45addd2f086d6ef094"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f03a532d7dee1bed20bc4884194a16160a2de9ffc6354b3878ec9682bb623c54"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cf06cdc1dda95223e9d2d3c58d3b178aa5dacb35ee7e3bbac10e4e1faacb419"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:22731d79ed2eb25059ae3df1dfc9cb1546691cc41f4e3130fe6bfbc3ecbbecfa"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f8ffb705ffcf5ddd0e80b65ddf7bed7ee4f5a441ea7d3419e861a12eaf41af58"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:8db032bf0ce9022a8e41a22598eefc802314e81b879ae093f36ce9ddf39ab1ba"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = 
"sha256:2298c859cfc5463f1b64bd55cb3e602528db6fa0f3cfd568d3605c50678f8f03"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win32.whl", hash = "sha256:50c42830a633fa0cf9e7d27664637532791bfc31c731a87b202d2d8ac40c3ea2"}, + {file = "MarkupSafe-2.1.2-cp38-cp38-win_amd64.whl", hash = "sha256:bb06feb762bade6bf3c8b844462274db0c76acc95c52abe8dbed28ae3d44a147"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:99625a92da8229df6d44335e6fcc558a5037dd0a760e11d84be2260e6f37002f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8bca7e26c1dd751236cfb0c6c72d4ad61d986e9a41bbf76cb445f69488b2a2bd"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40627dcf047dadb22cd25ea7ecfe9cbf3bbbad0482ee5920b582f3809c97654f"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40dfd3fefbef579ee058f139733ac336312663c6706d1163b82b3003fb1925c4"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:090376d812fb6ac5f171e5938e82e7f2d7adc2b629101cec0db8b267815c85e2"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2e7821bffe00aa6bd07a23913b7f4e01328c3d5cc0b40b36c0bd81d362faeb65"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c0a33bc9f02c2b17c3ea382f91b4db0e6cde90b63b296422a939886a7a80de1c"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b8526c6d437855442cdd3d87eede9c425c4445ea011ca38d937db299382e6fa3"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win32.whl", hash = "sha256:137678c63c977754abe9086a3ec011e8fd985ab90631145dfb9294ad09c102a7"}, + {file = "MarkupSafe-2.1.2-cp39-cp39-win_amd64.whl", hash = "sha256:0576fe974b40a400449768941d5d0858cc624e3249dfd1e0c33674e5c7ca7aed"}, + {file = "MarkupSafe-2.1.2.tar.gz", hash = 
"sha256:abcabc8c2b26036d62d4c746381a6f7cf60aafcc653198ad678306986b09450d"}, +] + +[[package]] +name = "mccabe" +version = "0.7.0" +description = "McCabe checker, plugin for flake8" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, + {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, +] + +[[package]] +name = "more-itertools" +version = "9.0.0" +description = "More routines for operating on iterables, beyond itertools" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "more-itertools-9.0.0.tar.gz", hash = "sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab"}, + {file = "more_itertools-9.0.0-py3-none-any.whl", hash = "sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41"}, +] + +[[package]] +name = "msgpack" +version = "1.0.4" +description = "MessagePack serializer" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4ab251d229d10498e9a2f3b1e68ef64cb393394ec477e3370c457f9430ce9250"}, + {file = "msgpack-1.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:112b0f93202d7c0fef0b7810d465fde23c746a2d482e1e2de2aafd2ce1492c88"}, + {file = "msgpack-1.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:002b5c72b6cd9b4bafd790f364b8480e859b4712e91f43014fe01e4f957b8467"}, + {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:35bc0faa494b0f1d851fd29129b2575b2e26d41d177caacd4206d81502d4c6a6"}, + {file = "msgpack-1.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4733359808c56d5d7756628736061c432ded018e7a1dff2d35a02439043321aa"}, + {file = 
"msgpack-1.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb514ad14edf07a1dbe63761fd30f89ae79b42625731e1ccf5e1f1092950eaa6"}, + {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c23080fdeec4716aede32b4e0ef7e213c7b1093eede9ee010949f2a418ced6ba"}, + {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:49565b0e3d7896d9ea71d9095df15b7f75a035c49be733051c34762ca95bbf7e"}, + {file = "msgpack-1.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:aca0f1644d6b5a73eb3e74d4d64d5d8c6c3d577e753a04c9e9c87d07692c58db"}, + {file = "msgpack-1.0.4-cp310-cp310-win32.whl", hash = "sha256:0dfe3947db5fb9ce52aaea6ca28112a170db9eae75adf9339a1aec434dc954ef"}, + {file = "msgpack-1.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dea20515f660aa6b7e964433b1808d098dcfcabbebeaaad240d11f909298075"}, + {file = "msgpack-1.0.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e83f80a7fec1a62cf4e6c9a660e39c7f878f603737a0cdac8c13131d11d97f52"}, + {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c11a48cf5e59026ad7cb0dc29e29a01b5a66a3e333dc11c04f7e991fc5510a9"}, + {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1276e8f34e139aeff1c77a3cefb295598b504ac5314d32c8c3d54d24fadb94c9"}, + {file = "msgpack-1.0.4-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6c9566f2c39ccced0a38d37c26cc3570983b97833c365a6044edef3574a00c08"}, + {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:fcb8a47f43acc113e24e910399376f7277cf8508b27e5b88499f053de6b115a8"}, + {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:76ee788122de3a68a02ed6f3a16bbcd97bc7c2e39bd4d94be2f1821e7c4a64e6"}, + {file = "msgpack-1.0.4-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = 
"sha256:0a68d3ac0104e2d3510de90a1091720157c319ceeb90d74f7b5295a6bee51bae"}, + {file = "msgpack-1.0.4-cp36-cp36m-win32.whl", hash = "sha256:85f279d88d8e833ec015650fd15ae5eddce0791e1e8a59165318f371158efec6"}, + {file = "msgpack-1.0.4-cp36-cp36m-win_amd64.whl", hash = "sha256:c1683841cd4fa45ac427c18854c3ec3cd9b681694caf5bff04edb9387602d661"}, + {file = "msgpack-1.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a75dfb03f8b06f4ab093dafe3ddcc2d633259e6c3f74bb1b01996f5d8aa5868c"}, + {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9667bdfdf523c40d2511f0e98a6c9d3603be6b371ae9a238b7ef2dc4e7a427b0"}, + {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11184bc7e56fd74c00ead4f9cc9a3091d62ecb96e97653add7a879a14b003227"}, + {file = "msgpack-1.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ac5bd7901487c4a1dd51a8c58f2632b15d838d07ceedaa5e4c080f7190925bff"}, + {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:1e91d641d2bfe91ba4c52039adc5bccf27c335356055825c7f88742c8bb900dd"}, + {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:2a2df1b55a78eb5f5b7d2a4bb221cd8363913830145fad05374a80bf0877cb1e"}, + {file = "msgpack-1.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:545e3cf0cf74f3e48b470f68ed19551ae6f9722814ea969305794645da091236"}, + {file = "msgpack-1.0.4-cp37-cp37m-win32.whl", hash = "sha256:2cc5ca2712ac0003bcb625c96368fd08a0f86bbc1a5578802512d87bc592fe44"}, + {file = "msgpack-1.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:eba96145051ccec0ec86611fe9cf693ce55f2a3ce89c06ed307de0e085730ec1"}, + {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:7760f85956c415578c17edb39eed99f9181a48375b0d4a94076d84148cf67b2d"}, + {file = "msgpack-1.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = 
"sha256:449e57cc1ff18d3b444eb554e44613cffcccb32805d16726a5494038c3b93dab"}, + {file = "msgpack-1.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d603de2b8d2ea3f3bcb2efe286849aa7a81531abc52d8454da12f46235092bcb"}, + {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48f5d88c99f64c456413d74a975bd605a9b0526293218a3b77220a2c15458ba9"}, + {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6916c78f33602ecf0509cc40379271ba0f9ab572b066bd4bdafd7434dee4bc6e"}, + {file = "msgpack-1.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:81fc7ba725464651190b196f3cd848e8553d4d510114a954681fd0b9c479d7e1"}, + {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:d5b5b962221fa2c5d3a7f8133f9abffc114fe218eb4365e40f17732ade576c8e"}, + {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:77ccd2af37f3db0ea59fb280fa2165bf1b096510ba9fe0cc2bf8fa92a22fdb43"}, + {file = "msgpack-1.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:b17be2478b622939e39b816e0aa8242611cc8d3583d1cd8ec31b249f04623243"}, + {file = "msgpack-1.0.4-cp38-cp38-win32.whl", hash = "sha256:2bb8cdf50dd623392fa75525cce44a65a12a00c98e1e37bf0fb08ddce2ff60d2"}, + {file = "msgpack-1.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:26b8feaca40a90cbe031b03d82b2898bf560027160d3eae1423f4a67654ec5d6"}, + {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:462497af5fd4e0edbb1559c352ad84f6c577ffbbb708566a0abaaa84acd9f3ae"}, + {file = "msgpack-1.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2999623886c5c02deefe156e8f869c3b0aaeba14bfc50aa2486a0415178fce55"}, + {file = "msgpack-1.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f0029245c51fd9473dc1aede1160b0a29f4a912e6b1dd353fa6d317085b219da"}, + {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:ed6f7b854a823ea44cf94919ba3f727e230da29feb4a99711433f25800cf747f"}, + {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0df96d6eaf45ceca04b3f3b4b111b86b33785683d682c655063ef8057d61fd92"}, + {file = "msgpack-1.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6a4192b1ab40f8dca3f2877b70e63799d95c62c068c84dc028b40a6cb03ccd0f"}, + {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0e3590f9fb9f7fbc36df366267870e77269c03172d086fa76bb4eba8b2b46624"}, + {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1576bd97527a93c44fa856770197dec00d223b0b9f36ef03f65bac60197cedf8"}, + {file = "msgpack-1.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:63e29d6e8c9ca22b21846234913c3466b7e4ee6e422f205a2988083de3b08cae"}, + {file = "msgpack-1.0.4-cp39-cp39-win32.whl", hash = "sha256:fb62ea4b62bfcb0b380d5680f9a4b3f9a2d166d9394e9bbd9666c0ee09a3645c"}, + {file = "msgpack-1.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:4d5834a2a48965a349da1c5a79760d94a1a0172fbb5ab6b5b33cbf8447e109ce"}, + {file = "msgpack-1.0.4.tar.gz", hash = "sha256:f5d869c18f030202eb412f08b28d2afeea553d6613aee89e200d7aca7ef01f5f"}, +] + +[[package]] +name = "mypy" +version = "0.991" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mypy-0.991-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7d17e0a9707d0772f4a7b878f04b4fd11f6f5bcb9b3813975a9b13c9332153ab"}, + {file = "mypy-0.991-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0714258640194d75677e86c786e80ccf294972cc76885d3ebbb560f11db0003d"}, + {file = "mypy-0.991-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0c8f3be99e8a8bd403caa8c03be619544bc2c77a7093685dcf308c6b109426c6"}, + {file = "mypy-0.991-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:bc9ec663ed6c8f15f4ae9d3c04c989b744436c16d26580eaa760ae9dd5d662eb"}, + {file = "mypy-0.991-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4307270436fd7694b41f913eb09210faff27ea4979ecbcd849e57d2da2f65305"}, + {file = "mypy-0.991-cp310-cp310-win_amd64.whl", hash = "sha256:901c2c269c616e6cb0998b33d4adbb4a6af0ac4ce5cd078afd7bc95830e62c1c"}, + {file = "mypy-0.991-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d13674f3fb73805ba0c45eb6c0c3053d218aa1f7abead6e446d474529aafc372"}, + {file = "mypy-0.991-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1c8cd4fb70e8584ca1ed5805cbc7c017a3d1a29fb450621089ffed3e99d1857f"}, + {file = "mypy-0.991-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:209ee89fbb0deed518605edddd234af80506aec932ad28d73c08f1400ef80a33"}, + {file = "mypy-0.991-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37bd02ebf9d10e05b00d71302d2c2e6ca333e6c2a8584a98c00e038db8121f05"}, + {file = "mypy-0.991-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:26efb2fcc6b67e4d5a55561f39176821d2adf88f2745ddc72751b7890f3194ad"}, + {file = "mypy-0.991-cp311-cp311-win_amd64.whl", hash = "sha256:3a700330b567114b673cf8ee7388e949f843b356a73b5ab22dd7cff4742a5297"}, + {file = "mypy-0.991-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:1f7d1a520373e2272b10796c3ff721ea1a0712288cafaa95931e66aa15798813"}, + {file = "mypy-0.991-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:641411733b127c3e0dab94c45af15fea99e4468f99ac88b39efb1ad677da5711"}, + {file = "mypy-0.991-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3d80e36b7d7a9259b740be6d8d906221789b0d836201af4234093cae89ced0cd"}, + {file = "mypy-0.991-cp37-cp37m-win_amd64.whl", hash = "sha256:e62ebaad93be3ad1a828a11e90f0e76f15449371ffeecca4a0a0b9adc99abcef"}, + {file = "mypy-0.991-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:b86ce2c1866a748c0f6faca5232059f881cda6dda2a893b9a8373353cfe3715a"}, + {file = 
"mypy-0.991-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ac6e503823143464538efda0e8e356d871557ef60ccd38f8824a4257acc18d93"}, + {file = "mypy-0.991-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0cca5adf694af539aeaa6ac633a7afe9bbd760df9d31be55ab780b77ab5ae8bf"}, + {file = "mypy-0.991-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a12c56bf73cdab116df96e4ff39610b92a348cc99a1307e1da3c3768bbb5b135"}, + {file = "mypy-0.991-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:652b651d42f155033a1967739788c436491b577b6a44e4c39fb340d0ee7f0d70"}, + {file = "mypy-0.991-cp38-cp38-win_amd64.whl", hash = "sha256:4175593dc25d9da12f7de8de873a33f9b2b8bdb4e827a7cae952e5b1a342e243"}, + {file = "mypy-0.991-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:98e781cd35c0acf33eb0295e8b9c55cdbef64fcb35f6d3aa2186f289bed6e80d"}, + {file = "mypy-0.991-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6d7464bac72a85cb3491c7e92b5b62f3dcccb8af26826257760a552a5e244aa5"}, + {file = "mypy-0.991-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c9166b3f81a10cdf9b49f2d594b21b31adadb3d5e9db9b834866c3258b695be3"}, + {file = "mypy-0.991-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8472f736a5bfb159a5e36740847808f6f5b659960115ff29c7cecec1741c648"}, + {file = "mypy-0.991-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e80e758243b97b618cdf22004beb09e8a2de1af481382e4d84bc52152d1c476"}, + {file = "mypy-0.991-cp39-cp39-win_amd64.whl", hash = "sha256:74e259b5c19f70d35fcc1ad3d56499065c601dfe94ff67ae48b85596b9ec1461"}, + {file = "mypy-0.991-py3-none-any.whl", hash = "sha256:de32edc9b0a7e67c2775e574cb061a537660e51210fbf6006b0b36ea695ae9bb"}, + {file = "mypy-0.991.tar.gz", hash = "sha256:3c0165ba8f354a6d9881809ef29f1a9318a236a6d81c690094c5df32107bde06"}, +] + +[package.dependencies] +mypy-extensions = ">=0.4.3" +typing-extensions = ">=3.10" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +install-types = ["pip"] +python2 = ["typed-ast 
(>=1.4.0,<2)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] + +[[package]] +name = "nodeenv" +version = "1.7.0" +description = "Node.js virtual environment builder" +category = "dev" +optional = false +python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*" +files = [ + {file = "nodeenv-1.7.0-py2.py3-none-any.whl", hash = "sha256:27083a7b96a25f2f5e1d8cb4b6317ee8aeda3bdd121394e5ac54e498028a042e"}, + {file = "nodeenv-1.7.0.tar.gz", hash = "sha256:e0e7f7dfb85fc5394c6fe1e8fa98131a2473e04311a45afb6508f7cf1836fa2b"}, +] + +[package.dependencies] +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.24.2" +description = "Fundamental package for array computing in Python" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.24.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eef70b4fc1e872ebddc38cddacc87c19a3709c0e3e5d20bf3954c147b1dd941d"}, + {file = "numpy-1.24.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d2859428712785e8a8b7d2b3ef0a1d1565892367b32f915c4a4df44d0e64f5"}, + {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6524630f71631be2dabe0c541e7675db82651eb998496bbe16bc4f77f0772253"}, + {file = "numpy-1.24.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a51725a815a6188c662fb66fb32077709a9ca38053f0274640293a14fdd22978"}, + {file = "numpy-1.24.2-cp310-cp310-win32.whl", hash = 
"sha256:2620e8592136e073bd12ee4536149380695fbe9ebeae845b81237f986479ffc9"}, + {file = "numpy-1.24.2-cp310-cp310-win_amd64.whl", hash = "sha256:97cf27e51fa078078c649a51d7ade3c92d9e709ba2bfb97493007103c741f1d0"}, + {file = "numpy-1.24.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:7de8fdde0003f4294655aa5d5f0a89c26b9f22c0a58790c38fae1ed392d44a5a"}, + {file = "numpy-1.24.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4173bde9fa2a005c2c6e2ea8ac1618e2ed2c1c6ec8a7657237854d42094123a0"}, + {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cecaed30dc14123020f77b03601559fff3e6cd0c048f8b5289f4eeabb0eb281"}, + {file = "numpy-1.24.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a23f8440561a633204a67fb44617ce2a299beecf3295f0d13c495518908e910"}, + {file = "numpy-1.24.2-cp311-cp311-win32.whl", hash = "sha256:e428c4fbfa085f947b536706a2fc349245d7baa8334f0c5723c56a10595f9b95"}, + {file = "numpy-1.24.2-cp311-cp311-win_amd64.whl", hash = "sha256:557d42778a6869c2162deb40ad82612645e21d79e11c1dc62c6e82a2220ffb04"}, + {file = "numpy-1.24.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d0a2db9d20117bf523dde15858398e7c0858aadca7c0f088ac0d6edd360e9ad2"}, + {file = "numpy-1.24.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:c72a6b2f4af1adfe193f7beb91ddf708ff867a3f977ef2ec53c0ffb8283ab9f5"}, + {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c29e6bd0ec49a44d7690ecb623a8eac5ab8a923bce0bea6293953992edf3a76a"}, + {file = "numpy-1.24.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2eabd64ddb96a1239791da78fa5f4e1693ae2dadc82a76bc76a14cbb2b966e96"}, + {file = "numpy-1.24.2-cp38-cp38-win32.whl", hash = "sha256:e3ab5d32784e843fc0dd3ab6dcafc67ef806e6b6828dc6af2f689be0eb4d781d"}, + {file = "numpy-1.24.2-cp38-cp38-win_amd64.whl", hash = "sha256:76807b4063f0002c8532cfeac47a3068a69561e9c8715efdad3c642eb27c0756"}, + {file = 
"numpy-1.24.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4199e7cfc307a778f72d293372736223e39ec9ac096ff0a2e64853b866a8e18a"}, + {file = "numpy-1.24.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:adbdce121896fd3a17a77ab0b0b5eedf05a9834a18699db6829a64e1dfccca7f"}, + {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:889b2cc88b837d86eda1b17008ebeb679d82875022200c6e8e4ce6cf549b7acb"}, + {file = "numpy-1.24.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f64bb98ac59b3ea3bf74b02f13836eb2e24e48e0ab0145bbda646295769bd780"}, + {file = "numpy-1.24.2-cp39-cp39-win32.whl", hash = "sha256:63e45511ee4d9d976637d11e6c9864eae50e12dc9598f531c035265991910468"}, + {file = "numpy-1.24.2-cp39-cp39-win_amd64.whl", hash = "sha256:a77d3e1163a7770164404607b7ba3967fb49b24782a6ef85d9b5f54126cc39e5"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:92011118955724465fb6853def593cf397b4a1367495e0b59a7e69d40c4eb71d"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9006288bcf4895917d02583cf3411f98631275bc67cce355a7f39f8c14338fa"}, + {file = "numpy-1.24.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:150947adbdfeceec4e5926d956a06865c1c690f2fd902efede4ca6fe2e657c3f"}, + {file = "numpy-1.24.2.tar.gz", hash = "sha256:003a9f530e880cb2cd177cba1af7220b9aa42def9c4afc2a2fc3ee6be7eb2b22"}, +] + +[[package]] +name = "packaging" +version = "22.0" +description = "Core utilities for Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-22.0-py3-none-any.whl", hash = "sha256:957e2148ba0e1a3b282772e791ef1d8083648bc131c8ab0c1feba110ce1146c3"}, + {file = "packaging-22.0.tar.gz", hash = "sha256:2198ec20bd4c017b8f9717e00f0c8714076fc2fd93816750ab48e2c41de2cfd3"}, +] + +[[package]] +name = "pathspec" +version = "0.10.3" +description = "Utility library for gitignore style pattern 
matching of file paths." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pathspec-0.10.3-py3-none-any.whl", hash = "sha256:3c95343af8b756205e2aba76e843ba9520a24dd84f68c22b9f93251507509dd6"}, + {file = "pathspec-0.10.3.tar.gz", hash = "sha256:56200de4077d9d0791465aa9095a01d421861e405b5096955051deefd697d6f6"}, +] + +[[package]] +name = "pefile" +version = "2022.5.30" +description = "Python PE parsing module" +category = "dev" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "pefile-2022.5.30.tar.gz", hash = "sha256:a5488a3dd1fd021ce33f969780b88fe0f7eebb76eb20996d7318f307612a045b"}, +] + +[package.dependencies] +future = "*" + +[[package]] +name = "pexpect" +version = "4.8.0" +description = "Pexpect allows easy control of interactive console applications." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, + {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pip-licenses" +version = "4.2.0" +description = "Dump the software license list of Python packages installed with pip." +category = "dev" +optional = false +python-versions = "~=3.8" +files = [ + {file = "pip-licenses-4.2.0.tar.gz", hash = "sha256:cabf1d83391c42278f1887a76555246bfcd9478c53be26e01e93b66d364f18a9"}, + {file = "pip_licenses-4.2.0-py3-none-any.whl", hash = "sha256:b6e057e359c0c4c927b7f70b43c27f06d9732a7830bb0aa14ac01f7281d3a972"}, +] + +[package.dependencies] +prettytable = ">=2.3.0" + +[package.extras] +test = ["docutils", "mypy", "pytest-cov", "pytest-pycodestyle", "pytest-runner"] + +[[package]] +name = "pkginfo" +version = "1.9.4" +description = "Query metadatdata from sdists / bdists / installed packages." 
+category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pkginfo-1.9.4-py3-none-any.whl", hash = "sha256:7fc056f8e6b93355925083373b0f6bfbabe84ee3f29854fd155c9d6a4211267d"}, + {file = "pkginfo-1.9.4.tar.gz", hash = "sha256:e769fd353593d43e0c9f47e17e25f09a8efcddcdf9a71674ea3ba444ff31bb44"}, +] + +[package.extras] +testing = ["pytest", "pytest-cov"] + +[[package]] +name = "platformdirs" +version = "2.6.2" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-2.6.2-py3-none-any.whl", hash = "sha256:83c8f6d04389165de7c9b6f0c682439697887bca0aa2f1c87ef1826be3584490"}, + {file = "platformdirs-2.6.2.tar.gz", hash = "sha256:e1fea1fe471b9ff8332e229df3cb7de4f53eeea4998d3b6bfff542115e998bd2"}, +] + +[package.extras] +docs = ["furo (>=2022.12.7)", "proselint (>=0.13)", "sphinx (>=5.3)", "sphinx-autodoc-typehints (>=1.19.5)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.2.2)", "pytest (>=7.2)", "pytest-cov (>=4)", "pytest-mock (>=3.10)"] + +[[package]] +name = "pluggy" +version = "1.0.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "poetry" +version = "1.3.1" +description = "Python dependency management and packaging made easy." 
+category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "poetry-1.3.1-py3-none-any.whl", hash = "sha256:e8c24984af3e124ef31a5891c1c11871c948687368ee451e95f7f101ffbf8204"}, + {file = "poetry-1.3.1.tar.gz", hash = "sha256:fde98462ad5dc9879393157da93092206a3411117e25a4761a41c6d08f31aea8"}, +] + +[package.dependencies] +cachecontrol = {version = ">=0.12.9,<0.13.0", extras = ["filecache"]} +cleo = ">=2.0.0,<3.0.0" +crashtest = ">=0.4.1,<0.5.0" +dulwich = ">=0.20.46,<0.21.0" +filelock = ">=3.8.0,<4.0.0" +html5lib = ">=1.0,<2.0" +jsonschema = ">=4.10.0,<5.0.0" +keyring = ">=23.9.0,<24.0.0" +lockfile = ">=0.12.2,<0.13.0" +packaging = ">=20.4" +pexpect = ">=4.7.0,<5.0.0" +pkginfo = ">=1.5,<2.0" +platformdirs = ">=2.5.2,<3.0.0" +poetry-core = "1.4.0" +poetry-plugin-export = ">=1.2.0,<2.0.0" +requests = ">=2.18,<3.0" +requests-toolbelt = ">=0.9.1,<0.11.0" +shellingham = ">=1.5,<2.0" +tomlkit = ">=0.11.1,<0.11.2 || >0.11.2,<0.11.3 || >0.11.3,<1.0.0" +trove-classifiers = ">=2022.5.19" +urllib3 = ">=1.26.0,<2.0.0" +virtualenv = {version = ">=20.4.3,<20.4.5 || >20.4.5,<20.4.6 || >20.4.6,<21.0.0", markers = "sys_platform != \"win32\" or python_version != \"3.9\""} +xattr = {version = ">=0.10.0,<0.11.0", markers = "sys_platform == \"darwin\""} + +[[package]] +name = "poetry-core" +version = "1.4.0" +description = "Poetry PEP 517 Build Backend" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "poetry_core-1.4.0-py3-none-any.whl", hash = "sha256:5559ab80384ac021db329ef317086417e140ee1176bcfcb3a3838b544e213c8e"}, + {file = "poetry_core-1.4.0.tar.gz", hash = "sha256:514bd33c30e0bf56b0ed44ee15e120d7e47b61ad908b2b1011da68c48a84ada9"}, +] + +[[package]] +name = "poetry-plugin-export" +version = "1.2.0" +description = "Poetry plugin to export the dependencies to various formats" +category = "dev" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "poetry_plugin_export-1.2.0-py3-none-any.whl", hash 
= "sha256:109fb43ebfd0e79d8be2e7f9d43ba2ae357c4975a18dfc0cfdd9597dd086790e"}, + {file = "poetry_plugin_export-1.2.0.tar.gz", hash = "sha256:9a1dd42765408931d7831738749022651d43a2968b67c988db1b7a567dfe41ef"}, +] + +[package.dependencies] +poetry = ">=1.2.2,<2.0.0" +poetry-core = ">=1.3.0,<2.0.0" + +[[package]] +name = "pre-commit" +version = "2.16.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." +category = "dev" +optional = false +python-versions = ">=3.6.1" +files = [ + {file = "pre_commit-2.16.0-py2.py3-none-any.whl", hash = "sha256:758d1dc9b62c2ed8881585c254976d66eae0889919ab9b859064fc2fe3c7743e"}, + {file = "pre_commit-2.16.0.tar.gz", hash = "sha256:fe9897cac830aa7164dbd02a4e7b90cae49630451ce88464bca73db486ba9f65"}, +] + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +toml = "*" +virtualenv = ">=20.0.8" + +[[package]] +name = "prettytable" +version = "3.7.0" +description = "A simple Python library for easily displaying tabular data in a visually appealing ASCII table format" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "prettytable-3.7.0-py3-none-any.whl", hash = "sha256:f4aaf2ed6e6062a82fd2e6e5289bbbe705ec2788fe401a3a1f62a1cea55526d2"}, + {file = "prettytable-3.7.0.tar.gz", hash = "sha256:ef8334ee40b7ec721651fc4d37ecc7bb2ef55fde5098d994438f0dfdaa385c0c"}, +] + +[package.dependencies] +wcwidth = "*" + +[package.extras] +tests = ["pytest", "pytest-cov", "pytest-lazy-fixture"] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] 
+name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] + +[[package]] +name = "pycodestyle" +version = "2.10.0" +description = "Python style guide checker" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pycodestyle-2.10.0-py2.py3-none-any.whl", hash = "sha256:8a4eaf0d0495c7395bdab3589ac2db602797d76207242c17d470186815706610"}, + {file = "pycodestyle-2.10.0.tar.gz", hash = "sha256:347187bdb476329d98f695c213d7295a846d1152ff4fe9bacb8a9590b8ee7053"}, +] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "1.10.2" +description = "Data validation and settings management using python type hints" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-1.10.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bb6ad4489af1bac6955d38ebcb95079a836af31e4c4f74aba1ca05bb9f6027bd"}, + {file = "pydantic-1.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a1f5a63a6dfe19d719b1b6e6106561869d2efaca6167f84f5ab9347887d78b98"}, + {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:352aedb1d71b8b0736c6d56ad2bd34c6982720644b0624462059ab29bd6e5912"}, + {file = "pydantic-1.10.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:19b3b9ccf97af2b7519c42032441a891a5e05c68368f40865a90eb88833c2559"}, + {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e9069e1b01525a96e6ff49e25876d90d5a563bc31c658289a8772ae186552236"}, + {file = "pydantic-1.10.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:355639d9afc76bcb9b0c3000ddcd08472ae75318a6eb67a15866b87e2efa168c"}, + {file = "pydantic-1.10.2-cp310-cp310-win_amd64.whl", hash = "sha256:ae544c47bec47a86bc7d350f965d8b15540e27e5aa4f55170ac6a75e5f73b644"}, + {file = "pydantic-1.10.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a4c805731c33a8db4b6ace45ce440c4ef5336e712508b4d9e1aafa617dc9907f"}, + {file = "pydantic-1.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d49f3db871575e0426b12e2f32fdb25e579dea16486a26e5a0474af87cb1ab0a"}, + {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:37c90345ec7dd2f1bcef82ce49b6235b40f282b94d3eec47e801baf864d15525"}, + {file = "pydantic-1.10.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b5ba54d026c2bd2cb769d3468885f23f43710f651688e91f5fb1edcf0ee9283"}, + {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:05e00dbebbe810b33c7a7362f231893183bcc4251f3f2ff991c31d5c08240c42"}, + {file = "pydantic-1.10.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2d0567e60eb01bccda3a4df01df677adf6b437958d35c12a3ac3e0f078b0ee52"}, + {file = "pydantic-1.10.2-cp311-cp311-win_amd64.whl", hash = "sha256:c6f981882aea41e021f72779ce2a4e87267458cc4d39ea990729e21ef18f0f8c"}, + {file = "pydantic-1.10.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4aac8e7103bf598373208f6299fa9a5cfd1fc571f2d40bf1dd1955a63d6eeb5"}, + {file = 
"pydantic-1.10.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a7b66c3f499108b448f3f004801fcd7d7165fb4200acb03f1c2402da73ce4c"}, + {file = "pydantic-1.10.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bedf309630209e78582ffacda64a21f96f3ed2e51fbf3962d4d488e503420254"}, + {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9300fcbebf85f6339a02c6994b2eb3ff1b9c8c14f502058b5bf349d42447dcf5"}, + {file = "pydantic-1.10.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:216f3bcbf19c726b1cc22b099dd409aa371f55c08800bcea4c44c8f74b73478d"}, + {file = "pydantic-1.10.2-cp37-cp37m-win_amd64.whl", hash = "sha256:dd3f9a40c16daf323cf913593083698caee97df2804aa36c4b3175d5ac1b92a2"}, + {file = "pydantic-1.10.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b97890e56a694486f772d36efd2ba31612739bc6f3caeee50e9e7e3ebd2fdd13"}, + {file = "pydantic-1.10.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9cabf4a7f05a776e7793e72793cd92cc865ea0e83a819f9ae4ecccb1b8aa6116"}, + {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06094d18dd5e6f2bbf93efa54991c3240964bb663b87729ac340eb5014310624"}, + {file = "pydantic-1.10.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cc78cc83110d2f275ec1970e7a831f4e371ee92405332ebfe9860a715f8336e1"}, + {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:1ee433e274268a4b0c8fde7ad9d58ecba12b069a033ecc4645bb6303c062d2e9"}, + {file = "pydantic-1.10.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:7c2abc4393dea97a4ccbb4ec7d8658d4e22c4765b7b9b9445588f16c71ad9965"}, + {file = "pydantic-1.10.2-cp38-cp38-win_amd64.whl", hash = "sha256:0b959f4d8211fc964772b595ebb25f7652da3f22322c007b6fed26846a40685e"}, + {file = "pydantic-1.10.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:c33602f93bfb67779f9c507e4d69451664524389546bacfe1bee13cae6dc7488"}, + {file = "pydantic-1.10.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5760e164b807a48a8f25f8aa1a6d857e6ce62e7ec83ea5d5c5a802eac81bad41"}, + {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6eb843dcc411b6a2237a694f5e1d649fc66c6064d02b204a7e9d194dff81eb4b"}, + {file = "pydantic-1.10.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b8795290deaae348c4eba0cebb196e1c6b98bdbe7f50b2d0d9a4a99716342fe"}, + {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:e0bedafe4bc165ad0a56ac0bd7695df25c50f76961da29c050712596cf092d6d"}, + {file = "pydantic-1.10.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2e05aed07fa02231dbf03d0adb1be1d79cabb09025dd45aa094aa8b4e7b9dcda"}, + {file = "pydantic-1.10.2-cp39-cp39-win_amd64.whl", hash = "sha256:c1ba1afb396148bbc70e9eaa8c06c1716fdddabaf86e7027c5988bae2a829ab6"}, + {file = "pydantic-1.10.2-py3-none-any.whl", hash = "sha256:1b6ee725bd6e83ec78b1aa32c5b1fa67a3a65badddde3976bca5fe4568f27709"}, + {file = "pydantic-1.10.2.tar.gz", hash = "sha256:91b8e218852ef6007c2b98cd861601c6a09f1aa32bbbb74fab5b1c33d4a1e410"}, +] + +[package.dependencies] +typing-extensions = ">=4.1.0" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[[package]] +name = "pyflakes" +version = "3.0.1" +description = "passive checker of Python programs" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyflakes-3.0.1-py2.py3-none-any.whl", hash = "sha256:ec55bf7fe21fff7f1ad2f7da62363d749e2a470500eab1b555334b67aa1ef8cf"}, + {file = "pyflakes-3.0.1.tar.gz", hash = "sha256:ec8b276a6b60bd80defed25add7e439881c19e64850afd9b346283d4165fd0fd"}, +] + +[[package]] +name = "pyinstaller" +version = "5.6.2" +description = "PyInstaller bundles a Python application and all its dependencies into a 
single package." +category = "dev" +optional = false +python-versions = "<3.12,>=3.7" +files = [ + {file = "pyinstaller-5.6.2-py3-none-macosx_10_13_universal2.whl", hash = "sha256:1b1e3b37a22fb36555d917f0c3dfb998159ff4af6d8fa7cc0074d630c6fe81ad"}, + {file = "pyinstaller-5.6.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:05df5d2b9ca645cc6ef61d8a85451d2aabe5501997f1f50cd94306fd6bc0485d"}, + {file = "pyinstaller-5.6.2-py3-none-manylinux2014_i686.whl", hash = "sha256:eb083c25f711769af0898852ea30dcb727ba43990bbdf9ffbaa9c77a7bd0d720"}, + {file = "pyinstaller-5.6.2-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:0d167d57036219914188f1400427dd297b975707e78c32a5511191e607be920a"}, + {file = "pyinstaller-5.6.2-py3-none-manylinux2014_s390x.whl", hash = "sha256:32727232f446aa96e394f01b0c35b3de0dc3513c6ba3e26d1ef64c57edb1e9e5"}, + {file = "pyinstaller-5.6.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:181856ade585b090379ae26b7017dc2c30620e36e3a804b381417a6dc3b2a82b"}, + {file = "pyinstaller-5.6.2-py3-none-musllinux_1_1_aarch64.whl", hash = "sha256:77888f52b61089caa0bee70809bbce9e9b1c613c88b6cb0742ff2a45f1511cbb"}, + {file = "pyinstaller-5.6.2-py3-none-musllinux_1_1_x86_64.whl", hash = "sha256:d888db9afedff290d362ee296d30eb339abeba707ca1565916ce1cd5947131c3"}, + {file = "pyinstaller-5.6.2-py3-none-win32.whl", hash = "sha256:e026adc92c60158741d0bfca27eefaa2414801f61328cb84d0c88241fe8c2087"}, + {file = "pyinstaller-5.6.2-py3-none-win_amd64.whl", hash = "sha256:04ecf805bde2ef25b8e3642410871e6747c22fa7254107f155b8cd179c2a13b6"}, + {file = "pyinstaller-5.6.2.tar.gz", hash = "sha256:865025b6809d777bb0f66d8f8ab50cc97dc3dbe0ff09a1ef1f2fd646432714fc"}, +] + +[package.dependencies] +altgraph = "*" +macholib = {version = ">=1.8", markers = "sys_platform == \"darwin\""} +pefile = {version = ">=2022.5.30", markers = "sys_platform == \"win32\""} +pyinstaller-hooks-contrib = ">=2021.4" +pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} +setuptools = 
"*" + +[package.extras] +encryption = ["tinyaes (>=1.0.0)"] +hook-testing = ["execnet (>=1.5.0)", "psutil", "pytest (>=2.7.3)"] + +[[package]] +name = "pyinstaller-hooks-contrib" +version = "2022.14" +description = "Community maintained hooks for PyInstaller" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyinstaller-hooks-contrib-2022.14.tar.gz", hash = "sha256:5ae8da3a92cf20e37b3e00604d0c3468896e7d746e5c1449473597a724331b0b"}, + {file = "pyinstaller_hooks_contrib-2022.14-py2.py3-none-any.whl", hash = "sha256:1a125838a22d7b35a18993c6e56d3c5cc3ad7da00954f95bc5606523939203f2"}, +] + +[[package]] +name = "pyopenjtalk" +version = "0.3.0" +description = "" +category = "main" +optional = false +python-versions = "*" +files = [] +develop = false + +[package.dependencies] +numpy = ">=1.20.0" +six = "*" +tqdm = "*" + +[package.extras] +docs = ["Jinja2 (>=3.0.1)", "ipython", "jupyter", "nbsphinx (>=0.8.6)", "pandoc", "sphinx_rtd_theme"] +lint = ["black (>=19.19b0,<=20.8)", "click (<8.1.0)", "flake8 (>=3.7,<4)", "flake8-bugbear", "isort (>=4.3,<5.2.0)", "mypy (<=0.910)", "pysen", "types-decorator", "types-setuptools"] +marine = ["marine (>=0.0.5)"] +test = ["pytest", "scipy"] + +[package.source] +type = "git" +url = "https://github.com/VOICEVOX/pyopenjtalk" +reference = "827a3fc5c7dda7bbe832c0c69da98e39cc8cb2c3" +resolved_reference = "827a3fc5c7dda7bbe832c0c69da98e39cc8cb2c3" + +[[package]] +name = "pyrsistent" +version = "0.19.3" +description = "Persistent/Functional/Immutable data structures" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, + {file = 
"pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, + {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = "sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash 
= "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, + {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, + {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, + {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, + {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, +] + +[[package]] +name = "pysen" +version = "0.10.3" +description = 
"Python linting made easy. Also a casual yet honorific way to address individuals who have entered an organization prior to you." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pysen-0.10.3-py3-none-any.whl", hash = "sha256:08e79588bf0aa1e9233554d84ac277679d4998dede7768bc7ca8cc5fee6fc846"}, + {file = "pysen-0.10.3.tar.gz", hash = "sha256:3023f48789a90fe660bcacc59a2e57d62297a04c50222ac5d72aff254b53e55c"}, +] + +[package.dependencies] +colorlog = ">=4.0.0,<5.0.0" +dacite = ">=1.1.0,<2.0.0" +GitPython = ">=3.0.0,<4.0.0" +tomlkit = ">=0.5.11,<1.0.0" +unidiff = ">=0.6.0,<1.0.0" + +[package.extras] +lint = ["black (>=19.10b0,<=22.10)", "flake8 (>=3.7,<5)", "flake8-bugbear", "isort (>=4.3,<5.2.0)", "mypy (>=0.770,<0.800)"] + +[[package]] +name = "pytest" +version = "6.2.5" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pytest-6.2.5-py3-none-any.whl", hash = "sha256:7310f8d27bc79ced999e760ca304d69f6ba6c6649c0b60fb0e04a4a77cacc134"}, + {file = "pytest-6.2.5.tar.gz", hash = "sha256:131b36680866a76e6781d13f101efb86cf674ebb9762eb70d3082b6f29889e89"}, +] + +[package.dependencies] +atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +py = ">=1.8.2" +toml = "*" + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "requests", "xmlschema"] + +[[package]] +name = "python-multipart" +version = "0.0.5" +description = "A streaming multipart parser for Python" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "python-multipart-0.0.5.tar.gz", hash = "sha256:f7bb5f611fc600d15fa47b3974c8aa16e93724513b49b5f95c81e6624c83fa43"}, +] + +[package.dependencies] +six = ">=1.4.0" + +[[package]] +name = "pywin32-ctypes" +version = "0.2.0" 
+description = "" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-ctypes-0.2.0.tar.gz", hash = "sha256:24ffc3b341d457d48e8922352130cf2644024a4ff09762a2261fd34c36ee5942"}, + {file = "pywin32_ctypes-0.2.0-py2.py3-none-any.whl", hash = "sha256:9dc2d991b3479cc2df15930958b674a48a227d5361d413827a4cfd0b5876fc98"}, +] + +[[package]] +name = "pyworld" +version = "0.3.2" +description = "PyWorld is a Python wrapper for WORLD vocoder." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "pyworld-0.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:688730fa5394709a185061e5a58e7a614b4548d814eeecc1dc825f73af53a9aa"}, + {file = "pyworld-0.3.2-cp36-cp36m-win32.whl", hash = "sha256:1e110e2f95d45b0765f4ba4e49b389f9b931c9c438cd69774dce20699cc6dc7d"}, + {file = "pyworld-0.3.2-cp36-cp36m-win_amd64.whl", hash = "sha256:e858668185a177e9e30c0ff12de3e166b39124c14b424ba3be31418694dcb2b7"}, + {file = "pyworld-0.3.2-cp37-cp37m-win32.whl", hash = "sha256:b5325e7a08f104a9bf533d54423546bd3ef05953b80b79a8ced34efbb892862b"}, + {file = "pyworld-0.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:fddd503ac264810221d9460bfdc1454c5c1313214e1c58a4ddd9417699f99bc8"}, + {file = "pyworld-0.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:502fbf577f4e56a497b3ad8c29434ec423eabc4674b93fa11046837d297c97be"}, + {file = "pyworld-0.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:a8ea62219b9bce0e514ff05ee80cfbc1248b165d8d802f00b9b8754510701f3e"}, + {file = "pyworld-0.3.2.tar.gz", hash = "sha256:668d09842c3cfa74b1f6edabdb0058a64c04f9cf17b93883e6da811e1204ad4d"}, +] + +[package.dependencies] +cython = "*" +numpy = "*" + +[package.extras] +sdist = ["cython", "numpy"] +test = ["nose"] + +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, + {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, + {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = 
"PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = 
"PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] + +[[package]] +name = "rapidfuzz" +version = "2.13.7" +description = "rapid fuzzy string matching" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "rapidfuzz-2.13.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b75dd0928ce8e216f88660ab3d5c5ffe990f4dd682fd1709dba29d5dafdde6de"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:24d3fea10680d085fd0a4d76e581bfb2b1074e66e78fd5964d4559e1fcd2a2d4"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8109e0324d21993d5b2d111742bf5958f3516bf8c59f297c5d1cc25a2342eb66"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b5f705652360d520c2de52bee11100c92f59b3e3daca308ebb150cbc58aecdad"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7496e8779905b02abc0ab4ba2a848e802ab99a6e20756ffc967a0de4900bd3da"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:24eb6b843492bdc63c79ee4b2f104059b7a2201fef17f25177f585d3be03405a"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:467c1505362823a5af12b10234cb1c4771ccf124c00e3fc9a43696512bd52293"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:53dcae85956853b787c27c1cb06f18bb450e22cf57a4ad3444cf03b8ff31724a"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:46b9b8aa09998bc48dd800854e8d9b74bc534d7922c1d6e1bbf783e7fa6ac29c"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:1fbad8fb28d98980f5bff33c7842efef0315d42f0cd59082108482a7e6b61410"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = 
"sha256:43fb8cb030f888c3f076d40d428ed5eb4331f5dd6cf1796cfa39c67bf0f0fc1e"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:b6bad92de071cbffa2acd4239c1779f66851b60ffbbda0e4f4e8a2e9b17e7eef"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d00df2e4a81ffa56a6b1ec4d2bc29afdcb7f565e0b8cd3092fece2290c4c7a79"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-win32.whl", hash = "sha256:2c836f0f2d33d4614c3fbaf9a1eb5407c0fe23f8876f47fd15b90f78daa64c34"}, + {file = "rapidfuzz-2.13.7-cp310-cp310-win_amd64.whl", hash = "sha256:c36fd260084bb636b9400bb92016c6bd81fd80e59ed47f2466f85eda1fc9f782"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:b34e8c0e492949ecdd5da46a1cfc856a342e2f0389b379b1a45a3cdcd3176a6e"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:875d51b3497439a72e2d76183e1cb5468f3f979ab2ddfc1d1f7dde3b1ecfb42f"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ae33a72336059213996fe4baca4e0e4860913905c2efb7c991eab33b95a98a0a"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5585189b3d90d81ccd62d4f18530d5ac8972021f0aaaa1ffc6af387ff1dce75"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42085d4b154a8232767de8296ac39c8af5bccee6b823b0507de35f51c9cbc2d7"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:585206112c294e335d84de5d5f179c0f932837752d7420e3de21db7fdc476278"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f891b98f8bc6c9d521785816085e9657212621e93f223917fb8e32f318b2957e"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:08590905a95ccfa43f4df353dcc5d28c15d70664299c64abcad8721d89adce4f"}, + {file = 
"rapidfuzz-2.13.7-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b5dd713a1734574c2850c566ac4286594bacbc2d60b9170b795bee4b68656625"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:988f8f6abfba7ee79449f8b50687c174733b079521c3cc121d65ad2d38831846"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b3210869161a864f3831635bb13d24f4708c0aa7208ef5baac1ac4d46e9b4208"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f6fe570e20e293eb50491ae14ddeef71a6a7e5f59d7e791393ffa99b13f1f8c2"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6120f2995f5154057454c5de99d86b4ef3b38397899b5da1265467e8980b2f60"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-win32.whl", hash = "sha256:b20141fa6cee041917801de0bab503447196d372d4c7ee9a03721b0a8edf5337"}, + {file = "rapidfuzz-2.13.7-cp311-cp311-win_amd64.whl", hash = "sha256:ec55a81ac2b0f41b8d6fb29aad16e55417036c7563bad5568686931aa4ff08f7"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:7d005e058d86f2a968a8d28ca6f2052fab1f124a39035aa0523261d6baf21e1f"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fe59a0c21a032024edb0c8e43f5dee5623fef0b65a1e3c1281836d9ce199af3b"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfc04f7647c29fb48da7a04082c34cdb16f878d3c6d098d62d5715c0ad3000c"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:68a89bb06d5a331511961f4d3fa7606f8e21237467ba9997cae6f67a1c2c2b9e"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:effe182767d102cb65dfbbf74192237dbd22d4191928d59415aa7d7c861d8c88"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:25b4cedf2aa19fb7212894ce5f5219010cce611b60350e9a0a4d492122e7b351"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:3a9bd02e1679c0fd2ecf69b72d0652dbe2a9844eaf04a36ddf4adfbd70010e95"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5e2b3d020219baa75f82a4e24b7c8adcb598c62f0e54e763c39361a9e5bad510"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:cf62dacb3f9234f3fddd74e178e6d25c68f2067fde765f1d95f87b1381248f58"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:fa263135b892686e11d5b84f6a1892523123a00b7e5882eff4fbdabb38667347"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fa4c598ed77f74ec973247ca776341200b0f93ec3883e34c222907ce72cb92a4"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-win32.whl", hash = "sha256:c2523f8180ebd9796c18d809e9a19075a1060b1a170fde3799e83db940c1b6d5"}, + {file = "rapidfuzz-2.13.7-cp37-cp37m-win_amd64.whl", hash = "sha256:5ada0a14c67452358c1ee52ad14b80517a87b944897aaec3e875279371a9cb96"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ca8a23097c1f50e0fdb4de9e427537ca122a18df2eead06ed39c3a0bef6d9d3a"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9be02162af0376d64b840f2fc8ee3366794fc149f1e06d095a6a1d42447d97c5"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af4f7c3c904ca709493eb66ca9080b44190c38e9ecb3b48b96d38825d5672559"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f50d1227e6e2a0e3ae1fb1c9a2e1c59577d3051af72c7cab2bcc430cb5e18da"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c71d9d512b76f05fa00282227c2ae884abb60e09f08b5ca3132b7e7431ac7f0d"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b52ac2626945cd21a2487aeefed794c14ee31514c8ae69b7599170418211e6f6"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ca00fafd2756bc9649bf80f1cf72c647dce38635f0695d7ce804bc0f759aa756"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d248a109699ce9992304e79c1f8735c82cc4c1386cd8e27027329c0549f248a2"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c88adbcb933f6b8612f6c593384bf824e562bb35fc8a0f55fac690ab5b3486e5"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c8601a66fbfc0052bb7860d2eacd303fcde3c14e87fdde409eceff516d659e77"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:27be9c63215d302ede7d654142a2e21f0d34ea6acba512a4ae4cfd52bbaa5b59"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3dcffe1f3cbda0dc32133a2ae2255526561ca594f15f9644384549037b355245"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8450d15f7765482e86ef9be2ad1a05683cd826f59ad236ef7b9fb606464a56aa"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-win32.whl", hash = "sha256:460853983ab88f873173e27cc601c5276d469388e6ad6e08c4fd57b2a86f1064"}, + {file = "rapidfuzz-2.13.7-cp38-cp38-win_amd64.whl", hash = "sha256:424f82c35dbe4f83bdc3b490d7d696a1dc6423b3d911460f5493b7ffae999fd2"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c3fbe449d869ea4d0909fc9d862007fb39a584fb0b73349a6aab336f0d90eaed"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:16080c05a63d6042643ae9b6cfec1aefd3e61cef53d0abe0df3069b9d4b72077"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:dbcf5371ea704759fcce772c66a07647751d1f5dbdec7818331c9b31ae996c77"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:114810491efb25464016fd554fdf1e20d390309cecef62587494fc474d4b926f"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:99a84ab9ac9a823e7e93b4414f86344052a5f3e23b23aa365cda01393ad895bd"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:81642a24798851b118f82884205fc1bd9ff70b655c04018c467824b6ecc1fabc"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3741cb0bf9794783028e8b0cf23dab917fa5e37a6093b94c4c2f805f8e36b9f"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:759a3361711586a29bc753d3d1bdb862983bd9b9f37fbd7f6216c24f7c972554"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1333fb3d603d6b1040e365dca4892ba72c7e896df77a54eae27dc07db90906e3"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:916bc2e6cf492c77ad6deb7bcd088f0ce9c607aaeabc543edeb703e1fbc43e31"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:23524635840500ce6f4d25005c9529a97621689c85d2f727c52eed1782839a6a"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:ebe303cd9839af69dd1f7942acaa80b1ba90bacef2e7ded9347fbed4f1654672"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fe56659ccadbee97908132135de4b875543353351e0c92e736b7c57aee298b5a"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-win32.whl", hash = "sha256:3f11a7eff7bc6301cd6a5d43f309e22a815af07e1f08eeb2182892fca04c86cb"}, + {file = "rapidfuzz-2.13.7-cp39-cp39-win_amd64.whl", hash = "sha256:e8914dad106dacb0775718e54bf15e528055c4e92fb2677842996f2d52da5069"}, + {file = "rapidfuzz-2.13.7-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f7930adf84301797c3f09c94b9c5a9ed90a9e8b8ed19b41d2384937e0f9f5bd"}, + {file = 
"rapidfuzz-2.13.7-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c31022d9970177f6affc6d5dd757ed22e44a10890212032fabab903fdee3bfe7"}, + {file = "rapidfuzz-2.13.7-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f42b82f268689f429def9ecfb86fa65ceea0eaf3fed408b570fe113311bf5ce7"}, + {file = "rapidfuzz-2.13.7-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8b477b43ced896301665183a5e0faec0f5aea2373005648da8bdcb3c4b73f280"}, + {file = "rapidfuzz-2.13.7-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:d63def9bbc6b35aef4d76dc740301a4185867e8870cbb8719ec9de672212fca8"}, + {file = "rapidfuzz-2.13.7-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:c66546e30addb04a16cd864f10f5821272a1bfe6462ee5605613b4f1cb6f7b48"}, + {file = "rapidfuzz-2.13.7-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f799d1d6c33d81e983d3682571cc7d993ae7ff772c19b3aabb767039c33f6d1e"}, + {file = "rapidfuzz-2.13.7-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d82f20c0060ffdaadaf642b88ab0aa52365b56dffae812e188e5bdb998043588"}, + {file = "rapidfuzz-2.13.7-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:042644133244bfa7b20de635d500eb9f46af7097f3d90b1724f94866f17cb55e"}, + {file = "rapidfuzz-2.13.7-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:75c45dcd595f8178412367e302fd022860ea025dc4a78b197b35428081ed33d5"}, + {file = "rapidfuzz-2.13.7-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3d8b081988d0a49c486e4e845a547565fee7c6e7ad8be57ff29c3d7c14c6894c"}, + {file = "rapidfuzz-2.13.7-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:16ffad751f43ab61001187b3fb4a9447ec2d1aedeff7c5bac86d3b95f9980cc3"}, + {file = "rapidfuzz-2.13.7-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:020858dd89b60ce38811cd6e37875c4c3c8d7fcd8bc20a0ad2ed1f464b34dc4e"}, + 
{file = "rapidfuzz-2.13.7-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cda1e2f66bb4ba7261a0f4c2d052d5d909798fca557cbff68f8a79a87d66a18f"}, + {file = "rapidfuzz-2.13.7-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:b6389c50d8d214c9cd11a77f6d501529cb23279a9c9cafe519a3a4b503b5f72a"}, + {file = "rapidfuzz-2.13.7.tar.gz", hash = "sha256:8d3e252d4127c79b4d7c2ae47271636cbaca905c8bb46d80c7930ab906cf4b5c"}, +] + +[package.extras] +full = ["numpy"] + +[[package]] +name = "requests" +version = "2.28.1" +description = "Python HTTP for Humans." +category = "main" +optional = false +python-versions = ">=3.7, <4" +files = [ + {file = "requests-2.28.1-py3-none-any.whl", hash = "sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349"}, + {file = "requests-2.28.1.tar.gz", hash = "sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<3" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<1.27" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "requests-toolbelt" +version = "0.10.1" +description = "A utility belt for advanced users of python-requests" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "requests-toolbelt-0.10.1.tar.gz", hash = "sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d"}, + {file = "requests_toolbelt-0.10.1-py2.py3-none-any.whl", hash = "sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7"}, +] + +[package.dependencies] +requests = ">=2.0.1,<3.0.0" + +[[package]] +name = "scipy" +version = "1.10.1" +description = "Fundamental algorithms for scientific computing in Python" +category = "main" +optional = false +python-versions = "<3.12,>=3.8" +files = [ + {file = "scipy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = 
"sha256:e7354fd7527a4b0377ce55f286805b34e8c54b91be865bac273f527e1b839019"}, + {file = "scipy-1.10.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4b3f429188c66603a1a5c549fb414e4d3bdc2a24792e061ffbd607d3d75fd84e"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1553b5dcddd64ba9a0d95355e63fe6c3fc303a8fd77c7bc91e77d61363f7433f"}, + {file = "scipy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c0ff64b06b10e35215abce517252b375e580a6125fd5fdf6421b98efbefb2d2"}, + {file = "scipy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:fae8a7b898c42dffe3f7361c40d5952b6bf32d10c4569098d276b4c547905ee1"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0f1564ea217e82c1bbe75ddf7285ba0709ecd503f048cb1236ae9995f64217bd"}, + {file = "scipy-1.10.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:d925fa1c81b772882aa55bcc10bf88324dadb66ff85d548c71515f6689c6dac5"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaea0a6be54462ec027de54fca511540980d1e9eea68b2d5c1dbfe084797be35"}, + {file = "scipy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:15a35c4242ec5f292c3dd364a7c71a61be87a3d4ddcc693372813c0b73c9af1d"}, + {file = "scipy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:43b8e0bcb877faf0abfb613d51026cd5cc78918e9530e375727bf0625c82788f"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5678f88c68ea866ed9ebe3a989091088553ba12c6090244fdae3e467b1139c35"}, + {file = "scipy-1.10.1-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:39becb03541f9e58243f4197584286e339029e8908c46f7221abeea4b749fa88"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bce5869c8d68cf383ce240e44c1d9ae7c06078a9396df68ce88a1230f93a30c1"}, + {file = "scipy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:07c3457ce0b3ad5124f98a86533106b643dd811dd61b548e78cf4c8786652f6f"}, + {file = "scipy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:049a8bbf0ad95277ffba9b3b7d23e5369cc39e66406d60422c8cfef40ccc8415"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cd9f1027ff30d90618914a64ca9b1a77a431159df0e2a195d8a9e8a04c78abf9"}, + {file = "scipy-1.10.1-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:79c8e5a6c6ffaf3a2262ef1be1e108a035cf4f05c14df56057b64acc5bebffb6"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:51af417a000d2dbe1ec6c372dfe688e041a7084da4fdd350aeb139bd3fb55353"}, + {file = "scipy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b4735d6c28aad3cdcf52117e0e91d6b39acd4272f3f5cd9907c24ee931ad601"}, + {file = "scipy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ff7f37b1bf4417baca958d254e8e2875d0cc23aaadbe65b3d5b3077b0eb23ea"}, + {file = "scipy-1.10.1.tar.gz", hash = "sha256:2cf9dfb80a7b4589ba4c40ce7588986d6d5cebc5457cad2c2880f6bc2d42f3a5"}, +] + +[package.dependencies] +numpy = ">=1.19.5,<1.27.0" + +[package.extras] +dev = ["click", "doit (>=0.36.0)", "flake8", "mypy", "pycodestyle", "pydevtool", "rich-click", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"] +test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "secretstorage" +version = "3.3.3" +description = "Python bindings to FreeDesktop.org Secret Service API" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"}, + {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"}, +] + 
+[package.dependencies] +cryptography = ">=2.0" +jeepney = ">=0.6" + +[[package]] +name = "semver" +version = "3.0.0" +description = "Python helper for Semantic Versioning (https://semver.org)" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "semver-3.0.0-py3-none-any.whl", hash = "sha256:ab4f69fb1d1ecfb5d81f96411403d7a611fa788c45d252cf5b408025df3ab6ce"}, + {file = "semver-3.0.0.tar.gz", hash = "sha256:94df43924c4521ec7d307fc86da1531db6c2c33d9d5cdc3e64cca0eb68569269"}, +] + +[[package]] +name = "setuptools" +version = "65.6.3" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-65.6.3-py3-none-any.whl", hash = "sha256:57f6f22bde4e042978bcd50176fdb381d7c21a9efa4041202288d3737a0c6a54"}, + {file = "setuptools-65.6.3.tar.gz", hash = "sha256:a7620757bf984b58deaf32fc8a4577a9bbc0850cf92c20e1ce41c38c19e5fb75"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8 (<5)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shellingham" +version 
= "1.5.0.post1" +description = "Tool to Detect Surrounding Shell" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "shellingham-1.5.0.post1-py2.py3-none-any.whl", hash = "sha256:368bf8c00754fd4f55afb7bbb86e272df77e4dc76ac29dbcbb81a59e9fc15744"}, + {file = "shellingham-1.5.0.post1.tar.gz", hash = "sha256:823bc5fb5c34d60f285b624e7264f4dda254bc803a3774a147bf99c0e3004a28"}, +] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "smmap" +version = "5.0.0" +description = "A pure Python implementation of a sliding window memory map manager" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "smmap-5.0.0-py3-none-any.whl", hash = "sha256:2aba19d6a040e78d8b09de5c57e96207b09ed71d8e55ce0959eeee6c8e190d94"}, + {file = "smmap-5.0.0.tar.gz", hash = "sha256:c840e62059cd3be204b0c9c9f74be2c09d5648eddd4580d9314c3ecde0b30936"}, +] + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "soundfile" +version = "0.10.3.post1" +description = "An audio library based on libsndfile, CFFI and NumPy" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = 
"SoundFile-0.10.3.post1-py2.py3-none-any.whl", hash = "sha256:2d17e0a6fc2af0d6c1d868bafa5ec80aae6e186a97fec8db07ad6af29842fbc7"}, + {file = "SoundFile-0.10.3.post1-py2.py3.cp26.cp27.cp32.cp33.cp34.cp35.cp36.pp27.pp32.pp33-none-macosx_10_5_x86_64.macosx_10_6_intel.macosx_10_9_intel.macosx_10_9_x86_64.whl", hash = "sha256:5e342ee293b896d31da67617fe65d0bdca217af193991b0cb6052353b1e0e506"}, + {file = "SoundFile-0.10.3.post1-py2.py3.cp26.cp27.cp32.cp33.cp34.cp35.cp36.pp27.pp32.pp33-none-win32.whl", hash = "sha256:4555438c2c4f02b39fea2ed40f6ddeda88a80cd1ee9dd129be4d5f5134698cc2"}, + {file = "SoundFile-0.10.3.post1-py2.py3.cp26.cp27.cp32.cp33.cp34.cp35.cp36.pp27.pp32.pp33-none-win_amd64.whl", hash = "sha256:b361d4ac1519a2e516cabafa6bf7e93492f999f35d7d25350cd87fdc3e5cb27e"}, + {file = "SoundFile-0.10.3.post1.tar.gz", hash = "sha256:490cff42650733d1832728b937fe99fa1802896f5ef4d61bcf78cf7ebecb107b"}, +] + +[package.dependencies] +cffi = ">=1.0" + +[package.extras] +numpy = ["numpy"] + +[[package]] +name = "starlette" +version = "0.16.0" +description = "The little ASGI library that shines." 
+category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "starlette-0.16.0-py3-none-any.whl", hash = "sha256:38eb24bf705a2c317e15868e384c1b8a12ca396e5a3c3a003db7e667c43f939f"}, + {file = "starlette-0.16.0.tar.gz", hash = "sha256:e1904b5d0007aee24bdd3c43994be9b3b729f4f58e740200de1d623f8c3a8870"}, +] + +[package.dependencies] +anyio = ">=3.0.0,<4" + +[package.extras] +full = ["graphene", "itsdangerous", "jinja2", "python-multipart", "pyyaml", "requests"] + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] + +[[package]] +name = "tomlkit" +version = "0.11.6" +description = "Style preserving TOML library" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "tomlkit-0.11.6-py3-none-any.whl", hash = "sha256:07de26b0d8cfc18f871aec595fda24d95b08fef89d147caa861939f37230bf4b"}, + {file = "tomlkit-0.11.6.tar.gz", hash = "sha256:71b952e5721688937fb02cf9d354dbcf0785066149d2855e44531ebdd2b65d73"}, +] + +[[package]] +name = "tqdm" +version = "4.64.1" +description = "Fast, Extensible Progress Meter" +category = "main" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7" +files = [ + {file = "tqdm-4.64.1-py2.py3-none-any.whl", hash = "sha256:6fee160d6ffcd1b1c68c65f14c829c22832bc401726335ce92c52d395944a6a1"}, + {file = "tqdm-4.64.1.tar.gz", hash = "sha256:5f4f682a004951c1b450bc753c710e9280c5746ce6ffedee253ddbcbf54cf1e4"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["py-make (>=0.1.0)", "twine", "wheel"] 
+notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "trove-classifiers" +version = "2022.12.22" +description = "Canonical source for classifiers on PyPI (pypi.org)." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "trove-classifiers-2022.12.22.tar.gz", hash = "sha256:fe0fe3f085987161aee2a5a853c7cc7cdf64515c5965d57ad968fdd8cc3b0362"}, + {file = "trove_classifiers-2022.12.22-py3-none-any.whl", hash = "sha256:f0013fd4ce06cfae879a2580ab6acc3d51ec93ecb364d6c061b6c77b44dd72de"}, +] + +[[package]] +name = "typing-extensions" +version = "4.4.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, + {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, +] + +[[package]] +name = "unidiff" +version = "0.7.4" +description = "Unified diff parsing/metadata extraction library." +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "unidiff-0.7.4-py2.py3-none-any.whl", hash = "sha256:688622add422f84a873498cc4ff9bf50da5ea6c23dea908f19d2190fa39a8e39"}, + {file = "unidiff-0.7.4.tar.gz", hash = "sha256:2bbcbc986e1fb97f04b1d7b864aa6002ab02f4d8a996bf03aa6e5a81447d1fc5"}, +] + +[[package]] +name = "urllib3" +version = "1.26.13" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+category = "main" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" +files = [ + {file = "urllib3-1.26.13-py2.py3-none-any.whl", hash = "sha256:47cc05d99aaa09c9e72ed5809b60e7ba354e64b59c9c173ac3018642d8bb41fc"}, + {file = "urllib3-1.26.13.tar.gz", hash = "sha256:c083dd0dce68dbfbe1129d5271cb90f9447dea7d52097c6e0126120c521ddea8"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] +secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "uvicorn" +version = "0.15.0" +description = "The lightning-fast ASGI server." +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "uvicorn-0.15.0-py3-none-any.whl", hash = "sha256:17f898c64c71a2640514d4089da2689e5db1ce5d4086c2d53699bf99513421c1"}, + {file = "uvicorn-0.15.0.tar.gz", hash = "sha256:d9a3c0dd1ca86728d3e235182683b4cf94cd53a867c288eaeca80ee781b2caff"}, +] + +[package.dependencies] +asgiref = ">=3.4.0" +click = ">=7.0" +h11 = ">=0.8" + +[package.extras] +standard = ["PyYAML (>=5.1)", "colorama (>=0.4)", "httptools (>=0.2.0,<0.3.0)", "python-dotenv (>=0.13)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchgod (>=0.6)", "websockets (>=9.1)"] + +[[package]] +name = "virtualenv" +version = "20.17.1" +description = "Virtual Python Environment builder" +category = "dev" +optional = false +python-versions = ">=3.6" +files = [ + {file = "virtualenv-20.17.1-py3-none-any.whl", hash = "sha256:ce3b1684d6e1a20a3e5ed36795a97dfc6af29bc3970ca8dab93e11ac6094b3c4"}, + {file = "virtualenv-20.17.1.tar.gz", hash = "sha256:f8b927684efc6f1cc206c9db297a570ab9ad0e51c16fa9e45487d36d1905c058"}, +] + +[package.dependencies] +distlib = ">=0.3.6,<1" +filelock = ">=3.4.1,<4" +platformdirs = ">=2.4,<3" + +[package.extras] +docs = ["proselint (>=0.13)", "sphinx (>=5.3)", "sphinx-argparse (>=0.3.2)", 
"sphinx-rtd-theme (>=1)", "towncrier (>=22.8)"] +testing = ["coverage (>=6.2)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=21.3)", "pytest (>=7.0.1)", "pytest-env (>=0.6.2)", "pytest-freezegun (>=0.4.2)", "pytest-mock (>=3.6.1)", "pytest-randomly (>=3.10.3)", "pytest-timeout (>=2.1)"] + +[[package]] +name = "wcwidth" +version = "0.2.6" +description = "Measures the displayed width of unicode strings in a terminal" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, + {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + +[[package]] +name = "xattr" +version = "0.10.1" +description = "Python wrapper for extended filesystem attributes" +category = "dev" +optional = false +python-versions = "*" +files = [ + {file = "xattr-0.10.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:16a660a883e703b311d1bbbcafc74fa877585ec081cd96e8dd9302c028408ab1"}, + {file = "xattr-0.10.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:1e2973e72faa87ca29d61c23b58c3c89fe102d1b68e091848b0e21a104123503"}, + {file = "xattr-0.10.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:13279fe8f7982e3cdb0e088d5cb340ce9cbe5ef92504b1fd80a0d3591d662f68"}, + {file = "xattr-0.10.1-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:1dc9b9f580ef4b8ac5e2c04c16b4d5086a611889ac14ecb2e7e87170623a0b75"}, + {file = 
"xattr-0.10.1-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:485539262c2b1f5acd6b6ea56e0da2bc281a51f74335c351ea609c23d82c9a79"}, + {file = "xattr-0.10.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:295b3ab335fcd06ca0a9114439b34120968732e3f5e9d16f456d5ec4fa47a0a2"}, + {file = "xattr-0.10.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:a126eb38e14a2f273d584a692fe36cff760395bf7fc061ef059224efdb4eb62c"}, + {file = "xattr-0.10.1-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:b0e919c24f5b74428afa91507b15e7d2ef63aba98e704ad13d33bed1288dca81"}, + {file = "xattr-0.10.1-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:e31d062cfe1aaeab6ba3db6bd255f012d105271018e647645941d6609376af18"}, + {file = "xattr-0.10.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:209fb84c09b41c2e4cf16dd2f481bb4a6e2e81f659a47a60091b9bcb2e388840"}, + {file = "xattr-0.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c4120090dac33eddffc27e487f9c8f16b29ff3f3f8bcb2251b2c6c3f974ca1e1"}, + {file = "xattr-0.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3e739d624491267ec5bb740f4eada93491de429d38d2fcdfb97b25efe1288eca"}, + {file = "xattr-0.10.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2677d40b95636f3482bdaf64ed9138fb4d8376fb7933f434614744780e46e42d"}, + {file = "xattr-0.10.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40039f1532c4456fd0f4c54e9d4e01eb8201248c321c6c6856262d87e9a99593"}, + {file = "xattr-0.10.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:148466e5bb168aba98f80850cf976e931469a3c6eb11e9880d9f6f8b1e66bd06"}, + {file = "xattr-0.10.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0aedf55b116beb6427e6f7958ccd80a8cbc80e82f87a4cd975ccb61a8d27b2ee"}, + {file = "xattr-0.10.1-cp310-cp310-musllinux_1_1_i686.whl", hash = 
"sha256:c3024a9ff157247c8190dd0eb54db4a64277f21361b2f756319d9d3cf20e475f"}, + {file = "xattr-0.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f1be6e733e9698f645dbb98565bb8df9b75e80e15a21eb52787d7d96800e823b"}, + {file = "xattr-0.10.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7880c8a54c18bc091a4ce0adc5c6d81da1c748aec2fe7ac586d204d6ec7eca5b"}, + {file = "xattr-0.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:89c93b42c3ba8aedbc29da759f152731196c2492a2154371c0aae3ef8ba8301b"}, + {file = "xattr-0.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6b905e808df61b677eb972f915f8a751960284358b520d0601c8cbc476ba2df6"}, + {file = "xattr-0.10.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d1ef954d0655f93a34d07d0cc7e02765ec779ff0b59dc898ee08c6326ad614d5"}, + {file = "xattr-0.10.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:199b20301b6acc9022661412346714ce764d322068ef387c4de38062474db76c"}, + {file = "xattr-0.10.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ec0956a8ab0f0d3f9011ba480f1e1271b703d11542375ef73eb8695a6bd4b78b"}, + {file = "xattr-0.10.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:ffcb57ca1be338d69edad93cf59aac7c6bb4dbb92fd7bf8d456c69ea42f7e6d2"}, + {file = "xattr-0.10.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:1f0563196ee54756fe2047627d316977dc77d11acd7a07970336e1a711e934db"}, + {file = "xattr-0.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc354f086f926a1c7f04886f97880fed1a26d20e3bc338d0d965fd161dbdb8ab"}, + {file = "xattr-0.10.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:c0cd2d02ef2fb45ecf2b0da066a58472d54682c6d4f0452dfe7ae2f3a76a42ea"}, + {file = "xattr-0.10.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:49626096ddd72dcc1654aadd84b103577d8424f26524a48d199847b5d55612d0"}, + {file = 
"xattr-0.10.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ceaa26bef8fcb17eb59d92a7481c2d15d20211e217772fb43c08c859b01afc6a"}, + {file = "xattr-0.10.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e8c014c371391f28f8cd27d73ea59f42b30772cd640b5a2538ad4f440fd9190b"}, + {file = "xattr-0.10.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:46c32cd605673606b9388a313b0050ee7877a0640d7561eea243ace4fa2cc5a6"}, + {file = "xattr-0.10.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:772b22c4ff791fe5816a7c2a1c9fcba83f9ab9bea138eb44d4d70f34676232b4"}, + {file = "xattr-0.10.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:183ad611a2d70b5a3f5f7aadef0fcef604ea33dcf508228765fd4ddac2c7321d"}, + {file = "xattr-0.10.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8068df3ebdfa9411e58d5ae4a05d807ec5994645bb01af66ec9f6da718b65c5b"}, + {file = "xattr-0.10.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bc40570155beb85e963ae45300a530223d9822edfdf09991b880e69625ba38a"}, + {file = "xattr-0.10.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:436e1aaf23c07e15bed63115f1712d2097e207214fc6bcde147c1efede37e2c5"}, + {file = "xattr-0.10.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7298455ccf3a922d403339781b10299b858bb5ec76435445f2da46fb768e31a5"}, + {file = "xattr-0.10.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:986c2305c6c1a08f78611eb38ef9f1f47682774ce954efb5a4f3715e8da00d5f"}, + {file = "xattr-0.10.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5dc6099e76e33fa3082a905fe59df766b196534c705cf7a2e3ad9bed2b8a180e"}, + {file = "xattr-0.10.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:042ad818cda6013162c0bfd3816f6b74b7700e73c908cde6768da824686885f8"}, + {file = 
"xattr-0.10.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:9d4c306828a45b41b76ca17adc26ac3dc00a80e01a5ba85d71df2a3e948828f2"}, + {file = "xattr-0.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:a606280b0c9071ef52572434ecd3648407b20df3d27af02c6592e84486b05894"}, + {file = "xattr-0.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5b49d591cf34cda2079fd7a5cb2a7a1519f54dc2e62abe3e0720036f6ed41a85"}, + {file = "xattr-0.10.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b8705ac6791426559c1a5c2b88bb2f0e83dc5616a09b4500899bfff6a929302"}, + {file = "xattr-0.10.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a5ea974930e876bc5c146f54ac0f85bb39b7b5de2b6fc63f90364712ae368ebe"}, + {file = "xattr-0.10.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f55a2dd73a12a1ae5113c5d9cd4b4ab6bf7950f4d76d0a1a0c0c4264d50da61d"}, + {file = "xattr-0.10.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:475c38da0d3614cc5564467c4efece1e38bd0705a4dbecf8deeb0564a86fb010"}, + {file = "xattr-0.10.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:925284a4a28e369459b2b7481ea22840eed3e0573a4a4c06b6b0614ecd27d0a7"}, + {file = "xattr-0.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:aa32f1b45fed9122bed911de0fcc654da349e1f04fa4a9c8ef9b53e1cc98b91e"}, + {file = "xattr-0.10.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c5d3d0e728bace64b74c475eb4da6148cd172b2d23021a1dcd055d92f17619ac"}, + {file = "xattr-0.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8faaacf311e2b5cc67c030c999167a78a9906073e6abf08eaa8cf05b0416515c"}, + {file = "xattr-0.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:cc6b8d5ca452674e1a96e246a3d2db5f477aecbc7c945c73f890f56323e75203"}, + {file = "xattr-0.10.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:3725746a6502f40f72ef27e0c7bfc31052a239503ff3eefa807d6b02a249be22"}, + {file = "xattr-0.10.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:789bd406d1aad6735e97b20c6d6a1701e1c0661136be9be862e6a04564da771f"}, + {file = "xattr-0.10.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a9a7a807ab538210ff8532220d8fc5e2d51c212681f63dbd4e7ede32543b070f"}, + {file = "xattr-0.10.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3e5825b5fc99ecdd493b0cc09ec35391e7a451394fdf623a88b24726011c950d"}, + {file = "xattr-0.10.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:80638d1ce7189dc52f26c234cee3522f060fadab6a8bc3562fe0ddcbe11ba5a4"}, + {file = "xattr-0.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3ff0dbe4a6ce2ce065c6de08f415bcb270ecfd7bf1655a633ddeac695ce8b250"}, + {file = "xattr-0.10.1-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5267e5f9435c840d2674194150b511bef929fa7d3bc942a4a75b9eddef18d8d8"}, + {file = "xattr-0.10.1-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b27dfc13b193cb290d5d9e62f806bb9a99b00cd73bb6370d556116ad7bb5dc12"}, + {file = "xattr-0.10.1-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:636ebdde0277bce4d12d2ef2550885804834418fee0eb456b69be928e604ecc4"}, + {file = "xattr-0.10.1-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d60c27922ec80310b45574351f71e0dd3a139c5295e8f8b19d19c0010196544f"}, + {file = "xattr-0.10.1-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:b34df5aad035d0343bd740a95ca30db99b776e2630dca9cc1ba8e682c9cc25ea"}, + {file = "xattr-0.10.1-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f24a7c04ff666d0fe905dfee0a84bc899d624aeb6dccd1ea86b5c347f15c20c1"}, + {file = 
"xattr-0.10.1-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a3878e1aff8eca64badad8f6d896cb98c52984b1e9cd9668a3ab70294d1ef92d"}, + {file = "xattr-0.10.1-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4abef557028c551d59cf2fb3bf63f2a0c89f00d77e54c1c15282ecdd56943496"}, + {file = "xattr-0.10.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0e14bd5965d3db173d6983abdc1241c22219385c22df8b0eb8f1846c15ce1fee"}, + {file = "xattr-0.10.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f9be588a4b6043b03777d50654c6079af3da60cc37527dbb80d36ec98842b1e"}, + {file = "xattr-0.10.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7bc4ae264aa679aacf964abf3ea88e147eb4a22aea6af8c6d03ebdebd64cfd6"}, + {file = "xattr-0.10.1-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:827b5a97673b9997067fde383a7f7dc67342403093b94ea3c24ae0f4f1fec649"}, + {file = "xattr-0.10.1.tar.gz", hash = "sha256:c12e7d81ffaa0605b3ac8c22c2994a8e18a9cf1c59287a1b7722a2289c952ec5"}, +] + +[package.dependencies] +cffi = ">=1.0" + +[[package]] +name = "zipp" +version = "3.11.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "dev" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zipp-3.11.0-py3-none-any.whl", hash = "sha256:83a28fcb75844b5c0cdaf5aa4003c2d728c77e05f5aeabe8e95e56727005fbaa"}, + {file = "zipp-3.11.0.tar.gz", hash = "sha256:a7a22e05929290a67401440b39690ae6563279bced5f314609d9d03798f56766"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", 
"pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "2.0" +python-versions = "~3.11" +content-hash = "bee5ebb211d4814a813a618a1325354c79d66b94bf799c8077339738d567e11f" diff --git a/presets.yaml b/presets.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f554e61cba1005bba8af9de5bda1f1740ef4dbe --- /dev/null +++ b/presets.yaml @@ -0,0 +1,10 @@ +- id: 1 + name: サンプルプリセット + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: 0 + speedScale: 1 + pitchScale: 0 + intonationScale: 1 + volumeScale: 1 + prePhonemeLength: 0.1 + postPhonemeLength: 0.1 diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..923e8985029002960e2538be40e56c5eee041f26 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,82 @@ +[tool.pysen] +version = "0.10.3" + +[tool.pysen.lint] +enable_black = true +enable_flake8 = true +enable_isort = true +enable_mypy = false # TODO: eliminate errors and enable at CI +mypy_preset = "entry" # TODO: "strict" +line_length = 88 +py_version = "py310" # TODO: update to py311 after pysen supports Python 3.11 +isort_known_first_party = ["voicevox_engine"] +isort_known_third_party = ["numpy"] +[[tool.pysen.lint.mypy_targets]] + paths = [".", "voicevox_engine/"] + +[tool.black] # automatically generated by pysen +# pysen ignores and overwrites any modifications +line-length = 88 +target-version = ["py310"] # TODO: update to py311 after pysen supports Python 3.11 + + +[tool.isort] # automatically generated by pysen +# pysen ignores and overwrites any modifications +default_section = "THIRDPARTY" +ensure_newline_before_comments = true +force_grid_wrap = 0 +force_single_line = false +include_trailing_comma = true +known_first_party = ["voicevox_engine"] +known_third_party = ["numpy"] +line_length = 88 +multi_line_output = 3 +use_parentheses = true + +[tool.poetry] +name = 
"voicevox_engine" +version = "0.0.0" +description = "" +authors = ["Hiroshiba "] + +[tool.poetry.dependencies] +python = "~3.11" +numpy = "^1.20.0" +fastapi = "^0.70.0" +python-multipart = "^0.0.5" +uvicorn = "^0.15.0" +aiofiles = "^0.7.0" +soundfile = "^0.10.3.post1" +scipy = "^1.7.1" +pyyaml = "^6.0" +pyworld = "^0.3.0" +appdirs = "^1.4.4" +requests = "^2.28.1" +jinja2 = "^3.1.2" +pyopenjtalk = {git = "https://github.com/VOICEVOX/pyopenjtalk", rev = "827a3fc5c7dda7bbe832c0c69da98e39cc8cb2c3"} +semver = "^3.0.0" + +[tool.poetry.group.dev.dependencies] +cython = "^0.29.34,>=0.29.33" # NOTE: for Python 3.11 +pyinstaller = "^5.6,<5.7.0" # NOTE: 5.7.0 or higher will fail to build the bootloader. +pre-commit = "^2.16.0" +atomicwrites = "^1.4.0" +colorama = "^0.4.4" +poetry = "^1.3.1" + +[tool.poetry.group.test.dependencies] +pysen = "~0.10.3" +black = "^22.12.0" +flake8-bugbear = "^23.1.0" +flake8 = "^6.0.0" +isort = "^5.12.0" +mypy = "~0.991" +pytest = "^6.2.5" +coveralls = "^3.2.0" + +[tool.poetry.group.license.dependencies] +pip-licenses = "^4.2.0" + +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 0000000000000000000000000000000000000000..8dc061e50c9fbdb97e63c8d7a108219e3d905f50 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,83 @@ +aiofiles==0.7.0 ; python_version >= "3.11" and python_version < "3.12" +altgraph==0.17.3 ; python_version >= "3.11" and python_version < "3.12" +anyio==3.6.2 ; python_version >= "3.11" and python_version < "3.12" +appdirs==1.4.4 ; python_version >= "3.11" and python_version < "3.12" +asgiref==3.6.0 ; python_version >= "3.11" and python_version < "3.12" +atomicwrites==1.4.0 ; python_version >= "3.11" and python_version < "3.12" +attrs==22.2.0 ; python_version >= "3.11" and python_version < "3.12" +cachecontrol[filecache]==0.12.11 ; python_version >= "3.11" and python_version < "3.12" +certifi==2022.12.7 ; 
python_version >= "3.11" and python_version < "3.12" +cffi==1.15.1 ; python_version >= "3.11" and python_version < "3.12" +cfgv==3.3.1 ; python_version >= "3.11" and python_version < "3.12" +charset-normalizer==2.1.1 ; python_version >= "3.11" and python_version < "3.12" +cleo==2.0.1 ; python_version >= "3.11" and python_version < "3.12" +click==8.0.4 ; python_version >= "3.11" and python_version < "3.12" +colorama==0.4.4 ; python_version >= "3.11" and python_version < "3.12" +crashtest==0.4.1 ; python_version >= "3.11" and python_version < "3.12" +cryptography==39.0.0 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "linux" +cython==0.29.34 ; python_version >= "3.11" and python_version < "3.12" +distlib==0.3.6 ; python_version >= "3.11" and python_version < "3.12" +dulwich==0.20.50 ; python_version >= "3.11" and python_version < "3.12" +fastapi==0.70.0 ; python_version >= "3.11" and python_version < "3.12" +filelock==3.8.2 ; python_version >= "3.11" and python_version < "3.12" +future==0.18.2 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "win32" +h11==0.14.0 ; python_version >= "3.11" and python_version < "3.12" +html5lib==1.1 ; python_version >= "3.11" and python_version < "3.12" +identify==2.5.11 ; python_version >= "3.11" and python_version < "3.12" +idna==3.4 ; python_version >= "3.11" and python_version < "3.12" +importlib-metadata==4.13.0 ; python_version >= "3.11" and python_version < "3.12" +jaraco-classes==3.2.3 ; python_version >= "3.11" and python_version < "3.12" +jeepney==0.8.0 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "linux" +jinja2==3.1.2 ; python_version >= "3.11" and python_version < "3.12" +jsonschema==4.17.3 ; python_version >= "3.11" and python_version < "3.12" +keyring==23.13.1 ; python_version >= "3.11" and python_version < "3.12" +lockfile==0.12.2 ; python_version >= "3.11" and python_version < "3.12" +macholib==1.16.2 ; python_version >= "3.11" and 
python_version < "3.12" and sys_platform == "darwin" +markupsafe==2.1.2 ; python_version >= "3.11" and python_version < "3.12" +more-itertools==9.0.0 ; python_version >= "3.11" and python_version < "3.12" +msgpack==1.0.4 ; python_version >= "3.11" and python_version < "3.12" +nodeenv==1.7.0 ; python_version >= "3.11" and python_version < "3.12" +numpy==1.24.2 ; python_version >= "3.11" and python_version < "3.12" +packaging==22.0 ; python_version >= "3.11" and python_version < "3.12" +pefile==2022.5.30 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "win32" +pexpect==4.8.0 ; python_version >= "3.11" and python_version < "3.12" +pkginfo==1.9.4 ; python_version >= "3.11" and python_version < "3.12" +platformdirs==2.6.2 ; python_version >= "3.11" and python_version < "3.12" +poetry-core==1.4.0 ; python_version >= "3.11" and python_version < "3.12" +poetry-plugin-export==1.2.0 ; python_version >= "3.11" and python_version < "3.12" +poetry==1.3.1 ; python_version >= "3.11" and python_version < "3.12" +pre-commit==2.16.0 ; python_version >= "3.11" and python_version < "3.12" +ptyprocess==0.7.0 ; python_version >= "3.11" and python_version < "3.12" +pycparser==2.21 ; python_version >= "3.11" and python_version < "3.12" +pydantic==1.10.2 ; python_version >= "3.11" and python_version < "3.12" +pyinstaller-hooks-contrib==2022.14 ; python_version >= "3.11" and python_version < "3.12" +pyinstaller==5.6.2 ; python_version >= "3.11" and python_version < "3.12" +pyopenjtalk @ git+https://github.com/VOICEVOX/pyopenjtalk@827a3fc5c7dda7bbe832c0c69da98e39cc8cb2c3 ; python_version >= "3.11" and python_version < "3.12" +pyrsistent==0.19.3 ; python_version >= "3.11" and python_version < "3.12" +python-multipart==0.0.5 ; python_version >= "3.11" and python_version < "3.12" +pywin32-ctypes==0.2.0 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "win32" +pyworld==0.3.2 ; python_version >= "3.11" and python_version < "3.12" +pyyaml==6.0 ; 
python_version >= "3.11" and python_version < "3.12" +rapidfuzz==2.13.7 ; python_version >= "3.11" and python_version < "3.12" +requests-toolbelt==0.10.1 ; python_version >= "3.11" and python_version < "3.12" +requests==2.28.1 ; python_version >= "3.11" and python_version < "3.12" +scipy==1.10.1 ; python_version >= "3.11" and python_version < "3.12" +secretstorage==3.3.3 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "linux" +semver==3.0.0 ; python_version >= "3.11" and python_version < "3.12" +setuptools==65.6.3 ; python_version >= "3.11" and python_version < "3.12" +shellingham==1.5.0.post1 ; python_version >= "3.11" and python_version < "3.12" +six==1.16.0 ; python_version >= "3.11" and python_version < "3.12" +sniffio==1.3.0 ; python_version >= "3.11" and python_version < "3.12" +soundfile==0.10.3.post1 ; python_version >= "3.11" and python_version < "3.12" +starlette==0.16.0 ; python_version >= "3.11" and python_version < "3.12" +toml==0.10.2 ; python_version >= "3.11" and python_version < "3.12" +tomlkit==0.11.6 ; python_version >= "3.11" and python_version < "3.12" +tqdm==4.64.1 ; python_version >= "3.11" and python_version < "3.12" +trove-classifiers==2022.12.22 ; python_version >= "3.11" and python_version < "3.12" +typing-extensions==4.4.0 ; python_version >= "3.11" and python_version < "3.12" +urllib3==1.26.13 ; python_version >= "3.11" and python_version < "3.12" +uvicorn==0.15.0 ; python_version >= "3.11" and python_version < "3.12" +virtualenv==20.17.1 ; python_version >= "3.11" and python_version < "3.12" +webencodings==0.5.1 ; python_version >= "3.11" and python_version < "3.12" +xattr==0.10.1 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "darwin" +zipp==3.11.0 ; python_version >= "3.11" and python_version < "3.12" diff --git a/requirements-license.txt b/requirements-license.txt new file mode 100644 index 0000000000000000000000000000000000000000..f5086da09324460baab261fc86270a64ea57092a --- 
/dev/null +++ b/requirements-license.txt @@ -0,0 +1,36 @@ +aiofiles==0.7.0 ; python_version >= "3.11" and python_version < "3.12" +anyio==3.6.2 ; python_version >= "3.11" and python_version < "3.12" +appdirs==1.4.4 ; python_version >= "3.11" and python_version < "3.12" +asgiref==3.6.0 ; python_version >= "3.11" and python_version < "3.12" +certifi==2022.12.7 ; python_version >= "3.11" and python_version < "3.12" +cffi==1.15.1 ; python_version >= "3.11" and python_version < "3.12" +charset-normalizer==2.1.1 ; python_version >= "3.11" and python_version < "3.12" +click==8.0.4 ; python_version >= "3.11" and python_version < "3.12" +colorama==0.4.4 ; python_version >= "3.11" and python_version < "3.12" and platform_system == "Windows" +cython==0.29.34 ; python_version >= "3.11" and python_version < "3.12" +fastapi==0.70.0 ; python_version >= "3.11" and python_version < "3.12" +h11==0.14.0 ; python_version >= "3.11" and python_version < "3.12" +idna==3.4 ; python_version >= "3.11" and python_version < "3.12" +jinja2==3.1.2 ; python_version >= "3.11" and python_version < "3.12" +markupsafe==2.1.2 ; python_version >= "3.11" and python_version < "3.12" +numpy==1.24.2 ; python_version >= "3.11" and python_version < "3.12" +pip-licenses==4.2.0 ; python_version >= "3.11" and python_version < "3.12" +prettytable==3.7.0 ; python_version >= "3.11" and python_version < "3.12" +pycparser==2.21 ; python_version >= "3.11" and python_version < "3.12" +pydantic==1.10.2 ; python_version >= "3.11" and python_version < "3.12" +pyopenjtalk @ git+https://github.com/VOICEVOX/pyopenjtalk@827a3fc5c7dda7bbe832c0c69da98e39cc8cb2c3 ; python_version >= "3.11" and python_version < "3.12" +python-multipart==0.0.5 ; python_version >= "3.11" and python_version < "3.12" +pyworld==0.3.2 ; python_version >= "3.11" and python_version < "3.12" +pyyaml==6.0 ; python_version >= "3.11" and python_version < "3.12" +requests==2.28.1 ; python_version >= "3.11" and python_version < "3.12" +scipy==1.10.1 ; 
python_version >= "3.11" and python_version < "3.12" +semver==3.0.0 ; python_version >= "3.11" and python_version < "3.12" +six==1.16.0 ; python_version >= "3.11" and python_version < "3.12" +sniffio==1.3.0 ; python_version >= "3.11" and python_version < "3.12" +soundfile==0.10.3.post1 ; python_version >= "3.11" and python_version < "3.12" +starlette==0.16.0 ; python_version >= "3.11" and python_version < "3.12" +tqdm==4.64.1 ; python_version >= "3.11" and python_version < "3.12" +typing-extensions==4.4.0 ; python_version >= "3.11" and python_version < "3.12" +urllib3==1.26.13 ; python_version >= "3.11" and python_version < "3.12" +uvicorn==0.15.0 ; python_version >= "3.11" and python_version < "3.12" +wcwidth==0.2.6 ; python_version >= "3.11" and python_version < "3.12" diff --git a/requirements-test.txt b/requirements-test.txt new file mode 100644 index 0000000000000000000000000000000000000000..703bb73f85a489cfb978c1b355be400795bdeae4 --- /dev/null +++ b/requirements-test.txt @@ -0,0 +1,63 @@ +aiofiles==0.7.0 ; python_version >= "3.11" and python_version < "3.12" +anyio==3.6.2 ; python_version >= "3.11" and python_version < "3.12" +appdirs==1.4.4 ; python_version >= "3.11" and python_version < "3.12" +asgiref==3.6.0 ; python_version >= "3.11" and python_version < "3.12" +atomicwrites==1.4.0 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "win32" +attrs==22.2.0 ; python_version >= "3.11" and python_version < "3.12" +black==22.12.0 ; python_version >= "3.11" and python_version < "3.12" +certifi==2022.12.7 ; python_version >= "3.11" and python_version < "3.12" +cffi==1.15.1 ; python_version >= "3.11" and python_version < "3.12" +charset-normalizer==2.1.1 ; python_version >= "3.11" and python_version < "3.12" +click==8.0.4 ; python_version >= "3.11" and python_version < "3.12" +colorama==0.4.4 ; python_version >= "3.11" and python_version < "3.12" and sys_platform == "win32" or python_version >= "3.11" and python_version < "3.12" and 
platform_system == "Windows" +colorlog==4.8.0 ; python_version >= "3.11" and python_version < "3.12" +coverage==5.5 ; python_version >= "3.11" and python_version < "3.12" +coveralls==3.2.0 ; python_version >= "3.11" and python_version < "3.12" +cython==0.29.34 ; python_version >= "3.11" and python_version < "3.12" +dacite==1.7.0 ; python_version >= "3.11" and python_version < "3.12" +docopt==0.6.2 ; python_version >= "3.11" and python_version < "3.12" +fastapi==0.70.0 ; python_version >= "3.11" and python_version < "3.12" +flake8-bugbear==23.1.20 ; python_version >= "3.11" and python_version < "3.12" +flake8==6.0.0 ; python_version >= "3.11" and python_version < "3.12" +gitdb==4.0.10 ; python_version >= "3.11" and python_version < "3.12" +gitpython==3.1.29 ; python_version >= "3.11" and python_version < "3.12" +h11==0.14.0 ; python_version >= "3.11" and python_version < "3.12" +idna==3.4 ; python_version >= "3.11" and python_version < "3.12" +iniconfig==1.1.1 ; python_version >= "3.11" and python_version < "3.12" +isort==5.12.0 ; python_version >= "3.11" and python_version < "3.12" +jinja2==3.1.2 ; python_version >= "3.11" and python_version < "3.12" +markupsafe==2.1.2 ; python_version >= "3.11" and python_version < "3.12" +mccabe==0.7.0 ; python_version >= "3.11" and python_version < "3.12" +mypy-extensions==0.4.3 ; python_version >= "3.11" and python_version < "3.12" +mypy==0.991 ; python_version >= "3.11" and python_version < "3.12" +numpy==1.24.2 ; python_version >= "3.11" and python_version < "3.12" +packaging==22.0 ; python_version >= "3.11" and python_version < "3.12" +pathspec==0.10.3 ; python_version >= "3.11" and python_version < "3.12" +platformdirs==2.6.2 ; python_version >= "3.11" and python_version < "3.12" +pluggy==1.0.0 ; python_version >= "3.11" and python_version < "3.12" +py==1.11.0 ; python_version >= "3.11" and python_version < "3.12" +pycodestyle==2.10.0 ; python_version >= "3.11" and python_version < "3.12" +pycparser==2.21 ; python_version 
>= "3.11" and python_version < "3.12" +pydantic==1.10.2 ; python_version >= "3.11" and python_version < "3.12" +pyflakes==3.0.1 ; python_version >= "3.11" and python_version < "3.12" +pyopenjtalk @ git+https://github.com/VOICEVOX/pyopenjtalk@827a3fc5c7dda7bbe832c0c69da98e39cc8cb2c3 ; python_version >= "3.11" and python_version < "3.12" +pysen==0.10.3 ; python_version >= "3.11" and python_version < "3.12" +pytest==6.2.5 ; python_version >= "3.11" and python_version < "3.12" +python-multipart==0.0.5 ; python_version >= "3.11" and python_version < "3.12" +pyworld==0.3.2 ; python_version >= "3.11" and python_version < "3.12" +pyyaml==6.0 ; python_version >= "3.11" and python_version < "3.12" +requests==2.28.1 ; python_version >= "3.11" and python_version < "3.12" +scipy==1.10.1 ; python_version >= "3.11" and python_version < "3.12" +semver==3.0.0 ; python_version >= "3.11" and python_version < "3.12" +six==1.16.0 ; python_version >= "3.11" and python_version < "3.12" +smmap==5.0.0 ; python_version >= "3.11" and python_version < "3.12" +sniffio==1.3.0 ; python_version >= "3.11" and python_version < "3.12" +soundfile==0.10.3.post1 ; python_version >= "3.11" and python_version < "3.12" +starlette==0.16.0 ; python_version >= "3.11" and python_version < "3.12" +toml==0.10.2 ; python_version >= "3.11" and python_version < "3.12" +tomlkit==0.11.6 ; python_version >= "3.11" and python_version < "3.12" +tqdm==4.64.1 ; python_version >= "3.11" and python_version < "3.12" +typing-extensions==4.4.0 ; python_version >= "3.11" and python_version < "3.12" +unidiff==0.7.4 ; python_version >= "3.11" and python_version < "3.12" +urllib3==1.26.13 ; python_version >= "3.11" and python_version < "3.12" +uvicorn==0.15.0 ; python_version >= "3.11" and python_version < "3.12" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..ea8f4ca2584773afde1f131f052cbdca8c7abb81 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,33 @@ 
+aiofiles==0.7.0 ; python_version >= "3.11" and python_version < "3.12" +anyio==3.6.2 ; python_version >= "3.11" and python_version < "3.12" +appdirs==1.4.4 ; python_version >= "3.11" and python_version < "3.12" +asgiref==3.6.0 ; python_version >= "3.11" and python_version < "3.12" +certifi==2022.12.7 ; python_version >= "3.11" and python_version < "3.12" +cffi==1.15.1 ; python_version >= "3.11" and python_version < "3.12" +charset-normalizer==2.1.1 ; python_version >= "3.11" and python_version < "3.12" +click==8.0.4 ; python_version >= "3.11" and python_version < "3.12" +colorama==0.4.4 ; python_version >= "3.11" and python_version < "3.12" and platform_system == "Windows" +cython==0.29.34 ; python_version >= "3.11" and python_version < "3.12" +fastapi==0.70.0 ; python_version >= "3.11" and python_version < "3.12" +h11==0.14.0 ; python_version >= "3.11" and python_version < "3.12" +idna==3.4 ; python_version >= "3.11" and python_version < "3.12" +jinja2==3.1.2 ; python_version >= "3.11" and python_version < "3.12" +markupsafe==2.1.2 ; python_version >= "3.11" and python_version < "3.12" +numpy==1.24.2 ; python_version >= "3.11" and python_version < "3.12" +pycparser==2.21 ; python_version >= "3.11" and python_version < "3.12" +pydantic==1.10.2 ; python_version >= "3.11" and python_version < "3.12" +pyopenjtalk @ git+https://github.com/VOICEVOX/pyopenjtalk@827a3fc5c7dda7bbe832c0c69da98e39cc8cb2c3 ; python_version >= "3.11" and python_version < "3.12" +python-multipart==0.0.5 ; python_version >= "3.11" and python_version < "3.12" +pyworld==0.3.2 ; python_version >= "3.11" and python_version < "3.12" +pyyaml==6.0 ; python_version >= "3.11" and python_version < "3.12" +requests==2.28.1 ; python_version >= "3.11" and python_version < "3.12" +scipy==1.10.1 ; python_version >= "3.11" and python_version < "3.12" +semver==3.0.0 ; python_version >= "3.11" and python_version < "3.12" +six==1.16.0 ; python_version >= "3.11" and python_version < "3.12" +sniffio==1.3.0 ; 
python_version >= "3.11" and python_version < "3.12" +soundfile==0.10.3.post1 ; python_version >= "3.11" and python_version < "3.12" +starlette==0.16.0 ; python_version >= "3.11" and python_version < "3.12" +tqdm==4.64.1 ; python_version >= "3.11" and python_version < "3.12" +typing-extensions==4.4.0 ; python_version >= "3.11" and python_version < "3.12" +urllib3==1.26.13 ; python_version >= "3.11" and python_version < "3.12" +uvicorn==0.15.0 ; python_version >= "3.11" and python_version < "3.12" diff --git a/run.py b/run.py new file mode 100644 index 0000000000000000000000000000000000000000..f989d926683a347fcf6795dc879c3b2961fc7503 --- /dev/null +++ b/run.py @@ -0,0 +1,1243 @@ +import argparse +import asyncio +import base64 +import json +import multiprocessing +import os +import re +import sys +import traceback +import zipfile +from functools import lru_cache +from io import BytesIO, TextIOWrapper +from pathlib import Path +from tempfile import NamedTemporaryFile, TemporaryFile +from typing import Dict, List, Optional + +import soundfile +import uvicorn +from fastapi import FastAPI, Form, HTTPException, Query, Request, Response +from fastapi.middleware.cors import CORSMiddleware +from fastapi.responses import HTMLResponse, JSONResponse +from fastapi.templating import Jinja2Templates +from pydantic import ValidationError, conint +from starlette.background import BackgroundTask +from starlette.responses import FileResponse + +from voicevox_engine import __version__ +from voicevox_engine.cancellable_engine import CancellableEngine +from voicevox_engine.downloadable_library import LibraryManager +from voicevox_engine.engine_manifest import EngineManifestLoader +from voicevox_engine.engine_manifest.EngineManifest import EngineManifest +from voicevox_engine.kana_parser import create_kana, parse_kana +from voicevox_engine.metas.MetasStore import MetasStore, construct_lookup +from voicevox_engine.model import ( + AccentPhrase, + AudioQuery, + DownloadableLibrary, + 
MorphableTargetInfo, + ParseKanaBadRequest, + ParseKanaError, + Speaker, + SpeakerInfo, + SpeakerNotFoundError, + SupportedDevicesInfo, + UserDictWord, + WordTypes, +) +from voicevox_engine.morphing import ( + get_morphable_targets, + is_synthesis_morphing_permitted, + synthesis_morphing, +) +from voicevox_engine.morphing import ( + synthesis_morphing_parameter as _synthesis_morphing_parameter, +) +from voicevox_engine.part_of_speech_data import MAX_PRIORITY, MIN_PRIORITY +from voicevox_engine.preset import Preset, PresetError, PresetManager +from voicevox_engine.setting import ( + USER_SETTING_PATH, + CorsPolicyMode, + Setting, + SettingLoader, +) +from voicevox_engine.synthesis_engine import SynthesisEngineBase, make_synthesis_engines +from voicevox_engine.user_dict import ( + apply_word, + delete_word, + import_user_dict, + read_dict, + rewrite_word, + update_dict, +) +from voicevox_engine.utility import ( + ConnectBase64WavesException, + connect_base64_waves, + delete_file, + engine_root, + get_latest_core_version, + get_save_dir, +) + + +def b64encode_str(s): + return base64.b64encode(s).decode("utf-8") + + +def set_output_log_utf8() -> None: + """ + stdout/stderrのエンコーディングをUTF-8に切り替える関数 + """ + # コンソールがない環境だとNone https://docs.python.org/ja/3/library/sys.html#sys.__stdin__ + if sys.stdout is not None: + # 必ずしもreconfigure()が実装されているとは限らない + try: + sys.stdout.reconfigure(encoding="utf-8") + except AttributeError: + # バッファを全て出力する + sys.stdout.flush() + sys.stdout = TextIOWrapper( + sys.stdout.buffer, encoding="utf-8", errors="backslashreplace" + ) + if sys.stderr is not None: + try: + sys.stderr.reconfigure(encoding="utf-8") + except AttributeError: + sys.stderr.flush() + sys.stderr = TextIOWrapper( + sys.stderr.buffer, encoding="utf-8", errors="backslashreplace" + ) + + +def generate_app( + synthesis_engines: Dict[str, SynthesisEngineBase], + latest_core_version: str, + setting_loader: SettingLoader, + root_dir: Optional[Path] = None, + cors_policy_mode: 
CorsPolicyMode = CorsPolicyMode.localapps, + allow_origin: Optional[List[str]] = None, +) -> FastAPI: + if root_dir is None: + root_dir = engine_root() + + default_sampling_rate = synthesis_engines[latest_core_version].default_sampling_rate + + app = FastAPI( + title="VOICEVOX Engine", + description="VOICEVOXの音声合成エンジンです。", + version=__version__, + ) + + # CORS用のヘッダを生成するミドルウェア + localhost_regex = "^https?://(localhost|127\\.0\\.0\\.1)(:[0-9]+)?$" + compiled_localhost_regex = re.compile(localhost_regex) + allowed_origins = ["*"] + if cors_policy_mode == "localapps": + allowed_origins = ["app://."] + if allow_origin is not None: + allowed_origins += allow_origin + if "*" in allow_origin: + print( + 'WARNING: Deprecated use of argument "*" in allow_origin. ' + 'Use option "--cors_policy_mod all" instead. See "--help" for more.', + file=sys.stderr, + ) + + app.add_middleware( + CORSMiddleware, + allow_origins=allowed_origins, + allow_credentials=True, + allow_origin_regex=localhost_regex, + allow_methods=["*"], + allow_headers=["*"], + ) + + # 許可されていないOriginを遮断するミドルウェア + @app.middleware("http") + async def block_origin_middleware(request: Request, call_next): + isValidOrigin: bool = False + if "Origin" not in request.headers: # Originのない純粋なリクエストの場合 + isValidOrigin = True + elif "*" in allowed_origins: # すべてを許可する設定の場合 + isValidOrigin = True + elif request.headers["Origin"] in allowed_origins: # Originが許可されている場合 + isValidOrigin = True + elif compiled_localhost_regex.fullmatch( + request.headers["Origin"] + ): # localhostの場合 + isValidOrigin = True + + if isValidOrigin: + return await call_next(request) + else: + return JSONResponse( + status_code=403, content={"detail": "Origin not allowed"} + ) + + preset_manager = PresetManager( + preset_path=root_dir / "presets.yaml", + ) + engine_manifest_loader = EngineManifestLoader( + root_dir / "engine_manifest.json", root_dir + ) + library_manager = LibraryManager(get_save_dir() / "installed_libraries") + + metas_store = 
MetasStore(root_dir / "speaker_info") + + setting_ui_template = Jinja2Templates(directory=engine_root() / "ui_template") + + # キャッシュを有効化 + # モジュール側でlru_cacheを指定するとキャッシュを制御しにくいため、HTTPサーバ側で指定する + # TODO: キャッシュを管理するモジュール側API・HTTP側APIを用意する + synthesis_morphing_parameter = lru_cache(maxsize=4)(_synthesis_morphing_parameter) + + # @app.on_event("startup") + # async def start_catch_disconnection(): + # if args.enable_cancellable_synthesis: + # loop = asyncio.get_event_loop() + # _ = loop.create_task(cancellable_engine.catch_disconnection()) + + @app.on_event("startup") + def apply_user_dict(): + update_dict() + + def get_engine(core_version: Optional[str]) -> SynthesisEngineBase: + if core_version is None: + return synthesis_engines[latest_core_version] + if core_version in synthesis_engines: + return synthesis_engines[core_version] + raise HTTPException(status_code=422, detail="不明なバージョンです") + + @app.post( + "/audio_query", + response_model=AudioQuery, + tags=["クエリ作成"], + summary="音声合成用のクエリを作成する", + ) + def audio_query(text: str, speaker: int, core_version: Optional[str] = None): + """ + クエリの初期値を得ます。ここで得られたクエリはそのまま音声合成に利用できます。各値の意味は`Schemas`を参照してください。 + """ + engine = get_engine(core_version) + accent_phrases = engine.create_accent_phrases(text, speaker_id=speaker) + return AudioQuery( + accent_phrases=accent_phrases, + speedScale=1, + pitchScale=0, + intonationScale=1, + volumeScale=1, + prePhonemeLength=0.1, + postPhonemeLength=0.1, + outputSamplingRate=default_sampling_rate, + outputStereo=False, + kana=create_kana(accent_phrases), + ) + + @app.post( + "/audio_query_from_preset", + response_model=AudioQuery, + tags=["クエリ作成"], + summary="音声合成用のクエリをプリセットを用いて作成する", + ) + def audio_query_from_preset( + text: str, preset_id: int, core_version: Optional[str] = None + ): + """ + クエリの初期値を得ます。ここで得られたクエリはそのまま音声合成に利用できます。各値の意味は`Schemas`を参照してください。 + """ + engine = get_engine(core_version) + try: + presets = preset_manager.load_presets() + except PresetError as err: + raise 
HTTPException(status_code=422, detail=str(err)) + for preset in presets: + if preset.id == preset_id: + selected_preset = preset + break + else: + raise HTTPException(status_code=422, detail="該当するプリセットIDが見つかりません") + + accent_phrases = engine.create_accent_phrases( + text, speaker_id=selected_preset.style_id + ) + return AudioQuery( + accent_phrases=accent_phrases, + speedScale=selected_preset.speedScale, + pitchScale=selected_preset.pitchScale, + intonationScale=selected_preset.intonationScale, + volumeScale=selected_preset.volumeScale, + prePhonemeLength=selected_preset.prePhonemeLength, + postPhonemeLength=selected_preset.postPhonemeLength, + outputSamplingRate=default_sampling_rate, + outputStereo=False, + kana=create_kana(accent_phrases), + ) + + @app.post( + "/accent_phrases", + response_model=List[AccentPhrase], + tags=["クエリ編集"], + summary="テキストからアクセント句を得る", + responses={ + 400: { + "description": "読み仮名のパースに失敗", + "model": ParseKanaBadRequest, + } + }, + ) + def accent_phrases( + text: str, + speaker: int, + is_kana: bool = False, + core_version: Optional[str] = None, + ): + """ + テキストからアクセント句を得ます。 + is_kanaが`true`のとき、テキストは次のようなAquesTalkライクな記法に従う読み仮名として処理されます。デフォルトは`false`です。 + * 全てのカナはカタカナで記述される + * アクセント句は`/`または`、`で区切る。`、`で区切った場合に限り無音区間が挿入される。 + * カナの手前に`_`を入れるとそのカナは無声化される + * アクセント位置を`'`で指定する。全てのアクセント句にはアクセント位置を1つ指定する必要がある。 + * アクセント句末に`?`(全角)を入れることにより疑問文の発音ができる。 + """ + engine = get_engine(core_version) + if is_kana: + try: + accent_phrases = parse_kana(text) + except ParseKanaError as err: + raise HTTPException( + status_code=400, + detail=ParseKanaBadRequest(err).dict(), + ) + accent_phrases = engine.replace_mora_data( + accent_phrases=accent_phrases, speaker_id=speaker + ) + + return accent_phrases + else: + return engine.create_accent_phrases(text, speaker_id=speaker) + + @app.post( + "/mora_data", + response_model=List[AccentPhrase], + tags=["クエリ編集"], + summary="アクセント句から音高・音素長を得る", + ) + def mora_data( + accent_phrases: List[AccentPhrase], + speaker: 
int, + core_version: Optional[str] = None, + ): + engine = get_engine(core_version) + return engine.replace_mora_data(accent_phrases, speaker_id=speaker) + + @app.post( + "/mora_length", + response_model=List[AccentPhrase], + tags=["クエリ編集"], + summary="アクセント句から音素長を得る", + ) + def mora_length( + accent_phrases: List[AccentPhrase], + speaker: int, + core_version: Optional[str] = None, + ): + engine = get_engine(core_version) + return engine.replace_phoneme_length( + accent_phrases=accent_phrases, speaker_id=speaker + ) + + @app.post( + "/mora_pitch", + response_model=List[AccentPhrase], + tags=["クエリ編集"], + summary="アクセント句から音高を得る", + ) + def mora_pitch( + accent_phrases: List[AccentPhrase], + speaker: int, + core_version: Optional[str] = None, + ): + engine = get_engine(core_version) + return engine.replace_mora_pitch( + accent_phrases=accent_phrases, speaker_id=speaker + ) + + @app.post( + "/synthesis", + response_class=FileResponse, + responses={ + 200: { + "content": { + "audio/wav": {"schema": {"type": "string", "format": "binary"}} + }, + } + }, + tags=["音声合成"], + summary="音声合成する", + ) + def synthesis( + query: AudioQuery, + speaker: int, + enable_interrogative_upspeak: bool = Query( # noqa: B008 + default=True, + description="疑問系のテキストが与えられたら語尾を自動調整する", + ), + core_version: Optional[str] = None, + ): + engine = get_engine(core_version) + wave = engine.synthesis( + query=query, + speaker_id=speaker, + enable_interrogative_upspeak=enable_interrogative_upspeak, + ) + + with NamedTemporaryFile(delete=False) as f: + soundfile.write( + file=f, data=wave, samplerate=query.outputSamplingRate, format="WAV" + ) + + return FileResponse( + f.name, + media_type="audio/wav", + background=BackgroundTask(delete_file, f.name), + ) + + @app.post( + "/cancellable_synthesis", + response_class=FileResponse, + responses={ + 200: { + "content": { + "audio/wav": {"schema": {"type": "string", "format": "binary"}} + }, + } + }, + tags=["音声合成"], + summary="音声合成する(キャンセル可能)", + ) + def 
cancellable_synthesis( + query: AudioQuery, + speaker: int, + request: Request, + core_version: Optional[str] = None, + ): + if not args.enable_cancellable_synthesis: + raise HTTPException( + status_code=404, + detail="実験的機能はデフォルトで無効になっています。使用するには引数を指定してください。", + ) + f_name = cancellable_engine._synthesis_impl( + query=query, + speaker_id=speaker, + request=request, + core_version=core_version, + ) + if f_name == "": + raise HTTPException(status_code=422, detail="不明なバージョンです") + + return FileResponse( + f_name, + media_type="audio/wav", + background=BackgroundTask(delete_file, f_name), + ) + + @app.post( + "/multi_synthesis", + response_class=FileResponse, + responses={ + 200: { + "content": { + "application/zip": { + "schema": {"type": "string", "format": "binary"} + } + }, + } + }, + tags=["音声合成"], + summary="複数まとめて音声合成する", + ) + def multi_synthesis( + queries: List[AudioQuery], + speaker: int, + core_version: Optional[str] = None, + ): + engine = get_engine(core_version) + sampling_rate = queries[0].outputSamplingRate + + with NamedTemporaryFile(delete=False) as f: + + with zipfile.ZipFile(f, mode="a") as zip_file: + + for i in range(len(queries)): + + if queries[i].outputSamplingRate != sampling_rate: + raise HTTPException( + status_code=422, detail="サンプリングレートが異なるクエリがあります" + ) + + with TemporaryFile() as wav_file: + + wave = engine.synthesis(query=queries[i], speaker_id=speaker) + soundfile.write( + file=wav_file, + data=wave, + samplerate=sampling_rate, + format="WAV", + ) + wav_file.seek(0) + zip_file.writestr(f"{str(i + 1).zfill(3)}.wav", wav_file.read()) + + return FileResponse( + f.name, + media_type="application/zip", + background=BackgroundTask(delete_file, f.name), + ) + + @app.post( + "/morphable_targets", + response_model=List[Dict[str, MorphableTargetInfo]], + tags=["音声合成"], + summary="指定した話者に対してエンジン内の話者がモーフィングが可能か判定する", + ) + def morphable_targets( + base_speakers: List[int], + core_version: Optional[str] = None, + ): + """ + 
指定されたベース話者に対してエンジン内の各話者がモーフィング機能を利用可能か返します。 + モーフィングの許可/禁止は`/speakers`の`speaker.supported_features.synthesis_morphing`に記載されています。 + プロパティが存在しない場合は、モーフィングが許可されているとみなします。 + 返り値の話者はstring型なので注意。 + """ + engine = get_engine(core_version) + + try: + speakers = metas_store.load_combined_metas(engine=engine) + morphable_targets = get_morphable_targets( + speakers=speakers, base_speakers=base_speakers + ) + # jsonはint型のキーを持てないので、string型に変換する + return [ + {str(k): v for k, v in morphable_target.items()} + for morphable_target in morphable_targets + ] + except SpeakerNotFoundError as e: + raise HTTPException( + status_code=404, detail=f"該当する話者(speaker={e.speaker})が見つかりません" + ) + + @app.post( + "/synthesis_morphing", + response_class=FileResponse, + responses={ + 200: { + "content": { + "audio/wav": {"schema": {"type": "string", "format": "binary"}} + }, + } + }, + tags=["音声合成"], + summary="2人の話者でモーフィングした音声を合成する", + ) + def _synthesis_morphing( + query: AudioQuery, + base_speaker: int, + target_speaker: int, + morph_rate: float = Query(..., ge=0.0, le=1.0), # noqa: B008 + core_version: Optional[str] = None, + ): + """ + 指定された2人の話者で音声を合成、指定した割合でモーフィングした音声を得ます。 + モーフィングの割合は`morph_rate`で指定でき、0.0でベースの話者、1.0でターゲットの話者に近づきます。 + """ + engine = get_engine(core_version) + + try: + speakers = metas_store.load_combined_metas(engine=engine) + speaker_lookup = construct_lookup(speakers=speakers) + is_permitted = is_synthesis_morphing_permitted( + speaker_lookup, base_speaker, target_speaker + ) + if not is_permitted: + raise HTTPException( + status_code=400, + detail="指定された話者ペアでのモーフィングはできません", + ) + except SpeakerNotFoundError as e: + raise HTTPException( + status_code=404, detail=f"該当する話者(speaker={e.speaker})が見つかりません" + ) + + # 生成したパラメータはキャッシュされる + morph_param = synthesis_morphing_parameter( + engine=engine, + query=query, + base_speaker=base_speaker, + target_speaker=target_speaker, + ) + + morph_wave = synthesis_morphing( + morph_param=morph_param, + morph_rate=morph_rate, + 
output_fs=query.outputSamplingRate, + output_stereo=query.outputStereo, + ) + + with NamedTemporaryFile(delete=False) as f: + soundfile.write( + file=f, + data=morph_wave, + samplerate=query.outputSamplingRate, + format="WAV", + ) + + return FileResponse( + f.name, + media_type="audio/wav", + background=BackgroundTask(delete_file, f.name), + ) + + @app.post( + "/connect_waves", + response_class=FileResponse, + responses={ + 200: { + "content": { + "audio/wav": {"schema": {"type": "string", "format": "binary"}} + }, + } + }, + tags=["その他"], + summary="base64エンコードされた複数のwavデータを一つに結合する", + ) + def connect_waves(waves: List[str]): + """ + base64エンコードされたwavデータを一纏めにし、wavファイルで返します。 + """ + try: + waves_nparray, sampling_rate = connect_base64_waves(waves) + except ConnectBase64WavesException as err: + return HTTPException(status_code=422, detail=str(err)) + + with NamedTemporaryFile(delete=False) as f: + soundfile.write( + file=f, + data=waves_nparray, + samplerate=sampling_rate, + format="WAV", + ) + + return FileResponse( + f.name, + media_type="audio/wav", + background=BackgroundTask(delete_file, f.name), + ) + + @app.get("/presets", response_model=List[Preset], tags=["その他"]) + def get_presets(): + """ + エンジンが保持しているプリセットの設定を返します + + Returns + ------- + presets: List[Preset] + プリセットのリスト + """ + try: + presets = preset_manager.load_presets() + except PresetError as err: + raise HTTPException(status_code=422, detail=str(err)) + return presets + + @app.post("/add_preset", response_model=int, tags=["その他"]) + def add_preset(preset: Preset): + """ + 新しいプリセットを追加します + + Parameters + ------- + preset: Preset + 新しいプリセット。 + プリセットIDが既存のものと重複している場合は、新規のプリセットIDが採番されます。 + + Returns + ------- + id: int + 追加したプリセットのプリセットID + """ + try: + id = preset_manager.add_preset(preset) + except PresetError as err: + raise HTTPException(status_code=422, detail=str(err)) + return id + + @app.post("/update_preset", response_model=int, tags=["その他"]) + def update_preset(preset: Preset): + """ + 
既存のプリセットを更新します + + Parameters + ------- + preset: Preset + 更新するプリセット。 + プリセットIDが更新対象と一致している必要があります。 + + Returns + ------- + id: int + 更新したプリセットのプリセットID + """ + try: + id = preset_manager.update_preset(preset) + except PresetError as err: + raise HTTPException(status_code=422, detail=str(err)) + return id + + @app.post("/delete_preset", status_code=204, tags=["その他"]) + def delete_preset(id: int): + """ + 既存のプリセットを削除します + + Parameters + ------- + id: int + 削除するプリセットのプリセットID + + """ + try: + preset_manager.delete_preset(id) + except PresetError as err: + raise HTTPException(status_code=422, detail=str(err)) + return Response(status_code=204) + + @app.get("/version", tags=["その他"]) + def version() -> str: + return __version__ + + @app.get("/core_versions", response_model=List[str], tags=["その他"]) + def core_versions() -> List[str]: + return Response( + content=json.dumps(list(synthesis_engines.keys())), + media_type="application/json", + ) + + @app.get("/speakers", response_model=List[Speaker], tags=["その他"]) + def speakers( + core_version: Optional[str] = None, + ): + engine = get_engine(core_version) + return metas_store.load_combined_metas(engine=engine) + + @app.get("/speaker_info", response_model=SpeakerInfo, tags=["その他"]) + def speaker_info(speaker_uuid: str, core_version: Optional[str] = None): + """ + 指定されたspeaker_uuidに関する情報をjson形式で返します。 + 画像や音声はbase64エンコードされたものが返されます。 + + Returns + ------- + ret_data: SpeakerInfo + """ + speakers = json.loads(get_engine(core_version).speakers) + for i in range(len(speakers)): + if speakers[i]["speaker_uuid"] == speaker_uuid: + speaker = speakers[i] + break + else: + raise HTTPException(status_code=404, detail="該当する話者が見つかりません") + + try: + policy = (root_dir / f"speaker_info/{speaker_uuid}/policy.md").read_text( + "utf-8" + ) + portrait = b64encode_str( + (root_dir / f"speaker_info/{speaker_uuid}/portrait.png").read_bytes() + ) + style_infos = [] + for style in speaker["styles"]: + id = style["id"] + icon = b64encode_str( + ( + 
root_dir / f"speaker_info/{speaker_uuid}/icons/{id}.png" + ).read_bytes() + ) + style_portrait_path = ( + root_dir / f"speaker_info/{speaker_uuid}/portraits/{id}.png" + ) + style_portrait = ( + b64encode_str(style_portrait_path.read_bytes()) + if style_portrait_path.exists() + else None + ) + voice_samples = [ + b64encode_str( + ( + root_dir + / "speaker_info/{}/voice_samples/{}_{}.wav".format( + speaker_uuid, id, str(j + 1).zfill(3) + ) + ).read_bytes() + ) + for j in range(3) + ] + style_infos.append( + { + "id": id, + "icon": icon, + "portrait": style_portrait, + "voice_samples": voice_samples, + } + ) + except FileNotFoundError: + import traceback + + traceback.print_exc() + raise HTTPException(status_code=500, detail="追加情報が見つかりませんでした") + + ret_data = {"policy": policy, "portrait": portrait, "style_infos": style_infos} + return ret_data + + @app.get( + "/downloadable_libraries", + response_model=List[DownloadableLibrary], + tags=["音声ライブラリ管理"], + ) + def downloadable_libraries(): + """ + ダウンロード可能な音声ライブラリの情報を返します。 + + Returns + ------- + ret_data: List[DownloadableLibrary] + """ + manifest = engine_manifest_loader.load_manifest() + if not manifest.supported_features.manage_library: + raise HTTPException(status_code=404, detail="この機能は実装されていません") + return library_manager.downloadable_libraries() + + @app.get( + "/installed_libraries", + response_model=List[DownloadableLibrary], + tags=["音声ライブラリ管理"], + ) + def installed_libraries(): + """ + インストールした音声ライブラリの情報を返します。 + + Returns + ------- + ret_data: List[DownloadableLibrary] + """ + manifest = engine_manifest_loader.load_manifest() + if not manifest.supported_features.manage_library: + raise HTTPException(status_code=404, detail="この機能は実装されていません") + return library_manager.installed_libraries() + + @app.post( + "/install_library/{library_uuid}", + status_code=204, + tags=["音声ライブラリ管理"], + ) + async def install_library(library_uuid: str, request: Request): + """ + 音声ライブラリをインストールします。 + 音声ライブラリのZIPファイルをリクエストボディとして送信してください。 
+ + Parameters + ---------- + library_uuid: str + 音声ライブラリのID + """ + manifest = engine_manifest_loader.load_manifest() + if not manifest.supported_features.manage_library: + raise HTTPException(status_code=404, detail="この機能は実装されていません") + archive = BytesIO(await request.body()) + loop = asyncio.get_event_loop() + await loop.run_in_executor( + None, library_manager.install_library, library_uuid, archive + ) + return Response(status_code=204) + + @app.post("/initialize_speaker", status_code=204, tags=["その他"]) + def initialize_speaker( + speaker: int, + skip_reinit: bool = Query( # noqa: B008 + False, description="既に初期化済みの話者の再初期化をスキップするかどうか" + ), + core_version: Optional[str] = None, + ): + """ + 指定されたspeaker_idの話者を初期化します。 + 実行しなくても他のAPIは使用できますが、初回実行時に時間がかかることがあります。 + """ + engine = get_engine(core_version) + engine.initialize_speaker_synthesis(speaker_id=speaker, skip_reinit=skip_reinit) + return Response(status_code=204) + + @app.get("/is_initialized_speaker", response_model=bool, tags=["その他"]) + def is_initialized_speaker(speaker: int, core_version: Optional[str] = None): + """ + 指定されたspeaker_idの話者が初期化されているかどうかを返します。 + """ + engine = get_engine(core_version) + return engine.is_initialized_speaker_synthesis(speaker) + + @app.get("/user_dict", response_model=Dict[str, UserDictWord], tags=["ユーザー辞書"]) + def get_user_dict_words(): + """ + ユーザー辞書に登録されている単語の一覧を返します。 + 単語の表層形(surface)は正規化済みの物を返します。 + + Returns + ------- + Dict[str, UserDictWord] + 単語のUUIDとその詳細 + """ + try: + return read_dict() + except Exception: + traceback.print_exc() + raise HTTPException(status_code=422, detail="辞書の読み込みに失敗しました。") + + @app.post("/user_dict_word", response_model=str, tags=["ユーザー辞書"]) + def add_user_dict_word( + surface: str, + pronunciation: str, + accent_type: int, + word_type: Optional[WordTypes] = None, + priority: Optional[conint(ge=MIN_PRIORITY, le=MAX_PRIORITY)] = None, + ): + """ + ユーザー辞書に言葉を追加します。 + + Parameters + ---------- + surface : str + 言葉の表層形 + pronunciation: str + 
言葉の発音(カタカナ) + accent_type: int + アクセント型(音が下がる場所を指す) + word_type: WordTypes, optional + PROPER_NOUN(固有名詞)、COMMON_NOUN(普通名詞)、VERB(動詞)、ADJECTIVE(形容詞)、SUFFIX(語尾)のいずれか + priority: int, optional + 単語の優先度(0から10までの整数) + 数字が大きいほど優先度が高くなる + 1から9までの値を指定することを推奨 + """ + try: + word_uuid = apply_word( + surface=surface, + pronunciation=pronunciation, + accent_type=accent_type, + word_type=word_type, + priority=priority, + ) + return Response(content=word_uuid) + except ValidationError as e: + raise HTTPException(status_code=422, detail="パラメータに誤りがあります。\n" + str(e)) + except Exception: + traceback.print_exc() + raise HTTPException(status_code=422, detail="ユーザー辞書への追加に失敗しました。") + + @app.put("/user_dict_word/{word_uuid}", status_code=204, tags=["ユーザー辞書"]) + def rewrite_user_dict_word( + surface: str, + pronunciation: str, + accent_type: int, + word_uuid: str, + word_type: Optional[WordTypes] = None, + priority: Optional[conint(ge=MIN_PRIORITY, le=MAX_PRIORITY)] = None, + ): + """ + ユーザー辞書に登録されている言葉を更新します。 + + Parameters + ---------- + surface : str + 言葉の表層形 + pronunciation: str + 言葉の発音(カタカナ) + accent_type: int + アクセント型(音が下がる場所を指す) + word_uuid: str + 更新する言葉のUUID + word_type: WordTypes, optional + PROPER_NOUN(固有名詞)、COMMON_NOUN(普通名詞)、VERB(動詞)、ADJECTIVE(形容詞)、SUFFIX(語尾)のいずれか + priority: int, optional + 単語の優先度(0から10までの整数) + 数字が大きいほど優先度が高くなる + 1から9までの値を指定することを推奨 + """ + try: + rewrite_word( + surface=surface, + pronunciation=pronunciation, + accent_type=accent_type, + word_uuid=word_uuid, + word_type=word_type, + priority=priority, + ) + return Response(status_code=204) + except HTTPException: + raise + except ValidationError as e: + raise HTTPException(status_code=422, detail="パラメータに誤りがあります。\n" + str(e)) + except Exception: + traceback.print_exc() + raise HTTPException(status_code=422, detail="ユーザー辞書の更新に失敗しました。") + + @app.delete("/user_dict_word/{word_uuid}", status_code=204, tags=["ユーザー辞書"]) + def delete_user_dict_word(word_uuid: str): + """ + ユーザー辞書に登録されている言葉を削除します。 + + Parameters + 
---------- + word_uuid: str + 削除する言葉のUUID + """ + try: + delete_word(word_uuid=word_uuid) + return Response(status_code=204) + except HTTPException: + raise + except Exception: + traceback.print_exc() + raise HTTPException(status_code=422, detail="ユーザー辞書の更新に失敗しました。") + + @app.post("/import_user_dict", status_code=204, tags=["ユーザー辞書"]) + def import_user_dict_words( + import_dict_data: Dict[str, UserDictWord], override: bool + ): + """ + 他のユーザー辞書をインポートします。 + + Parameters + ---------- + import_dict_data: Dict[str, UserDictWord] + インポートするユーザー辞書のデータ + override: bool + 重複したエントリがあった場合、上書きするかどうか + """ + try: + import_user_dict(dict_data=import_dict_data, override=override) + return Response(status_code=204) + except Exception: + traceback.print_exc() + raise HTTPException(status_code=422, detail="ユーザー辞書のインポートに失敗しました。") + + @app.get("/supported_devices", response_model=SupportedDevicesInfo, tags=["その他"]) + def supported_devices( + core_version: Optional[str] = None, + ): + supported_devices = get_engine(core_version).supported_devices + if supported_devices is None: + raise HTTPException(status_code=422, detail="非対応の機能です。") + return Response( + content=supported_devices, + media_type="application/json", + ) + + @app.get("/engine_manifest", response_model=EngineManifest, tags=["その他"]) + def engine_manifest(): + return engine_manifest_loader.load_manifest() + + @app.get("/setting", response_class=HTMLResponse, tags=["設定"]) + def setting_get(request: Request): + settings = setting_loader.load_setting_file() + + cors_policy_mode = settings.cors_policy_mode + allow_origin = settings.allow_origin + + if allow_origin is None: + allow_origin = "" + + return setting_ui_template.TemplateResponse( + "ui.html", + { + "request": request, + "cors_policy_mode": cors_policy_mode, + "allow_origin": allow_origin, + }, + ) + + @app.post("/setting", response_class=HTMLResponse, tags=["設定"]) + def setting_post( + request: Request, + cors_policy_mode: Optional[str] = Form(None), # noqa: B008 + 
allow_origin: Optional[str] = Form(None), # noqa: B008 + ): + settings = Setting( + cors_policy_mode=cors_policy_mode, + allow_origin=allow_origin, + ) + + # 更新した設定へ上書き + setting_loader.dump_setting_file(settings) + + if allow_origin is None: + allow_origin = "" + + return setting_ui_template.TemplateResponse( + "ui.html", + { + "request": request, + "cors_policy_mode": cors_policy_mode, + "allow_origin": allow_origin, + }, + ) + + return app + + +if __name__ == "__main__": + multiprocessing.freeze_support() + + output_log_utf8 = os.getenv("VV_OUTPUT_LOG_UTF8", default="") + if output_log_utf8 == "1": + set_output_log_utf8() + elif not (output_log_utf8 == "" or output_log_utf8 == "0"): + print( + "WARNING: invalid VV_OUTPUT_LOG_UTF8 environment variable value", + file=sys.stderr, + ) + + default_cors_policy_mode = CorsPolicyMode.localapps + + parser = argparse.ArgumentParser(description="VOICEVOX のエンジンです。") + parser.add_argument( + "--host", type=str, default="127.0.0.1", help="接続を受け付けるホストアドレスです。" + ) + parser.add_argument("--port", type=int, default=50021, help="接続を受け付けるポート番号です。") + parser.add_argument( + "--use_gpu", action="store_true", help="指定するとGPUを使って音声合成するようになります。" + ) + parser.add_argument( + "--voicevox_dir", type=Path, default=None, help="VOICEVOXのディレクトリパスです。" + ) + parser.add_argument( + "--voicelib_dir", + type=Path, + default=None, + action="append", + help="VOICEVOX COREのディレクトリパスです。", + ) + parser.add_argument( + "--runtime_dir", + type=Path, + default=None, + action="append", + help="VOICEVOX COREで使用するライブラリのディレクトリパスです。", + ) + parser.add_argument( + "--enable_mock", + action="store_true", + help="指定するとVOICEVOX COREを使わずモックで音声合成を行います。", + ) + parser.add_argument( + "--enable_cancellable_synthesis", + action="store_true", + help="指定すると音声合成を途中でキャンセルできるようになります。", + ) + parser.add_argument("--init_processes", type=int, default=2) + parser.add_argument( + "--load_all_models", action="store_true", help="指定すると起動時に全ての音声合成モデルを読み込みます。" + ) + + # 
引数へcpu_num_threadsの指定がなければ、環境変数をロールします。 + # 環境変数にもない場合は、Noneのままとします。 + # VV_CPU_NUM_THREADSが空文字列でなく数値でもない場合、エラー終了します。 + parser.add_argument( + "--cpu_num_threads", + type=int, + default=os.getenv("VV_CPU_NUM_THREADS") or None, + help=( + "音声合成を行うスレッド数です。指定しないと、代わりに環境変数VV_CPU_NUM_THREADSの値が使われます。" + "VV_CPU_NUM_THREADSが空文字列でなく数値でもない場合はエラー終了します。" + ), + ) + + parser.add_argument( + "--output_log_utf8", + action="store_true", + help=( + "指定するとログ出力をUTF-8でおこないます。指定しないと、代わりに環境変数 VV_OUTPUT_LOG_UTF8 の値が使われます。" + "VV_OUTPUT_LOG_UTF8 の値が1の場合はUTF-8で、0または空文字、値がない場合は環境によって自動的に決定されます。" + ), + ) + + parser.add_argument( + "--cors_policy_mode", + type=CorsPolicyMode, + choices=list(CorsPolicyMode), + default=None, + help=( + "CORSの許可モード。allまたはlocalappsが指定できます。allはすべてを許可します。" + "localappsはオリジン間リソース共有ポリシーを、app://.とlocalhost関連に限定します。" + "その他のオリジンはallow_originオプションで追加できます。デフォルトはlocalapps。" + ), + ) + + parser.add_argument( + "--allow_origin", nargs="*", help="許可するオリジンを指定します。スペースで区切ることで複数指定できます。" + ) + + parser.add_argument( + "--setting_file", type=Path, default=USER_SETTING_PATH, help="設定ファイルを指定できます。" + ) + + args = parser.parse_args() + + if args.output_log_utf8: + set_output_log_utf8() + + cpu_num_threads: Optional[int] = args.cpu_num_threads + + synthesis_engines = make_synthesis_engines( + use_gpu=args.use_gpu, + voicelib_dirs=args.voicelib_dir, + voicevox_dir=args.voicevox_dir, + runtime_dirs=args.runtime_dir, + cpu_num_threads=cpu_num_threads, + enable_mock=args.enable_mock, + load_all_models=args.load_all_models, + ) + assert len(synthesis_engines) != 0, "音声合成エンジンがありません。" + latest_core_version = get_latest_core_version(versions=synthesis_engines.keys()) + + cancellable_engine = None + if args.enable_cancellable_synthesis: + cancellable_engine = CancellableEngine(args) + + root_dir = args.voicevox_dir if args.voicevox_dir is not None else engine_root() + + setting_loader = SettingLoader(args.setting_file) + + settings = setting_loader.load_setting_file() + + cors_policy_mode = 
( + args.cors_policy_mode + if args.cors_policy_mode is not None + else settings.cors_policy_mode + ) + + allow_origin = None + if args.allow_origin is not None: + allow_origin = args.allow_origin + elif settings.allow_origin is not None: + allow_origin = settings.allow_origin.split(" ") + + uvicorn.run( + generate_app( + synthesis_engines, + latest_core_version, + setting_loader, + root_dir=root_dir, + cors_policy_mode=cors_policy_mode, + allow_origin=allow_origin, + ), + host=args.host, + port=args.port, + ) diff --git a/run.spec b/run.spec new file mode 100644 index 0000000000000000000000000000000000000000..e77c686140bfe1c705e86046f27e8e57adaeff2c --- /dev/null +++ b/run.spec @@ -0,0 +1,85 @@ +# -*- mode: python ; coding: utf-8 -*- +# このファイルはPyInstallerによって自動生成されたもので、それをカスタマイズして使用しています。 +from PyInstaller.utils.hooks import collect_data_files +import os + +datas = [ + ('engine_manifest_assets', 'engine_manifest_assets'), + ('speaker_info', 'speaker_info'), + ('engine_manifest.json', '.'), + ('default.csv', '.'), + ('licenses.json', '.'), + ('presets.yaml', '.'), + ('default_setting.yml', '.'), + ('ui_template', 'ui_template'), + ('model', 'model'), +] +datas += collect_data_files('pyopenjtalk') + +# コアとONNX Runtimeはバイナリであるが、`binaries`に加えると +# 依存関係のパスがPyInstallerに書き換えらるので、`datas`に加える +# 参考: https://github.com/VOICEVOX/voicevox_engine/pull/446#issuecomment-1210052318 +libcore_path = os.environ.get('LIBCORE_PATH') +if libcore_path: + print('LIBCORE_PATH is found:', libcore_path) + if not os.path.isfile(libcore_path): + raise Exception("LIBCORE_PATH was found, but it is not file!") + datas += [(libcore_path, ".")] + +libonnxruntime_path = os.environ.get('LIBONNXRUNTIME_PATH') +if libonnxruntime_path: + print('LIBONNXRUNTIME_PATH is found:', libonnxruntime_path) + if not os.path.isfile(libonnxruntime_path): + raise Exception("LIBCORE_PATH was found, but it is not file!") + datas += [(libonnxruntime_path, ".")] + + +block_cipher = None + + +a = Analysis( + ['run.py'], 
+ pathex=[], + binaries=[], + datas=datas, + hiddenimports=[], + hookspath=[], + hooksconfig={}, + runtime_hooks=[], + excludes=[], + win_no_prefer_redirects=False, + win_private_assemblies=False, + cipher=block_cipher, + noarchive=False, +) + +pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher) + +exe = EXE( + pyz, + a.scripts, + [], + exclude_binaries=True, + name='run', + debug=False, + bootloader_ignore_signals=False, + strip=False, + upx=True, + console=True, + disable_windowed_traceback=False, + argv_emulation=False, + target_arch=None, + codesign_identity=None, + entitlements_file=None, +) + +coll = COLLECT( + exe, + a.binaries, + a.zipfiles, + a.datas, + strip=False, + upx=True, + upx_exclude=[], + name='run', +) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..2a1a913e07ccb1c75d7b9561e4f548d3eee7643a --- /dev/null +++ b/setup.cfg @@ -0,0 +1,36 @@ +[flake8] +# automatically generated by pysen +# pysen ignores and overwrites any modifications +# e203: black treats : as a binary operator +# e231: black doesn't put a space after , +# e501: black may exceed the line-length to follow other style rules +# w503 or w504: either one needs to be disabled to select w error codes +ignore = E203,E231,E501,W503 +max-line-length = 88 +select = B,B950,C,E,F,W + +[mypy] +# automatically generated by pysen +# pysen ignores and overwrites any modifications +check_untyped_defs = True +disallow_any_decorated = False +disallow_any_generics = False +disallow_any_unimported = False +disallow_incomplete_defs = True +disallow_subclassing_any = True +disallow_untyped_calls = False +disallow_untyped_decorators = False +disallow_untyped_defs = False +ignore_errors = False +ignore_missing_imports = True +no_implicit_optional = True +python_version = 3.10 +show_error_codes = True +strict_equality = True +strict_optional = True +warn_redundant_casts = True +warn_return_any = False +warn_unreachable = True +warn_unused_configs = True 
+warn_unused_ignores = False + diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/icons/8.png b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/icons/8.png new file mode 100644 index 0000000000000000000000000000000000000000..4cee4b8062d297f0e69218ecd2022890b333a013 Binary files /dev/null and b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/icons/8.png differ diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/metas.json b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/metas.json new file mode 100644 index 0000000000000000000000000000000000000000..577af6f5806b6294acccfdf500a0ba2df2d26f76 --- /dev/null +++ b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/metas.json @@ -0,0 +1,3 @@ +{ + "supported_features": { "permitted_synthesis_morphing": "NOTHING" } +} diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md new file mode 100644 index 0000000000000000000000000000000000000000..32a15afd7544b8cfecb727231432376aa8c9917e --- /dev/null +++ b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/policy.md @@ -0,0 +1,3 @@ +dummy3 policy + +https://voicevox.hiroshiba.jp/ diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/portrait.png b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/portrait.png new file mode 100644 index 0000000000000000000000000000000000000000..f0d2a7254107ceb2f2b0767f4d17d0127c0330ea Binary files /dev/null and b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/portrait.png differ diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/portraits/8.png b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/portraits/8.png new file mode 100644 index 0000000000000000000000000000000000000000..c399d6cb936b8778fa692ea2dc726cb0ee3c57d8 Binary files /dev/null and b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/portraits/8.png differ diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_001.wav 
b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..6afbb4e3a695e2535dbbee75fb445c102cd2603f Binary files /dev/null and b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_001.wav differ diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_002.wav b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..aeea8d2445485529ee02f59b20ac09860e8e6999 Binary files /dev/null and b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_002.wav differ diff --git a/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_003.wav b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..2419bd9d9e7508a89d28d666706b5637340054af Binary files /dev/null and b/speaker_info/35b2c544-660e-401e-b503-0e14c635303a/voice_samples/8_003.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/1.png b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/1.png new file mode 100644 index 0000000000000000000000000000000000000000..11af05543fbd6b1b255800190d6ef2d48f2f0c89 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/1.png differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/3.png b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/3.png new file mode 100644 index 0000000000000000000000000000000000000000..ff5032e911b1072232befd347efc6b8ae9e45b9e Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/3.png differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/5.png b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/5.png new file mode 100644 index 
0000000000000000000000000000000000000000..3452a88fa39c935c056b26d933693e8cd24aefba Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/5.png differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/7.png b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/7.png new file mode 100644 index 0000000000000000000000000000000000000000..63b1e7e53296b1bc02366b0ecfba997ae5b0bde2 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/icons/7.png differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/metas.json b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/metas.json new file mode 100644 index 0000000000000000000000000000000000000000..41570a1fc2e6b3f1ca0592a92ab71daf30f342d7 --- /dev/null +++ b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/metas.json @@ -0,0 +1,3 @@ +{ + "supported_features": { "permitted_synthesis_morphing": "SELF_ONLY" } +} diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/policy.md b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/policy.md new file mode 100644 index 0000000000000000000000000000000000000000..0328c63112a40f44145440562c8fe2d56ac86e38 --- /dev/null +++ b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/policy.md @@ -0,0 +1,3 @@ +dummy2 policy + +https://voicevox.hiroshiba.jp/ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/portrait.png b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/portrait.png new file mode 100644 index 0000000000000000000000000000000000000000..23970a6c67453ea010e837b97319d8497bdf0eff Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/portrait.png differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/portraits/3.png b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/portraits/3.png new file mode 100644 index 0000000000000000000000000000000000000000..8431858d7cc7a5d8f66720655e439e073d5b2f99 Binary files /dev/null and 
b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/portraits/3.png differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_001.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..9b5bfa40f60526592528969635af09e3ddcebe81 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_001.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_002.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..8736022ccc10942df0cefbdf844972009663207c Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_002.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_003.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..3730464daeadd58085becdedd0cf0188d4dffcc8 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/1_003.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_001.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..507acbaa6faf3d520b8b3dbe705571d51a4bf99c Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_001.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_002.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..e14d80bcbcde0819dcb262bf2bfc118ddd1e8cb7 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_002.wav differ diff 
--git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_003.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..8d42657c680e14589bc8e8f90ef1c52f92959107 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/3_003.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_001.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..c616d692a65affdff488d9aef53b34cc1b32232e Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_001.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_002.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..4086db4809326bd6689cb34ef8859fb49f862499 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_002.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_003.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..d3823d9fd2fcbd4c46ab3d6547cdcbeb735bccf2 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/5_003.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_001.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..88194d7e260bd62dd92f819a0d0e3deed852788b Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_001.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_002.wav 
b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..61d5a7eae88985a23b1e0b318dc26c75f33c88dc Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_002.wav differ diff --git a/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_003.wav b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..71fcdbeaa2b495fe56bf894ddd9c89e9ab21ae00 Binary files /dev/null and b/speaker_info/388f246b-8c41-4ac1-8e2d-5d79f3ff56d9/voice_samples/7_003.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/0.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/0.png new file mode 100644 index 0000000000000000000000000000000000000000..84cc0f222215ec00a1554c827913be8b283ff7da Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/0.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/2.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/2.png new file mode 100644 index 0000000000000000000000000000000000000000..ffd29cc5196663b95e74fda71fadd7c223bb6fbb Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/2.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/4.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/4.png new file mode 100644 index 0000000000000000000000000000000000000000..722a466f498371a81f8d12989695a27316cc9c6f Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/4.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/6.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/6.png new file mode 100644 index 0000000000000000000000000000000000000000..1f6d0e107939be97d85768952d4ff96497b83d11 Binary files /dev/null and 
b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/icons/6.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/metas.json b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/metas.json new file mode 100644 index 0000000000000000000000000000000000000000..0967ef424bce6791893e9a57bb952f80fd536e93 --- /dev/null +++ b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/metas.json @@ -0,0 +1 @@ +{} diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/policy.md b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/policy.md new file mode 100644 index 0000000000000000000000000000000000000000..c9bcc2cea42f727c8e43c934fc38163144848882 --- /dev/null +++ b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/policy.md @@ -0,0 +1,3 @@ +dummy1 policy + +https://voicevox.hiroshiba.jp/ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portrait.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portrait.png new file mode 100644 index 0000000000000000000000000000000000000000..74af491ccb22a442aec8064cc9ffc06f9305b35a Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portrait.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/0.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/0.png new file mode 100644 index 0000000000000000000000000000000000000000..0ad715aa946adbae1e0f412341e071796da94056 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/0.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/2.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/2.png new file mode 100644 index 0000000000000000000000000000000000000000..07c5503cc153e015c7a01009a592758d2b03fe7d Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/2.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/4.png 
b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/4.png new file mode 100644 index 0000000000000000000000000000000000000000..88810bbcfa672bd4823551b42404039f2b37bd2d Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/4.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/6.png b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/6.png new file mode 100644 index 0000000000000000000000000000000000000000..b681cde2bad3b1dd738f5bd9ecd0282a5db5f4f9 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/portraits/6.png differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_001.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..ac83689ca80dbbfc9971991da4180673ef0b2de2 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_001.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_002.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..85069f834e8275c5e25abdc2ea3f91f9a30e4858 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_002.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_003.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..d052174cd253e77013e29e3cb2fe132f58f15553 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/0_003.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_001.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_001.wav new file mode 100644 index 
0000000000000000000000000000000000000000..2051995d4a11e1e50f5446d4047023b876876e50 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_001.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_002.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..3b9b8a842d0544905c2875c5bd51ac7274542c6d Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_002.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_003.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..cc30278ba73d38666d84ed296062e3b17256e1ec Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/2_003.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_001.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..06a048df4bbed3e6df8333e80e8fc1ecb8bb7071 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_001.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_002.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..4430e6bcc7f9ef30ae099f026314163001be2271 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_002.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_003.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..83bb44af0ffb1d487608d8819a40ee32da69f11c 
Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/4_003.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_001.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..2385a1b188da76509d7837c3c9cda38bea5968c5 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_001.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_002.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..ff8c6c6b3d106e865e23a42c690258b8cec1295f Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_002.wav differ diff --git a/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_003.wav b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..a671ac74038e0c40e973940ff2f9cb1379a352e9 Binary files /dev/null and b/speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff/voice_samples/6_003.wav differ diff --git a/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/icons/9.png b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/icons/9.png new file mode 100644 index 0000000000000000000000000000000000000000..5616d057b7f0333858eec97bad1cc4cf9616af7e Binary files /dev/null and b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/icons/9.png differ diff --git a/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/metas.json b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/metas.json new file mode 100644 index 0000000000000000000000000000000000000000..218fe4c1f710f9c31f7825b11fa259acf5a9758b --- /dev/null +++ b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/metas.json @@ -0,0 +1,3 @@ +{ + "supported_features": { 
"permitted_synthesis_morphing": "ALL" } +} diff --git a/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/policy.md b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/policy.md new file mode 100644 index 0000000000000000000000000000000000000000..68114802c449a6799db4cf7aae3cecbb71db0e70 --- /dev/null +++ b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/policy.md @@ -0,0 +1,3 @@ +dummy4 policy + +https://voicevox.hiroshiba.jp/ diff --git a/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/portrait.png b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/portrait.png new file mode 100644 index 0000000000000000000000000000000000000000..0d529a93d905b670170ba2f442f7e13388137b80 Binary files /dev/null and b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/portrait.png differ diff --git a/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_001.wav b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_001.wav new file mode 100644 index 0000000000000000000000000000000000000000..9b7ee16f079f21336ced2650f727c2c4b5db27be Binary files /dev/null and b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_001.wav differ diff --git a/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_002.wav b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_002.wav new file mode 100644 index 0000000000000000000000000000000000000000..0e95e7a82c8db7fdce3e8968778ea61fa3a1f4b1 Binary files /dev/null and b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_002.wav differ diff --git a/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_003.wav b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_003.wav new file mode 100644 index 0000000000000000000000000000000000000000..83cdf67f7387293b5810f50a346dcfa95f866f4f Binary files /dev/null and b/speaker_info/b1a81618-b27b-40d2-b0ea-27a9ad408c4b/voice_samples/9_003.wav differ diff --git a/test/__init__.py b/test/__init__.py new file mode 
100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/presets-test-1.yaml b/test/presets-test-1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..18ea6942db167966148e32eb2de30a22217a1ef5 --- /dev/null +++ b/test/presets-test-1.yaml @@ -0,0 +1,21 @@ +- id: 1 + name: test + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: 0 + speedScale: 1 + pitchScale: 0 + intonationScale: 1 + volumeScale: 1 + prePhonemeLength: 0.1 + postPhonemeLength: 0.1 + +- id: 2 + name: test2 + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: 2 + speedScale: 1.5 + pitchScale: 0 + intonationScale: 1 + volumeScale: 0.7 + prePhonemeLength: 0.5 + postPhonemeLength: 0.5 diff --git a/test/presets-test-2.yaml b/test/presets-test-2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..289894c3b151421f00c30407eb9c869c11579ca6 --- /dev/null +++ b/test/presets-test-2.yaml @@ -0,0 +1,21 @@ +- id: 1 + name: test + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: not_int + speedScale: 1 + pitchScale: 0 + intonationScale: 1 + volumeScale: 1 + prePhonemeLength: 0.1 + postPhonemeLength: 0.1 + +- id: 2 + name: test2 + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: 2 + speedScale: 1.5 + pitchScale: 0 + intonationScale: 1 + volumeScale: 0.7 + prePhonemeLength: 0.5 + postPhonemeLength: 0.5 diff --git a/test/presets-test-3.yaml b/test/presets-test-3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f4bfc37fcd80f4a86799c5ffe1bbf2b2fded069e --- /dev/null +++ b/test/presets-test-3.yaml @@ -0,0 +1,21 @@ +- id: 1 + name: test + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: 0 + speedScale: 1 + pitchScale: 0 + intonationScale: 1 + volumeScale: 1 + prePhonemeLength: 0.1 + postPhonemeLength: 0.1 + +- id: 1 + name: test2 + speaker_uuid: 7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff + style_id: 2 + speedScale: 1.5 + pitchScale: 0 + 
intonationScale: 1 + volumeScale: 0.7 + prePhonemeLength: 0.5 + postPhonemeLength: 0.5 diff --git a/test/presets-test-4.yaml b/test/presets-test-4.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/test/setting-test-load-1.yaml b/test/setting-test-load-1.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3421e7a6a32073e3413444495b3bb37d80d4d351 --- /dev/null +++ b/test/setting-test-load-1.yaml @@ -0,0 +1,2 @@ +allow_origin: null +cors_policy_mode: localapps diff --git a/test/setting-test-load-2.yaml b/test/setting-test-load-2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1066e5d8ba36edec19cdbc54ac8c27963bbc3056 --- /dev/null +++ b/test/setting-test-load-2.yaml @@ -0,0 +1,2 @@ +allow_origin: null +cors_policy_mode: all diff --git a/test/setting-test-load-3.yaml b/test/setting-test-load-3.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47bc8fb9ac2ac7c4a480660da65b465ef4d72533 --- /dev/null +++ b/test/setting-test-load-3.yaml @@ -0,0 +1,2 @@ +allow_origin: "192.168.254.255 192.168.255.255" +cors_policy_mode: localapps diff --git a/test/test_acoustic_feature_extractor.py b/test/test_acoustic_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..a82e7afe62eed4f1be1506d7cd34335c769d17d0 --- /dev/null +++ b/test/test_acoustic_feature_extractor.py @@ -0,0 +1,266 @@ +import os +from pathlib import Path +from typing import List, Type +from unittest import TestCase + +from voicevox_engine.acoustic_feature_extractor import ( + BasePhoneme, + JvsPhoneme, + OjtPhoneme, +) + + +class TestBasePhoneme(TestCase): + def setUp(self): + super().setUp() + self.str_hello_hiho = "sil k o N n i ch i w a pau h i h o d e s U sil" + self.base_hello_hiho = [ + BasePhoneme(s, i, i + 1) for i, s in enumerate(self.str_hello_hiho.split()) + ] + self.lab_str = """ + 0.00 1.00 pau + 1.00 2.00 k + 2.00 3.00 o + 
3.00 4.00 N + 4.00 5.00 n + 5.00 6.00 i + 6.00 7.00 ch + 7.00 8.00 i + 8.00 9.00 w + 9.00 10.00 a + 10.00 11.00 pau + 11.00 12.00 h + 12.00 13.00 i + 13.00 14.00 h + 14.00 15.00 o + 15.00 16.00 d + 16.00 17.00 e + 17.00 18.00 s + 18.00 19.00 U + 19.00 20.00 pau + """.replace( + " ", "" + )[ + 1:-1 + ] # ダブルクオーテーションx3で囲われている部分で、空白をすべて置き換え、先頭と最後の"\n"を除外する + + def test_repr_(self): + self.assertEqual( + self.base_hello_hiho[1].__repr__(), "Phoneme(phoneme='k', start=1, end=2)" + ) + self.assertEqual( + self.base_hello_hiho[10].__repr__(), + "Phoneme(phoneme='pau', start=10, end=11)", + ) + + def test_convert(self): + with self.assertRaises(NotImplementedError): + BasePhoneme.convert(self.base_hello_hiho) + + def test_duration(self): + self.assertEqual(self.base_hello_hiho[1].duration, 1) + + def test_parse(self): + parse_str_1 = "0 1 pau" + parse_str_2 = "32.67543 33.48933 e" + parsed_base_1 = BasePhoneme.parse(parse_str_1) + parsed_base_2 = BasePhoneme.parse(parse_str_2) + self.assertEqual(parsed_base_1.phoneme, "pau") + self.assertEqual(parsed_base_1.start, 0.0) + self.assertEqual(parsed_base_1.end, 1.0) + self.assertEqual(parsed_base_2.phoneme, "e") + self.assertEqual(parsed_base_2.start, 32.68) + self.assertEqual(parsed_base_2.end, 33.49) + + def lab_test_base( + self, + file_path: str, + phonemes: List["BasePhoneme"], + phoneme_class: Type["BasePhoneme"], + ): + phoneme_class.save_lab_list(phonemes, Path(file_path)) + with open(file_path, mode="r") as f: + self.assertEqual(f.read(), self.lab_str) + result_phoneme = phoneme_class.load_lab_list(Path(file_path)) + self.assertEqual(result_phoneme, phonemes) + os.remove(file_path) + + +class TestJvsPhoneme(TestBasePhoneme): + def setUp(self): + super().setUp() + base_hello_hiho = [ + JvsPhoneme(s, i, i + 1) for i, s in enumerate(self.str_hello_hiho.split()) + ] + self.jvs_hello_hiho = JvsPhoneme.convert(base_hello_hiho) + + def test_phoneme_list(self): + self.assertEqual(JvsPhoneme.phoneme_list[1], "I") + 
self.assertEqual(JvsPhoneme.phoneme_list[14], "gy") + self.assertEqual(JvsPhoneme.phoneme_list[26], "p") + self.assertEqual(JvsPhoneme.phoneme_list[38], "z") + + def test_const(self): + self.assertEqual(JvsPhoneme.num_phoneme, 39) + self.assertEqual(JvsPhoneme.space_phoneme, "pau") + + def test_convert(self): + converted_str_hello_hiho = " ".join([p.phoneme for p in self.jvs_hello_hiho]) + self.assertEqual( + converted_str_hello_hiho, "pau k o N n i ch i w a pau h i h o d e s U pau" + ) + + def test_equal(self): + # jvs_hello_hihoの2番目の"k"と比較 + true_jvs_phoneme = JvsPhoneme("k", 1, 2) + # OjtPhonemeと比べる、比較はBasePhoneme内で実装されているので、比較結果はTrue + true_ojt_phoneme = OjtPhoneme("k", 1, 2) + + false_jvs_phoneme_1 = JvsPhoneme("a", 1, 2) + false_jvs_phoneme_2 = JvsPhoneme("k", 2, 3) + self.assertTrue(self.jvs_hello_hiho[1] == true_jvs_phoneme) + self.assertTrue(self.jvs_hello_hiho[1] == true_ojt_phoneme) + self.assertFalse(self.jvs_hello_hiho[1] == false_jvs_phoneme_1) + self.assertFalse(self.jvs_hello_hiho[1] == false_jvs_phoneme_2) + + def test_verify(self): + for phoneme in self.jvs_hello_hiho: + phoneme.verify() + + def test_phoneme_id(self): + jvs_str_hello_hiho = " ".join([str(p.phoneme_id) for p in self.jvs_hello_hiho]) + self.assertEqual( + jvs_str_hello_hiho, "0 19 25 2 23 17 7 17 36 4 0 15 17 15 25 9 11 30 3 0" + ) + + def test_onehot(self): + phoneme_id_list = [ + 0, + 19, + 25, + 2, + 23, + 17, + 7, + 17, + 36, + 4, + 0, + 15, + 17, + 15, + 25, + 9, + 11, + 30, + 3, + 0, + ] + for i, phoneme in enumerate(self.jvs_hello_hiho): + for j in range(JvsPhoneme.num_phoneme): + if phoneme_id_list[i] == j: + self.assertEqual(phoneme.onehot[j], True) + else: + self.assertEqual(phoneme.onehot[j], False) + + def test_parse(self): + parse_str_1 = "0 1 pau" + parse_str_2 = "15.32654 16.39454 a" + parsed_jvs_1 = JvsPhoneme.parse(parse_str_1) + parsed_jvs_2 = JvsPhoneme.parse(parse_str_2) + self.assertEqual(parsed_jvs_1.phoneme_id, 0) + self.assertEqual(parsed_jvs_2.phoneme_id, 4) 
+ + def test_lab_list(self): + self.lab_test_base("./jvs_lab_test", self.jvs_hello_hiho, JvsPhoneme) + + +class TestOjtPhoneme(TestBasePhoneme): + def setUp(self): + super().setUp() + self.str_hello_hiho = "sil k o N n i ch i w a pau h i h o d e s U sil" + base_hello_hiho = [ + OjtPhoneme(s, i, i + 1) for i, s in enumerate(self.str_hello_hiho.split()) + ] + self.ojt_hello_hiho = OjtPhoneme.convert(base_hello_hiho) + + def test_phoneme_list(self): + self.assertEqual(OjtPhoneme.phoneme_list[1], "A") + self.assertEqual(OjtPhoneme.phoneme_list[14], "e") + self.assertEqual(OjtPhoneme.phoneme_list[26], "m") + self.assertEqual(OjtPhoneme.phoneme_list[38], "ts") + self.assertEqual(OjtPhoneme.phoneme_list[41], "v") + + def test_const(self): + self.assertEqual(OjtPhoneme.num_phoneme, 45) + self.assertEqual(OjtPhoneme.space_phoneme, "pau") + + def test_convert(self): + ojt_str_hello_hiho = " ".join([p.phoneme for p in self.ojt_hello_hiho]) + self.assertEqual( + ojt_str_hello_hiho, "pau k o N n i ch i w a pau h i h o d e s U pau" + ) + + def test_equal(self): + # ojt_hello_hihoの10番目の"a"と比較 + true_ojt_phoneme = OjtPhoneme("a", 9, 10) + # JvsPhonemeと比べる、比較はBasePhoneme内で実装されているので、比較結果はTrue + true_jvs_phoneme = JvsPhoneme("a", 9, 10) + + false_ojt_phoneme_1 = OjtPhoneme("k", 9, 10) + false_ojt_phoneme_2 = OjtPhoneme("a", 10, 11) + self.assertTrue(self.ojt_hello_hiho[9] == true_ojt_phoneme) + self.assertTrue(self.ojt_hello_hiho[9] == true_jvs_phoneme) + self.assertFalse(self.ojt_hello_hiho[9] == false_ojt_phoneme_1) + self.assertFalse(self.ojt_hello_hiho[9] == false_ojt_phoneme_2) + + def test_verify(self): + for phoneme in self.ojt_hello_hiho: + phoneme.verify() + + def test_phoneme_id(self): + ojt_str_hello_hiho = " ".join([str(p.phoneme_id) for p in self.ojt_hello_hiho]) + self.assertEqual( + ojt_str_hello_hiho, "0 23 30 4 28 21 10 21 42 7 0 19 21 19 30 12 14 35 6 0" + ) + + def test_onehot(self): + phoneme_id_list = [ + 0, + 23, + 30, + 4, + 28, + 21, + 10, + 21, + 42, + 7, + 
0, + 19, + 21, + 19, + 30, + 12, + 14, + 35, + 6, + 0, + ] + for i, phoneme in enumerate(self.ojt_hello_hiho): + for j in range(OjtPhoneme.num_phoneme): + if phoneme_id_list[i] == j: + self.assertEqual(phoneme.onehot[j], True) + else: + self.assertEqual(phoneme.onehot[j], False) + + def test_parse(self): + parse_str_1 = "0 1 pau" + parse_str_2 = "32.67543 33.48933 e" + parsed_ojt_1 = OjtPhoneme.parse(parse_str_1) + parsed_ojt_2 = OjtPhoneme.parse(parse_str_2) + self.assertEqual(parsed_ojt_1.phoneme_id, 0) + self.assertEqual(parsed_ojt_2.phoneme_id, 14) + + def tes_lab_list(self): + self.lab_test_base("./ojt_lab_test", self.ojt_hello_hiho, OjtPhoneme) diff --git a/test/test_connect_base64_waves.py b/test/test_connect_base64_waves.py new file mode 100644 index 0000000000000000000000000000000000000000..e50c8f517e64e178f180abab0ed2372878848f86 --- /dev/null +++ b/test/test_connect_base64_waves.py @@ -0,0 +1,130 @@ +import base64 +import io +from unittest import TestCase + +import numpy as np +import numpy.testing +import soundfile +from scipy.signal import resample + +from voicevox_engine.utility import ConnectBase64WavesException, connect_base64_waves + + +def generate_sine_wave_ndarray( + seconds: float, samplerate: int, frequency: float +) -> np.ndarray: + x = np.linspace(0, seconds, int(seconds * samplerate), endpoint=False) + wave = np.sin(2 * np.pi * frequency * x).astype(np.float32) + + return wave + + +def encode_bytes(wave_ndarray: np.ndarray, samplerate: int) -> bytes: + wave_bio = io.BytesIO() + soundfile.write( + file=wave_bio, + data=wave_ndarray, + samplerate=samplerate, + format="WAV", + subtype="FLOAT", + ) + wave_bio.seek(0) + + return wave_bio.getvalue() + + +def generate_sine_wave_bytes( + seconds: float, samplerate: int, frequency: float +) -> bytes: + wave_ndarray = generate_sine_wave_ndarray(seconds, samplerate, frequency) + return encode_bytes(wave_ndarray, samplerate) + + +def encode_base64(wave_bytes: bytes) -> str: + return 
base64.standard_b64encode(wave_bytes).decode("utf-8") + + +def generate_sine_wave_base64(seconds: float, samplerate: int, frequency: float) -> str: + wave_bytes = generate_sine_wave_bytes(seconds, samplerate, frequency) + wave_base64 = encode_base64(wave_bytes) + return wave_base64 + + +class TestConnectBase64Waves(TestCase): + def test_connect(self): + samplerate = 1000 + wave = generate_sine_wave_ndarray( + seconds=2, samplerate=samplerate, frequency=10 + ) + wave_base64 = encode_base64(encode_bytes(wave, samplerate=samplerate)) + + wave_x2_ref = np.concatenate([wave, wave]) + + wave_x2, _ = connect_base64_waves(waves=[wave_base64, wave_base64]) + + self.assertEqual(wave_x2_ref.shape, wave_x2.shape) + + self.assertTrue((wave_x2_ref == wave_x2).all()) + + def test_no_wave_error(self): + self.assertRaises(ConnectBase64WavesException, connect_base64_waves, waves=[]) + + def test_invalid_base64_error(self): + wave_1000hz = generate_sine_wave_base64( + seconds=2, samplerate=1000, frequency=10 + ) + wave_1000hz_broken = wave_1000hz[1:] # remove head 1 char + + self.assertRaises( + ConnectBase64WavesException, + connect_base64_waves, + waves=[ + wave_1000hz_broken, + ], + ) + + def test_invalid_wave_file_error(self): + wave_1000hz = generate_sine_wave_bytes(seconds=2, samplerate=1000, frequency=10) + wave_1000hz_broken_bytes = wave_1000hz[1:] # remove head 1 byte + wave_1000hz_broken = encode_base64(wave_1000hz_broken_bytes) + + self.assertRaises( + ConnectBase64WavesException, + connect_base64_waves, + waves=[ + wave_1000hz_broken, + ], + ) + + def test_different_frequency(self): + wave_24000hz = generate_sine_wave_ndarray( + seconds=1, samplerate=24000, frequency=10 + ) + wave_1000hz = generate_sine_wave_ndarray( + seconds=2, samplerate=1000, frequency=10 + ) + wave_24000_base64 = encode_base64(encode_bytes(wave_24000hz, samplerate=24000)) + wave_1000_base64 = encode_base64(encode_bytes(wave_1000hz, samplerate=1000)) + + wave_1000hz_to2400hz = resample(wave_1000hz, 
24000 * len(wave_1000hz) // 1000) + wave_x2_ref = np.concatenate([wave_24000hz, wave_1000hz_to2400hz]) + + wave_x2, _ = connect_base64_waves(waves=[wave_24000_base64, wave_1000_base64]) + + self.assertEqual(wave_x2_ref.shape, wave_x2.shape) + numpy.testing.assert_array_almost_equal(wave_x2_ref, wave_x2) + + def test_different_channels(self): + wave_1000hz = generate_sine_wave_ndarray( + seconds=2, samplerate=1000, frequency=10 + ) + wave_2ch_1000hz = np.array([wave_1000hz, wave_1000hz]).T + wave_1ch_base64 = encode_base64(encode_bytes(wave_1000hz, samplerate=1000)) + wave_2ch_base64 = encode_base64(encode_bytes(wave_2ch_1000hz, samplerate=1000)) + + wave_x2_ref = np.concatenate([wave_2ch_1000hz, wave_2ch_1000hz]) + + wave_x2, _ = connect_base64_waves(waves=[wave_1ch_base64, wave_2ch_base64]) + + self.assertEqual(wave_x2_ref.shape, wave_x2.shape) + self.assertTrue((wave_x2_ref == wave_x2).all()) diff --git a/test/test_core_version_utility.py b/test/test_core_version_utility.py new file mode 100644 index 0000000000000000000000000000000000000000..e96ba8009e1614788e1e2b7ea9a11ae6d77dfe5c --- /dev/null +++ b/test/test_core_version_utility.py @@ -0,0 +1,40 @@ +from unittest import TestCase + +from voicevox_engine.utility import get_latest_core_version, parse_core_version + + +class TestCoreVersion(TestCase): + def test_parse_core_version(self): + parse_core_version("0.0.0") + parse_core_version("0.1.0") + parse_core_version("0.10.0") + parse_core_version("0.10.0-preview.1") + parse_core_version("0.14.0") + parse_core_version("0.14.0-preview.1") + parse_core_version("0.14.0-preview.10") + + def test_get_latest_core_version(self): + self.assertEqual( + get_latest_core_version( + versions=[ + "0.0.0", + "0.1.0", + "0.10.0", + "0.10.0-preview.1", + "0.14.0", + "0.14.0-preview.1", + "0.14.0-preview.10", + ] + ), + "0.14.0", + ) + + self.assertEqual( + get_latest_core_version( + versions=[ + "0.14.0", + "0.15.0-preview.1", + ] + ), + "0.15.0-preview.1", + ) diff --git 
a/test/test_full_context_label.py b/test/test_full_context_label.py new file mode 100644 index 0000000000000000000000000000000000000000..7cdde34f4644ccf7b3048d707f99b0171e25114e --- /dev/null +++ b/test/test_full_context_label.py @@ -0,0 +1,404 @@ +from copy import deepcopy +from itertools import chain +from unittest import TestCase + +from voicevox_engine.full_context_label import ( + AccentPhrase, + BreathGroup, + Mora, + Phoneme, + Utterance, +) + + +class TestBasePhonemes(TestCase): + def setUp(self): + super().setUp() + # pyopenjtalk.extract_fullcontext("こんにちは、ヒホです。")の結果 + # 出来る限りテスト内で他のライブラリに依存しないため、 + # またテスト内容を透明化するために、テストケースを生成している + self.test_case_hello_hiho = [ + # sil (無音) + "xx^xx-sil+k=o/A:xx+xx+xx/B:xx-xx_xx/C:xx_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:xx_xx#xx_xx@xx_xx|xx_xx/G:5_5%0_xx_xx/H:xx_xx/I:xx-xx" + + "@xx+xx&xx-xx|xx+xx/J:1_5/K:2+2-9", + # k + "xx^sil-k+o=N/A:-4+1+5/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # o + "sil^k-o+N=n/A:-4+1+5/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # N (ん) + "k^o-N+n=i/A:-3+2+4/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # n + "o^N-n+i=ch/A:-2+3+3/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # i + "N^n-i+ch=i/A:-2+3+3/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # ch + "n^i-ch+i=w/A:-1+4+2/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # i + "i^ch-i+w=a/A:-1+4+2/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + 
"/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # w + "ch^i-w+a=pau/A:0+5+1/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # a + "i^w-a+pau=h/A:0+5+1/B:xx-xx_xx/C:09_xx+xx/D:09+xx_xx/E:xx_xx!xx_xx-xx" + + "/F:5_5#0_xx@1_1|1_5/G:4_1%0_xx_0/H:xx_xx/I:1-5" + + "@1+2&1-2|1+9/J:1_4/K:2+2-9", + # pau (読点) + "w^a-pau+h=i/A:xx+xx+xx/B:09-xx_xx/C:xx_xx+xx/D:09+xx_xx/E:5_5!0_xx-xx" + + "/F:xx_xx#xx_xx@xx_xx|xx_xx/G:4_1%0_xx_xx/H:1_5/I:xx-xx" + + "@xx+xx&xx-xx|xx+xx/J:1_4/K:2+2-9", + # h + "a^pau-h+i=h/A:0+1+4/B:09-xx_xx/C:09_xx+xx/D:22+xx_xx/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # i + "pau^h-i+h=o/A:0+1+4/B:09-xx_xx/C:09_xx+xx/D:22+xx_xx/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # h + "h^i-h+o=d/A:1+2+3/B:09-xx_xx/C:22_xx+xx/D:10+7_2/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # o + "i^h-o+d=e/A:1+2+3/B:09-xx_xx/C:22_xx+xx/D:10+7_2/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # d + "h^o-d+e=s/A:2+3+2/B:22-xx_xx/C:10_7+2/D:xx+xx_xx/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # e + "o^d-e+s=U/A:2+3+2/B:22-xx_xx/C:10_7+2/D:xx+xx_xx/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # s + "d^e-s+U=sil/A:3+4+1/B:22-xx_xx/C:10_7+2/D:xx+xx_xx/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # U (無声母音) + "e^s-U+sil=xx/A:3+4+1/B:22-xx_xx/C:10_7+2/D:xx+xx_xx/E:5_5!0_xx-0" + + "/F:4_1#0_xx@1_1|1_4/G:xx_xx%xx_xx_xx/H:1_5/I:1-4" + + "@2+1&2-1|6+4/J:xx_xx/K:2+2-9", + # sil (無音) + 
"s^U-sil+xx=xx/A:xx+xx+xx/B:10-7_2/C:xx_xx+xx/D:xx+xx_xx/E:4_1!0_xx-xx" + + "/F:xx_xx#xx_xx@xx_xx|xx_xx/G:xx_xx%xx_xx_xx/H:1_4/I:xx-xx" + + "@xx+xx&xx-xx|xx+xx/J:xx_xx/K:2+2-9", + ] + self.phonemes_hello_hiho = [ + Phoneme.from_label(label) for label in self.test_case_hello_hiho + ] + + +class TestPhoneme(TestBasePhonemes): + def test_phoneme(self): + self.assertEqual( + " ".join([phoneme.phoneme for phoneme in self.phonemes_hello_hiho]), + "sil k o N n i ch i w a pau h i h o d e s U sil", + ) + + def test_is_pause(self): + self.assertEqual( + [phoneme.is_pause() for phoneme in self.phonemes_hello_hiho], + [ + True, # sil + False, # k + False, # o + False, # N + False, # n + False, # i + False, # ch + False, # i + False, # w + False, # a + True, # pau + False, # h + False, # i + False, # h + False, # o + False, # d + False, # e + False, # s + False, # u + True, # sil + ], + ) + + def test_label(self) -> None: + self.assertEqual( + [phoneme.label for phoneme in self.phonemes_hello_hiho], + self.test_case_hello_hiho, + ) + + +class TestMora(TestBasePhonemes): + def setUp(self) -> None: + super().setUp() + # contexts["a2"] == "1" ko + self.mora_hello_1 = Mora( + consonant=self.phonemes_hello_hiho[1], vowel=self.phonemes_hello_hiho[2] + ) + # contexts["a2"] == "2" N + self.mora_hello_2 = Mora(consonant=None, vowel=self.phonemes_hello_hiho[3]) + # contexts["a2"] == "3" ni + self.mora_hello_3 = Mora( + consonant=self.phonemes_hello_hiho[4], vowel=self.phonemes_hello_hiho[5] + ) + # contexts["a2"] == "4" chi + self.mora_hello_4 = Mora( + consonant=self.phonemes_hello_hiho[6], vowel=self.phonemes_hello_hiho[7] + ) + # contexts["a2"] == "5" wa + self.mora_hello_5 = Mora( + consonant=self.phonemes_hello_hiho[8], vowel=self.phonemes_hello_hiho[9] + ) + # contexts["a2"] == "1" hi + self.mora_hiho_1 = Mora( + consonant=self.phonemes_hello_hiho[11], vowel=self.phonemes_hello_hiho[12] + ) + # contexts["a2"] == "2" ho + self.mora_hiho_2 = Mora( + 
consonant=self.phonemes_hello_hiho[13], vowel=self.phonemes_hello_hiho[14] + ) + # contexts["a2"] == "3" de + self.mora_hiho_3 = Mora( + consonant=self.phonemes_hello_hiho[15], vowel=self.phonemes_hello_hiho[16] + ) + # contexts["a2"] == "1" sU + self.mora_hiho_4 = Mora( + consonant=self.phonemes_hello_hiho[17], vowel=self.phonemes_hello_hiho[18] + ) + + def assert_phonemes(self, mora: Mora, mora_str: str) -> None: + self.assertEqual( + "".join([phoneme.phoneme for phoneme in mora.phonemes]), mora_str + ) + + def assert_labels(self, mora: Mora, label_start: int, label_end: int) -> None: + self.assertEqual(mora.labels, self.test_case_hello_hiho[label_start:label_end]) + + def test_phonemes(self) -> None: + self.assert_phonemes(self.mora_hello_1, "ko") + self.assert_phonemes(self.mora_hello_2, "N") + self.assert_phonemes(self.mora_hello_3, "ni") + self.assert_phonemes(self.mora_hello_4, "chi") + self.assert_phonemes(self.mora_hello_5, "wa") + self.assert_phonemes(self.mora_hiho_1, "hi") + self.assert_phonemes(self.mora_hiho_2, "ho") + self.assert_phonemes(self.mora_hiho_3, "de") + self.assert_phonemes(self.mora_hiho_4, "sU") + + def test_labels(self) -> None: + self.assert_labels(self.mora_hello_1, 1, 3) + self.assert_labels(self.mora_hello_2, 3, 4) + self.assert_labels(self.mora_hello_3, 4, 6) + self.assert_labels(self.mora_hello_4, 6, 8) + self.assert_labels(self.mora_hello_5, 8, 10) + self.assert_labels(self.mora_hiho_1, 11, 13) + self.assert_labels(self.mora_hiho_2, 13, 15) + self.assert_labels(self.mora_hiho_3, 15, 17) + self.assert_labels(self.mora_hiho_4, 17, 19) + + def test_set_context(self): + # 値を書き換えるので、他のテストに影響を出さないためにdeepcopyする + mora_hello_1 = deepcopy(self.mora_hello_1) + # phonemeにあたる"p3"を書き換える + mora_hello_1.set_context("p3", "a") + self.assert_phonemes(mora_hello_1, "aa") + + +class TestAccentPhrase(TestBasePhonemes): + def setUp(self) -> None: + super().setUp() + # TODO: ValueErrorを吐く作為的ではない自然な例の模索 + # 存在しないなら放置でよい + self.accent_phrase_hello = 
AccentPhrase.from_phonemes( + self.phonemes_hello_hiho[1:10] + ) + self.accent_phrase_hiho = AccentPhrase.from_phonemes( + self.phonemes_hello_hiho[11:19] + ) + + def test_accent(self): + self.assertEqual(self.accent_phrase_hello.accent, 5) + self.assertEqual(self.accent_phrase_hiho.accent, 1) + + def test_set_context(self): + accent_phrase_hello = deepcopy(self.accent_phrase_hello) + # phonemeにあたる"p3"を書き換える + accent_phrase_hello.set_context("p3", "a") + self.assertEqual( + "".join([phoneme.phoneme for phoneme in accent_phrase_hello.phonemes]), + "aaaaaaaaa", + ) + + def test_phonemes(self): + self.assertEqual( + " ".join( + [phoneme.phoneme for phoneme in self.accent_phrase_hello.phonemes] + ), + "k o N n i ch i w a", + ) + self.assertEqual( + " ".join([phoneme.phoneme for phoneme in self.accent_phrase_hiho.phonemes]), + "h i h o d e s U", + ) + + def test_labels(self): + self.assertEqual( + self.accent_phrase_hello.labels, self.test_case_hello_hiho[1:10] + ) + self.assertEqual( + self.accent_phrase_hiho.labels, self.test_case_hello_hiho[11:19] + ) + + def test_merge(self): + # 「こんにちはヒホです」 + # 読点を無くしたものと同等 + merged_accent_phrase = self.accent_phrase_hello.merge(self.accent_phrase_hiho) + self.assertEqual(merged_accent_phrase.accent, 5) + self.assertEqual( + " ".join([phoneme.phoneme for phoneme in merged_accent_phrase.phonemes]), + "k o N n i ch i w a h i h o d e s U", + ) + self.assertEqual( + merged_accent_phrase.labels, + self.test_case_hello_hiho[1:10] + self.test_case_hello_hiho[11:19], + ) + + +class TestBreathGroup(TestBasePhonemes): + def setUp(self) -> None: + super().setUp() + self.breath_group_hello = BreathGroup.from_phonemes( + self.phonemes_hello_hiho[1:10] + ) + self.breath_group_hiho = BreathGroup.from_phonemes( + self.phonemes_hello_hiho[11:19] + ) + + def test_set_context(self): + # 値を書き換えるので、他のテストに影響を出さないためにdeepcopyする + breath_group_hello = deepcopy(self.breath_group_hello) + # phonemeにあたる"p3"を書き換える + breath_group_hello.set_context("p3", "a") + 
self.assertEqual( + "".join([phoneme.phoneme for phoneme in breath_group_hello.phonemes]), + "aaaaaaaaa", + ) + + def test_phonemes(self): + self.assertEqual( + " ".join([phoneme.phoneme for phoneme in self.breath_group_hello.phonemes]), + "k o N n i ch i w a", + ) + self.assertEqual( + " ".join([phoneme.phoneme for phoneme in self.breath_group_hiho.phonemes]), + "h i h o d e s U", + ) + + def test_labels(self): + self.assertEqual( + self.breath_group_hello.labels, self.test_case_hello_hiho[1:10] + ) + self.assertEqual( + self.breath_group_hiho.labels, self.test_case_hello_hiho[11:19] + ) + + +class TestUtterance(TestBasePhonemes): + def setUp(self) -> None: + super().setUp() + self.utterance_hello_hiho = Utterance.from_phonemes(self.phonemes_hello_hiho) + + def test_phonemes(self): + self.assertEqual( + " ".join( + [phoneme.phoneme for phoneme in self.utterance_hello_hiho.phonemes] + ), + "sil k o N n i ch i w a pau h i h o d e s U sil", + ) + changed_utterance = Utterance.from_phonemes(self.utterance_hello_hiho.phonemes) + self.assertEqual(len(changed_utterance.breath_groups), 2) + accent_phrases = list( + chain.from_iterable( + breath_group.accent_phrases + for breath_group in changed_utterance.breath_groups + ) + ) + for prev, cent, post in zip( + [None] + accent_phrases[:-1], + accent_phrases, + accent_phrases[1:] + [None], + ): + mora_num = len(cent.moras) + accent = cent.accent + + if prev is not None: + for phoneme in prev.phonemes: + self.assertEqual(phoneme.contexts["g1"], str(mora_num)) + self.assertEqual(phoneme.contexts["g2"], str(accent)) + + if post is not None: + for phoneme in post.phonemes: + self.assertEqual(phoneme.contexts["e1"], str(mora_num)) + self.assertEqual(phoneme.contexts["e2"], str(accent)) + + for phoneme in cent.phonemes: + self.assertEqual( + phoneme.contexts["k2"], + str( + sum( + [ + len(breath_group.accent_phrases) + for breath_group in changed_utterance.breath_groups + ] + ) + ), + ) + + for prev, cent, post in zip( + [None] + 
changed_utterance.breath_groups[:-1], + changed_utterance.breath_groups, + changed_utterance.breath_groups[1:] + [None], + ): + accent_phrase_num = len(cent.accent_phrases) + + if prev is not None: + for phoneme in prev.phonemes: + self.assertEqual(phoneme.contexts["j1"], str(accent_phrase_num)) + + if post is not None: + for phoneme in post.phonemes: + self.assertEqual(phoneme.contexts["h1"], str(accent_phrase_num)) + + for phoneme in cent.phonemes: + self.assertEqual(phoneme.contexts["i1"], str(accent_phrase_num)) + self.assertEqual( + phoneme.contexts["i5"], + str(accent_phrases.index(cent.accent_phrases[0]) + 1), + ) + self.assertEqual( + phoneme.contexts["i6"], + str( + len(accent_phrases) + - accent_phrases.index(cent.accent_phrases[0]) + ), + ) + + def test_labels(self): + self.assertEqual(self.utterance_hello_hiho.labels, self.test_case_hello_hiho) diff --git a/test/test_kana_parser.py b/test/test_kana_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..ef800b60003b5d14b90a8eeb86e0fa29a919f878 --- /dev/null +++ b/test/test_kana_parser.py @@ -0,0 +1,688 @@ +from typing import List +from unittest import TestCase + +from voicevox_engine import kana_parser +from voicevox_engine.kana_parser import create_kana +from voicevox_engine.model import AccentPhrase, Mora, ParseKanaError, ParseKanaErrorCode + + +def parse_kana(text: str) -> List[AccentPhrase]: + accent_phrases = kana_parser.parse_kana(text) + return accent_phrases + + +class TestParseKana(TestCase): + def test_phrase_length(self): + self.assertEqual(len(parse_kana("ア'/ア'")), 2) + self.assertEqual(len(parse_kana("ア'、ア'")), 2) + self.assertEqual(len(parse_kana("ア'/ア'/ア'/ア'/ア'")), 5) + self.assertEqual(len(parse_kana("ス'")), 1) + self.assertEqual(len(parse_kana("_ス'")), 1) + self.assertEqual(len(parse_kana("ギェ'")), 1) + self.assertEqual(len(parse_kana("ギェ'、ギェ'/ギェ'")), 3) + + def test_accent(self): + self.assertEqual(parse_kana("シャ'シシュシェショ")[0].accent, 1) + 
self.assertEqual(parse_kana("シャ'_シシュシェショ")[0].accent, 1) + self.assertEqual(parse_kana("シャシ'シュシェショ")[0].accent, 2) + self.assertEqual(parse_kana("シャ_シ'シュシェショ")[0].accent, 2) + self.assertEqual(parse_kana("シャシシュ'シェショ")[0].accent, 3) + self.assertEqual(parse_kana("シャ_シシュ'シェショ")[0].accent, 3) + self.assertEqual(parse_kana("シャシシュシェショ'")[0].accent, 5) + self.assertEqual(parse_kana("シャ_シシュシェショ'")[0].accent, 5) + + def test_mora_length(self): + self.assertEqual(len(parse_kana("シャ'シシュシェショ")[0].moras), 5) + self.assertEqual(len(parse_kana("シャ'_シシュシェショ")[0].moras), 5) + self.assertEqual(len(parse_kana("シャシ'シュシェショ")[0].moras), 5) + self.assertEqual(len(parse_kana("シャ_シ'シュシェショ")[0].moras), 5) + self.assertEqual(len(parse_kana("シャシシュシェショ'")[0].moras), 5) + self.assertEqual(len(parse_kana("シャ_シシュシェショ'")[0].moras), 5) + + def test_pause(self): + self.assertIsNone(parse_kana("ア'/ア'")[0].pause_mora) + self.assertIsNone(parse_kana("ア'/ア'")[1].pause_mora) + self.assertIsNotNone(parse_kana("ア'、ア'")[0].pause_mora) + self.assertIsNone(parse_kana("ア'、ア'")[1].pause_mora) + + def test_unvoice(self): + self.assertEqual(parse_kana("ス'")[0].moras[0].vowel, "u") + self.assertEqual(parse_kana("_ス'")[0].moras[0].vowel, "U") + + def test_roundtrip(self): + for text in ["コンニチワ'", "ワタシワ'/シャチョオデ'_ス", "トテモ'、エラ'インデス"]: + self.assertEqual(create_kana(parse_kana(text)), text) + + for text in ["ヲ'", "ェ'"]: + self.assertEqual(create_kana(parse_kana(text)), text) + + def _accent_phrase_marks_base( + self, text: str, expected_accent_phrases: List[AccentPhrase] + ) -> None: + accent_phrases = kana_parser.parse_kana(text) + self.assertEqual(expected_accent_phrases, accent_phrases) + + def test_accent_phrase_marks(self): + def a_slash_a_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + 
consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + ] + + expected_accent_phrases = a_slash_a_accent_phrases() + self._accent_phrase_marks_base( + text="ア'/ア'", + expected_accent_phrases=expected_accent_phrases, + ) + + def a_jp_comma_a_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0.0, + pitch=0.0, + ), + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + ] + + expected_accent_phrases = a_jp_comma_a_accent_phrases() + self._accent_phrase_marks_base( + text="ア'、ア'", + expected_accent_phrases=expected_accent_phrases, + ) + + def a_slash_a_slash_a_slash_a_slash_a_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + ] + + expected_accent_phrases = 
a_slash_a_slash_a_slash_a_slash_a_accent_phrases() + self._accent_phrase_marks_base( + text="ア'/ア'/ア'/ア'/ア'", + expected_accent_phrases=expected_accent_phrases, + ) + + def su_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ス", + consonant="s", + consonant_length=0.0, + vowel="u", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + ] + + expected_accent_phrases = su_accent_phrases() + self._accent_phrase_marks_base( + text="ス'", + expected_accent_phrases=expected_accent_phrases, + ) + + def under_score_su_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ス", + consonant="s", + consonant_length=0.0, + vowel="U", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + ] + + expected_accent_phrases = under_score_su_accent_phrases() + self._accent_phrase_marks_base( + text="_ス'", + expected_accent_phrases=expected_accent_phrases, + ) + + def gye_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ギェ", + consonant="gy", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + ] + + expected_accent_phrases = gye_accent_phrases() + self._accent_phrase_marks_base( + text="ギェ'", + expected_accent_phrases=expected_accent_phrases, + ) + + def gye_gye_gye_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ギェ", + consonant="gy", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0.0, + pitch=0.0, + ), + ), + AccentPhrase( + moras=[ + Mora( + text="ギェ", + consonant="gy", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + AccentPhrase( + moras=[ + Mora( + text="ギェ", + consonant="gy", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + 
pause_mora=None, + ), + ] + + expected_accent_phrases = gye_gye_gye_accent_phrases() + self._accent_phrase_marks_base( + text="ギェ'、ギェ'/ギェ'", + expected_accent_phrases=expected_accent_phrases, + ) + + def test_interrogative_accent_phrase_marks(self): + def a_question_mark_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + is_interrogative=True, + ), + ] + + expected_accent_phrases = a_question_mark_accent_phrases() + self._accent_phrase_marks_base( + text="ア'?", + expected_accent_phrases=expected_accent_phrases, + ) + + def gye_gye_gye_question_mark_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ギェ", + consonant="gy", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0.0, + pitch=0.0, + ), + ), + AccentPhrase( + moras=[ + Mora( + text="ギェ", + consonant="gy", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + AccentPhrase( + moras=[ + Mora( + text="ギェ", + consonant="gy", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + is_interrogative=True, + ), + ] + + expected_accent_phrases = gye_gye_gye_question_mark_accent_phrases() + self._accent_phrase_marks_base( + text="ギェ'、ギェ'/ギェ'?", + expected_accent_phrases=expected_accent_phrases, + ) + + def a_pause_a_question_pause_a_question_a_question_mark_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0.0, + pitch=0.0, + ), + ), + AccentPhrase( 
+ moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0.0, + pitch=0.0, + ), + is_interrogative=True, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + is_interrogative=True, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + is_interrogative=True, + ), + ] + + expected_accent_phrases = ( + a_pause_a_question_pause_a_question_a_question_mark_accent_phrases() + ) + self._accent_phrase_marks_base( + text="ア'、ア'?、ア'?/ア'?", + expected_accent_phrases=expected_accent_phrases, + ) + + +class TestParseKanaException(TestCase): + def _assert_error_code(self, kana: str, code: ParseKanaErrorCode): + with self.assertRaises(ParseKanaError) as err: + parse_kana(kana) + self.assertEqual(err.exception.errcode, code) + + def test_exceptions(self): + self._assert_error_code("アクセント", ParseKanaErrorCode.ACCENT_NOTFOUND) + self._assert_error_code("'アクセント", ParseKanaErrorCode.ACCENT_TOP) + self._assert_error_code("ア'ク'セント", ParseKanaErrorCode.ACCENT_TWICE) + self._assert_error_code("ひ'らがな", ParseKanaErrorCode.UNKNOWN_TEXT) + self._assert_error_code("__ス'", ParseKanaErrorCode.UNKNOWN_TEXT) + self._assert_error_code("ア'/", ParseKanaErrorCode.EMPTY_PHRASE) + self._assert_error_code("/ア'", ParseKanaErrorCode.EMPTY_PHRASE) + self._assert_error_code("", ParseKanaErrorCode.EMPTY_PHRASE) + + with self.assertRaises(ParseKanaError) as err: + parse_kana("ヒト'ツメ/フタツメ") + self.assertEqual(err.exception.errcode, ParseKanaErrorCode.ACCENT_NOTFOUND) + self.assertEqual(err.exception.kwargs, {"text": "フタツメ"}) + + with self.assertRaises(ParseKanaError) as err: 
+ parse_kana("ア'/") + self.assertEqual(err.exception.errcode, ParseKanaErrorCode.EMPTY_PHRASE) + self.assertEqual(err.exception.kwargs, {"position": "2"}) + + with self.assertRaises(ParseKanaError) as err: + kana_parser.parse_kana("ア?ア'") + self.assertEqual( + err.exception.errcode, ParseKanaErrorCode.INTERROGATION_MARK_NOT_AT_END + ) + + +class TestCreateKana(TestCase): + def test_create_kana_interrogative(self): + def koreha_arimasuka_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="コ", + consonant="k", + consonant_length=2.5, + vowel="o", + vowel_length=2.5, + pitch=2.5, + ), + Mora( + text="レ", + consonant="r", + consonant_length=2.5, + vowel="e", + vowel_length=2.5, + pitch=2.5, + ), + Mora( + text="ワ", + consonant="w", + consonant_length=2.5, + vowel="a", + vowel_length=2.5, + pitch=2.5, + ), + ], + accent=3, + pause_mora=None, + is_interrogative=False, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=2.5, + pitch=2.5, + ), + Mora( + text="リ", + consonant="r", + consonant_length=2.5, + vowel="i", + vowel_length=2.5, + pitch=2.5, + ), + Mora( + text="マ", + consonant="m", + consonant_length=2.5, + vowel="a", + vowel_length=2.5, + pitch=2.5, + ), + Mora( + text="ス", + consonant="s", + consonant_length=2.5, + vowel="U", + vowel_length=2.5, + pitch=2.5, + ), + Mora( + text="カ", + consonant="k", + consonant_length=2.5, + vowel="a", + vowel_length=2.5, + pitch=2.5, + ), + ], + accent=3, + pause_mora=None, + is_interrogative=False, + ), + ] + + accent_phrases = koreha_arimasuka_accent_phrases() + self.assertEqual(create_kana(accent_phrases), "コレワ'/アリマ'_スカ") + + accent_phrases = koreha_arimasuka_accent_phrases() + accent_phrases[-1].is_interrogative = True + self.assertEqual(create_kana(accent_phrases), "コレワ'/アリマ'_スカ?") + + def kya_accent_phrases(): + return [ + AccentPhrase( + moras=[ + Mora( + text="キャ", + consonant="ky", + consonant_length=2.5, + vowel="a", + 
vowel_length=2.5, + pitch=2.5, + ), + Mora( + text="ッ", + consonant=None, + consonant_length=None, + vowel="cl", + vowel_length=0.1, + pitch=0, + ), + ], + accent=1, + pause_mora=None, + is_interrogative=False, + ), + ] + + accent_phrases = kya_accent_phrases() + self.assertEqual(create_kana(accent_phrases), "キャ'ッ") + + accent_phrases = kya_accent_phrases() + accent_phrases[-1].is_interrogative = True + self.assertEqual(create_kana(accent_phrases), "キャ'ッ?") diff --git a/test/test_mock_synthesis_engine.py b/test/test_mock_synthesis_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..c06a0504a37d316c4769fcf0c658ac245f0e50d8 --- /dev/null +++ b/test/test_mock_synthesis_engine.py @@ -0,0 +1,140 @@ +from unittest import TestCase + +from voicevox_engine.dev.synthesis_engine import MockSynthesisEngine +from voicevox_engine.kana_parser import create_kana +from voicevox_engine.model import AccentPhrase, AudioQuery, Mora + + +class TestMockSynthesisEngine(TestCase): + def setUp(self): + super().setUp() + + self.accent_phrases_hello_hiho = [ + AccentPhrase( + moras=[ + Mora( + text="コ", + consonant="k", + consonant_length=0.0, + vowel="o", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ン", + consonant=None, + consonant_length=None, + vowel="N", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ニ", + consonant="n", + consonant_length=0.0, + vowel="i", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="チ", + consonant="ch", + consonant_length=0.0, + vowel="i", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ワ", + consonant="w", + consonant_length=0.0, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=5, + pause_mora=Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0.0, + pitch=0.0, + ), + ), + AccentPhrase( + moras=[ + Mora( + text="ヒ", + consonant="h", + consonant_length=0.0, + vowel="i", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ホ", + consonant="h", + 
consonant_length=0.0,
+                        vowel="o",
+                        vowel_length=0.0,
+                        pitch=0.0,
+                    ),
+                    Mora(
+                        text="デ",
+                        consonant="d",
+                        consonant_length=0.0,
+                        vowel="e",
+                        vowel_length=0.0,
+                        pitch=0.0,
+                    ),
+                    Mora(
+                        text="ス",
+                        consonant="s",
+                        consonant_length=0.0,
+                        vowel="U",
+                        vowel_length=0.0,
+                        pitch=0.0,
+                    ),
+                ],
+                accent=1,
+                pause_mora=None,
+            ),
+        ]
+        self.engine = MockSynthesisEngine(speakers="", supported_devices="")
+
+    def test_replace_phoneme_length(self):
+        self.assertEqual(
+            self.engine.replace_phoneme_length(
+                accent_phrases=self.accent_phrases_hello_hiho,
+                speaker_id=0,
+            ),
+            self.accent_phrases_hello_hiho,
+        )
+
+    def test_replace_mora_pitch(self):
+        self.assertEqual(
+            self.engine.replace_mora_pitch(
+                accent_phrases=self.accent_phrases_hello_hiho,
+                speaker_id=0,
+            ),
+            self.accent_phrases_hello_hiho,
+        )
+
+    def test_synthesis(self):
+        self.engine.synthesis(
+            AudioQuery(
+                accent_phrases=self.accent_phrases_hello_hiho,
+                speedScale=1,
+                pitchScale=0,
+                intonationScale=1,
+                volumeScale=1,
+                prePhonemeLength=0.1,
+                postPhonemeLength=0.1,
+                outputSamplingRate=24000,
+                outputStereo=False,
+                kana=create_kana(self.accent_phrases_hello_hiho),
+            ),
+            speaker_id=0,
+        )
diff --git a/test/test_mora_list.py b/test/test_mora_list.py
new file mode 100644
index 0000000000000000000000000000000000000000..25b287fa0e8b0febb1895ac84223823915e548ea
--- /dev/null
+++ b/test/test_mora_list.py
@@ -0,0 +1,20 @@
+from unittest import TestCase
+
+from voicevox_engine.mora_list import openjtalk_mora2text
+
+
+class TestOpenJTalkMoraList(TestCase):
+    def test_mora2text(self):
+        self.assertEqual("ッ", openjtalk_mora2text["cl"])
+        self.assertEqual("ティ", openjtalk_mora2text["ti"])
+        self.assertEqual("トゥ", openjtalk_mora2text["tu"])
+        self.assertEqual("ディ", openjtalk_mora2text["di"])
+        # GitHub issue #60
+        self.assertEqual("ギェ", openjtalk_mora2text["gye"])
+        self.assertEqual("イェ", openjtalk_mora2text["ye"])
+
+    def test_mora2text_injective(self):
+        """Check that no two distinct moras map to the same reading."""
+        values = list(openjtalk_mora2text.values())
+        uniq_values = list(set(values))
+        self.assertCountEqual(values, uniq_values)
diff --git a/test/test_mora_to_text.py b/test/test_mora_to_text.py
new file mode 100644
index 0000000000000000000000000000000000000000..691681dd1b202731eb5dde45e083b4d6c7526743
--- /dev/null
+++ b/test/test_mora_to_text.py
@@ -0,0 +1,29 @@
+from unittest import TestCase
+
+# TODO: import from voicevox_engine.synthesis_engine.mora
+from voicevox_engine.synthesis_engine.synthesis_engine_base import mora_to_text
+
+
+class TestMoraToText(TestCase):
+    def test_voice(self):
+        self.assertEqual(mora_to_text("a"), "ア")
+        self.assertEqual(mora_to_text("i"), "イ")
+        self.assertEqual(mora_to_text("ka"), "カ")
+        self.assertEqual(mora_to_text("N"), "ン")
+        self.assertEqual(mora_to_text("cl"), "ッ")
+        self.assertEqual(mora_to_text("gye"), "ギェ")
+        self.assertEqual(mora_to_text("ye"), "イェ")
+        self.assertEqual(mora_to_text("wo"), "ウォ")
+
+    def test_unvoice(self):
+        self.assertEqual(mora_to_text("A"), "ア")
+        self.assertEqual(mora_to_text("I"), "イ")
+        self.assertEqual(mora_to_text("kA"), "カ")
+        self.assertEqual(mora_to_text("gyE"), "ギェ")
+        self.assertEqual(mora_to_text("yE"), "イェ")
+        self.assertEqual(mora_to_text("wO"), "ウォ")
+
+    def test_invalid_mora(self):
+        """An unknown mora must not raise; it is returned unchanged."""
+        self.assertEqual(mora_to_text("x"), "x")
+        self.assertEqual(mora_to_text(""), "")
diff --git a/test/test_preset.py b/test/test_preset.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a162829c18798a704ef86d958efa87dbc1dca25
--- /dev/null
+++ b/test/test_preset.py
@@ -0,0 +1,303 @@
+from os import remove
+from pathlib import Path
+from shutil import copyfile
+from tempfile import TemporaryDirectory
+from unittest import TestCase
+
+from voicevox_engine.preset import Preset, PresetError, PresetManager
+
+
+class TestPresetManager(TestCase):
+    def setUp(self):
+        self.tmp_dir = TemporaryDirectory()
+        self.tmp_dir_path = Path(self.tmp_dir.name)
+
+    def tearDown(self):
self.tmp_dir.cleanup() + + def test_validation(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-1.yaml")) + presets = preset_manager.load_presets() + self.assertFalse(presets is None) + + def test_validation_same(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-1.yaml")) + presets = preset_manager.load_presets() + presets2 = preset_manager.load_presets() + self.assertFalse(presets is None) + self.assertEqual(presets, presets2) + + def test_validation_2(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml")) + with self.assertRaises(PresetError, msg="プリセットの設定ファイルにミスがあります"): + preset_manager.load_presets() + + def test_preset_id(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-3.yaml")) + with self.assertRaises(PresetError, msg="プリセットのidに重複があります"): + preset_manager.load_presets() + + def test_empty_file(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-4.yaml")) + with self.assertRaises(PresetError, msg="プリセットの設定ファイルが空の内容です"): + preset_manager.load_presets() + + def test_not_exist_file(self): + preset_manager = PresetManager(preset_path=Path("test/presets-dummy.yaml")) + with self.assertRaises(PresetError, msg="プリセットの設定ファイルが見つかりません"): + preset_manager.load_presets() + + def test_add_preset(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset = Preset( + **{ + "id": 10, + "name": "test10", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "style_id": 2, + "speedScale": 1, + "pitchScale": 1, + "intonationScale": 0.5, + "volumeScale": 1, + "prePhonemeLength": 0.1, + "postPhonemeLength": 0.1, + } + ) + id = preset_manager.add_preset(preset) + self.assertEqual(id, 10) + self.assertEqual(len(preset_manager.presets), 3) + for _preset in preset_manager.presets: + if _preset.id == id: + 
self.assertEqual(_preset, preset) + remove(temp_path) + + def test_add_preset_load_failure(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml")) + with self.assertRaises(PresetError, msg="プリセットの設定ファイルにミスがあります"): + preset_manager.add_preset( + Preset( + **{ + "id": 1, + "name": "", + "speaker_uuid": "", + "style_id": 0, + "speedScale": 0, + "pitchScale": 0, + "intonationScale": 0, + "volumeScale": 0, + "prePhonemeLength": 0, + "postPhonemeLength": 0, + } + ) + ) + + def test_add_preset_conflict_id(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset = Preset( + **{ + "id": 2, + "name": "test3", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "style_id": 2, + "speedScale": 1, + "pitchScale": 1, + "intonationScale": 0.5, + "volumeScale": 1, + "prePhonemeLength": 0.1, + "postPhonemeLength": 0.1, + } + ) + id = preset_manager.add_preset(preset) + self.assertEqual(id, 3) + self.assertEqual(len(preset_manager.presets), 3) + for _preset in preset_manager.presets: + if _preset.id == id: + self.assertEqual(_preset, preset) + remove(temp_path) + + def test_add_preset_conflict_id2(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset = Preset( + **{ + "id": -1, + "name": "test3", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "style_id": 2, + "speedScale": 1, + "pitchScale": 1, + "intonationScale": 0.5, + "volumeScale": 1, + "prePhonemeLength": 0.1, + "postPhonemeLength": 0.1, + } + ) + id = preset_manager.add_preset(preset) + self.assertEqual(id, 3) + self.assertEqual(len(preset_manager.presets), 3) + for _preset in preset_manager.presets: + if _preset.id == id: + self.assertEqual(_preset, preset) + remove(temp_path) + + def 
test_add_preset_write_failure(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset = Preset( + **{ + "id": 10, + "name": "test10", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "style_id": 2, + "speedScale": 1, + "pitchScale": 1, + "intonationScale": 0.5, + "volumeScale": 1, + "prePhonemeLength": 0.1, + "postPhonemeLength": 0.1, + } + ) + preset_manager.load_presets() + preset_manager.load_presets = lambda: [] + preset_manager.preset_path = "" + with self.assertRaises(PresetError, msg="プリセットの設定ファイルに書き込み失敗しました"): + preset_manager.add_preset(preset) + self.assertEqual(len(preset_manager.presets), 2) + remove(temp_path) + + def test_update_preset(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset = Preset( + **{ + "id": 1, + "name": "test1 new", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "style_id": 2, + "speedScale": 1, + "pitchScale": 1, + "intonationScale": 0.5, + "volumeScale": 1, + "prePhonemeLength": 0.1, + "postPhonemeLength": 0.1, + } + ) + id = preset_manager.update_preset(preset) + self.assertEqual(id, 1) + self.assertEqual(len(preset_manager.presets), 2) + for _preset in preset_manager.presets: + if _preset.id == id: + self.assertEqual(_preset, preset) + remove(temp_path) + + def test_update_preset_load_failure(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml")) + with self.assertRaises(PresetError, msg="プリセットの設定ファイルにミスがあります"): + preset_manager.update_preset( + Preset( + **{ + "id": 1, + "name": "", + "speaker_uuid": "", + "style_id": 0, + "speedScale": 0, + "pitchScale": 0, + "intonationScale": 0, + "volumeScale": 0, + "prePhonemeLength": 0, + "postPhonemeLength": 0, + } + ) + ) + + def test_update_preset_not_found(self): + 
temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset = Preset( + **{ + "id": 10, + "name": "test1 new", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "style_id": 2, + "speedScale": 1, + "pitchScale": 1, + "intonationScale": 0.5, + "volumeScale": 1, + "prePhonemeLength": 0.1, + "postPhonemeLength": 0.1, + } + ) + with self.assertRaises(PresetError, msg="更新先のプリセットが存在しません"): + preset_manager.update_preset(preset) + self.assertEqual(len(preset_manager.presets), 2) + remove(temp_path) + + def test_update_preset_write_failure(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset = Preset( + **{ + "id": 1, + "name": "test1 new", + "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff", + "style_id": 2, + "speedScale": 1, + "pitchScale": 1, + "intonationScale": 0.5, + "volumeScale": 1, + "prePhonemeLength": 0.1, + "postPhonemeLength": 0.1, + } + ) + preset_manager.load_presets() + preset_manager.load_presets = lambda: [] + preset_manager.preset_path = "" + with self.assertRaises(PresetError, msg="プリセットの設定ファイルに書き込み失敗しました"): + preset_manager.update_preset(preset) + self.assertEqual(len(preset_manager.presets), 2) + self.assertEqual(preset_manager.presets[0].name, "test") + remove(temp_path) + + def test_delete_preset(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + id = preset_manager.delete_preset(1) + self.assertEqual(id, 1) + self.assertEqual(len(preset_manager.presets), 1) + remove(temp_path) + + def test_delete_preset_load_failure(self): + preset_manager = PresetManager(preset_path=Path("test/presets-test-2.yaml")) + with self.assertRaises(PresetError, 
msg="プリセットの設定ファイルにミスがあります"): + preset_manager.delete_preset(10) + + def test_delete_preset_not_found(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + with self.assertRaises(PresetError, msg="削除対象のプリセットが存在しません"): + preset_manager.delete_preset(10) + self.assertEqual(len(preset_manager.presets), 2) + remove(temp_path) + + def test_delete_preset_write_failure(self): + temp_path = self.tmp_dir_path / "presets-test-temp.yaml" + copyfile(Path("test/presets-test-1.yaml"), temp_path) + preset_manager = PresetManager(preset_path=temp_path) + preset_manager.load_presets() + preset_manager.load_presets = lambda: [] + preset_manager.preset_path = "" + with self.assertRaises(PresetError, msg="プリセットの設定ファイルに書き込み失敗しました"): + preset_manager.delete_preset(1) + self.assertEqual(len(preset_manager.presets), 2) + remove(temp_path) diff --git a/test/test_setting.py b/test/test_setting.py new file mode 100644 index 0000000000000000000000000000000000000000..494e3095e1e26b74bb70436f5ff317bca26b13c7 --- /dev/null +++ b/test/test_setting.py @@ -0,0 +1,72 @@ +from pathlib import Path +from tempfile import TemporaryDirectory +from unittest import TestCase + +from voicevox_engine.setting import CorsPolicyMode, Setting, SettingLoader + + +class TestSettingLoader(TestCase): + def setUp(self): + self.tmp_dir = TemporaryDirectory() + self.tmp_dir_path = Path(self.tmp_dir.name) + + def test_loading_1(self): + setting_loader = SettingLoader(Path("not_exist.yaml")) + settings = setting_loader.load_setting_file() + + self.assertEqual( + settings.dict(), + {"allow_origin": None, "cors_policy_mode": CorsPolicyMode.localapps}, + ) + + def test_loading_2(self): + setting_loader = SettingLoader( + setting_file_path=Path("test/setting-test-load-1.yaml") + ) + settings = setting_loader.load_setting_file() + + self.assertEqual( + settings.dict(), + {"allow_origin": None, 
"cors_policy_mode": CorsPolicyMode.localapps}, + ) + + def test_loading_3(self): + setting_loader = SettingLoader( + setting_file_path=Path("test/setting-test-load-2.yaml") + ) + settings = setting_loader.load_setting_file() + + self.assertEqual( + settings.dict(), + {"allow_origin": None, "cors_policy_mode": "all"}, + ) + + def test_loading_4(self): + setting_loader = SettingLoader( + setting_file_path=Path("test/setting-test-load-3.yaml") + ) + settings = setting_loader.load_setting_file() + + self.assertEqual( + settings.dict(), + { + "allow_origin": "192.168.254.255 192.168.255.255", + "cors_policy_mode": CorsPolicyMode.localapps, + }, + ) + + def test_dump(self): + setting_loader = SettingLoader( + setting_file_path=Path(self.tmp_dir_path / "setting-test-dump.yaml") + ) + settings = Setting(cors_policy_mode=CorsPolicyMode.localapps) + setting_loader.dump_setting_file(settings) + + self.assertTrue(setting_loader.setting_file_path.is_file()) + self.assertEqual( + setting_loader.load_setting_file().dict(), + {"allow_origin": None, "cors_policy_mode": CorsPolicyMode.localapps}, + ) + + def tearDown(self): + self.tmp_dir.cleanup() diff --git a/test/test_synthesis_engine.py b/test/test_synthesis_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..b1a21741623145d1e9833b9ebf238c16d1e86edc --- /dev/null +++ b/test/test_synthesis_engine.py @@ -0,0 +1,654 @@ +import math +from copy import deepcopy +from random import random +from typing import Union +from unittest import TestCase +from unittest.mock import Mock + +import numpy + +from voicevox_engine.acoustic_feature_extractor import OjtPhoneme +from voicevox_engine.model import AccentPhrase, AudioQuery, Mora +from voicevox_engine.synthesis_engine import SynthesisEngine + +# TODO: import from voicevox_engine.synthesis_engine.mora +from voicevox_engine.synthesis_engine.synthesis_engine import ( + mora_phoneme_list, + pre_process, + split_mora, + to_flatten_moras, + to_phoneme_data_list, + 
unvoiced_mora_phoneme_list, +) + + +def yukarin_s_mock(length: int, phoneme_list: numpy.ndarray, speaker_id: numpy.ndarray): + result = [] + # mockとしての適当な処理、特に意味はない + for i in range(length): + result.append(float(phoneme_list[i] * 0.5 + speaker_id)) + return numpy.array(result) + + +def yukarin_sa_mock( + length: int, + vowel_phoneme_list: numpy.ndarray, + consonant_phoneme_list: numpy.ndarray, + start_accent_list: numpy.ndarray, + end_accent_list: numpy.ndarray, + start_accent_phrase_list: numpy.ndarray, + end_accent_phrase_list: numpy.ndarray, + speaker_id: numpy.ndarray, +): + result = [] + # mockとしての適当な処理、特に意味はない + for i in range(length): + result.append( + float( + ( + vowel_phoneme_list[0][i] + + consonant_phoneme_list[0][i] + + start_accent_list[0][i] + + end_accent_list[0][i] + + start_accent_phrase_list[0][i] + + end_accent_phrase_list[0][i] + ) + * 0.5 + + speaker_id + ) + ) + return numpy.array(result)[numpy.newaxis] + + +def decode_mock( + length: int, + phoneme_size: int, + f0: numpy.ndarray, + phoneme: numpy.ndarray, + speaker_id: Union[numpy.ndarray, int], +): + result = [] + # mockとしての適当な処理、特に意味はない + for i in range(length): + # decode forwardはデータサイズがlengthの256倍になるのでとりあえず256回データをresultに入れる + for _ in range(256): + result.append( + float( + f0[i][0] * (numpy.where(phoneme[i] == 1)[0] / phoneme_size) + + speaker_id + ) + ) + return numpy.array(result) + + +class MockCore: + yukarin_s_forward = Mock(side_effect=yukarin_s_mock) + yukarin_sa_forward = Mock(side_effect=yukarin_sa_mock) + decode_forward = Mock(side_effect=decode_mock) + + def metas(self): + return "" + + def supported_devices(self): + return "" + + def is_model_loaded(self, speaker_id): + return True + + +class TestSynthesisEngine(TestCase): + def setUp(self): + super().setUp() + self.str_list_hello_hiho = ( + "sil k o N n i ch i w a pau h i h o d e s U sil".split() + ) + self.phoneme_data_list_hello_hiho = [ + OjtPhoneme(phoneme=p, start=i, end=i + 1) + for i, p in enumerate( + "pau k o N 
n i ch i w a pau h i h o d e s U pau".split() + ) + ] + self.accent_phrases_hello_hiho = [ + AccentPhrase( + moras=[ + Mora( + text="コ", + consonant="k", + consonant_length=0.0, + vowel="o", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ン", + consonant=None, + consonant_length=None, + vowel="N", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ニ", + consonant="n", + consonant_length=0.0, + vowel="i", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="チ", + consonant="ch", + consonant_length=0.0, + vowel="i", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ワ", + consonant="w", + consonant_length=0.0, + vowel="a", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=5, + pause_mora=Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0.0, + pitch=0.0, + ), + ), + AccentPhrase( + moras=[ + Mora( + text="ヒ", + consonant="h", + consonant_length=0.0, + vowel="i", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ホ", + consonant="h", + consonant_length=0.0, + vowel="o", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="デ", + consonant="d", + consonant_length=0.0, + vowel="e", + vowel_length=0.0, + pitch=0.0, + ), + Mora( + text="ス", + consonant="s", + consonant_length=0.0, + vowel="U", + vowel_length=0.0, + pitch=0.0, + ), + ], + accent=1, + pause_mora=None, + ), + ] + core = MockCore() + self.yukarin_s_mock = core.yukarin_s_forward + self.yukarin_sa_mock = core.yukarin_sa_forward + self.decode_mock = core.decode_forward + self.synthesis_engine = SynthesisEngine( + core=core, + ) + + def test_to_flatten_moras(self): + flatten_moras = to_flatten_moras(self.accent_phrases_hello_hiho) + self.assertEqual( + flatten_moras, + self.accent_phrases_hello_hiho[0].moras + + [self.accent_phrases_hello_hiho[0].pause_mora] + + self.accent_phrases_hello_hiho[1].moras, + ) + + def test_to_phoneme_data_list(self): + phoneme_data_list = to_phoneme_data_list(self.str_list_hello_hiho) + self.assertEqual(phoneme_data_list, 
self.phoneme_data_list_hello_hiho) + + def test_split_mora(self): + consonant_phoneme_list, vowel_phoneme_list, vowel_indexes = split_mora( + self.phoneme_data_list_hello_hiho + ) + + self.assertEqual(vowel_indexes, [0, 2, 3, 5, 7, 9, 10, 12, 14, 16, 18, 19]) + self.assertEqual( + vowel_phoneme_list, + [ + OjtPhoneme(phoneme="pau", start=0, end=1), + OjtPhoneme(phoneme="o", start=2, end=3), + OjtPhoneme(phoneme="N", start=3, end=4), + OjtPhoneme(phoneme="i", start=5, end=6), + OjtPhoneme(phoneme="i", start=7, end=8), + OjtPhoneme(phoneme="a", start=9, end=10), + OjtPhoneme(phoneme="pau", start=10, end=11), + OjtPhoneme(phoneme="i", start=12, end=13), + OjtPhoneme(phoneme="o", start=14, end=15), + OjtPhoneme(phoneme="e", start=16, end=17), + OjtPhoneme(phoneme="U", start=18, end=19), + OjtPhoneme(phoneme="pau", start=19, end=20), + ], + ) + self.assertEqual( + consonant_phoneme_list, + [ + None, + OjtPhoneme(phoneme="k", start=1, end=2), + None, + OjtPhoneme(phoneme="n", start=4, end=5), + OjtPhoneme(phoneme="ch", start=6, end=7), + OjtPhoneme(phoneme="w", start=8, end=9), + None, + OjtPhoneme(phoneme="h", start=11, end=12), + OjtPhoneme(phoneme="h", start=13, end=14), + OjtPhoneme(phoneme="d", start=15, end=16), + OjtPhoneme(phoneme="s", start=17, end=18), + None, + ], + ) + + def test_pre_process(self): + flatten_moras, phoneme_data_list = pre_process( + deepcopy(self.accent_phrases_hello_hiho) + ) + + mora_index = 0 + phoneme_index = 1 + + self.assertEqual(phoneme_data_list[0], OjtPhoneme("pau", 0, 1)) + for accent_phrase in self.accent_phrases_hello_hiho: + moras = accent_phrase.moras + for mora in moras: + self.assertEqual(flatten_moras[mora_index], mora) + mora_index += 1 + if mora.consonant is not None: + self.assertEqual( + phoneme_data_list[phoneme_index], + OjtPhoneme(mora.consonant, phoneme_index, phoneme_index + 1), + ) + phoneme_index += 1 + self.assertEqual( + phoneme_data_list[phoneme_index], + OjtPhoneme(mora.vowel, phoneme_index, phoneme_index + 1), 
+ ) + phoneme_index += 1 + if accent_phrase.pause_mora: + self.assertEqual(flatten_moras[mora_index], accent_phrase.pause_mora) + mora_index += 1 + self.assertEqual( + phoneme_data_list[phoneme_index], + OjtPhoneme("pau", phoneme_index, phoneme_index + 1), + ) + phoneme_index += 1 + self.assertEqual( + phoneme_data_list[phoneme_index], + OjtPhoneme("pau", phoneme_index, phoneme_index + 1), + ) + + def test_replace_phoneme_length(self): + result = self.synthesis_engine.replace_phoneme_length( + accent_phrases=deepcopy(self.accent_phrases_hello_hiho), speaker_id=1 + ) + + # yukarin_sに渡される値の検証 + yukarin_s_args = self.yukarin_s_mock.call_args[1] + list_length = yukarin_s_args["length"] + phoneme_list = yukarin_s_args["phoneme_list"] + self.assertEqual(list_length, 20) + self.assertEqual(list_length, len(phoneme_list)) + numpy.testing.assert_array_equal( + phoneme_list, + numpy.array( + [ + 0, + 23, + 30, + 4, + 28, + 21, + 10, + 21, + 42, + 7, + 0, + 19, + 21, + 19, + 30, + 12, + 14, + 35, + 6, + 0, + ], + dtype=numpy.int64, + ), + ) + self.assertEqual(yukarin_s_args["speaker_id"], 1) + + # flatten_morasを使わずに愚直にaccent_phrasesにデータを反映させてみる + true_result = deepcopy(self.accent_phrases_hello_hiho) + index = 1 + + def result_value(i: int): + return float(phoneme_list[i] * 0.5 + 1) + + for accent_phrase in true_result: + moras = accent_phrase.moras + for mora in moras: + if mora.consonant is not None: + mora.consonant_length = result_value(index) + index += 1 + mora.vowel_length = result_value(index) + index += 1 + if accent_phrase.pause_mora is not None: + accent_phrase.pause_mora.vowel_length = result_value(index) + index += 1 + + self.assertEqual(result, true_result) + + def test_replace_mora_pitch(self): + # 空のリストでエラーを吐かないか + empty_accent_phrases = [] + self.assertEqual( + self.synthesis_engine.replace_mora_pitch( + accent_phrases=empty_accent_phrases, speaker_id=1 + ), + [], + ) + + result = self.synthesis_engine.replace_mora_pitch( + 
accent_phrases=deepcopy(self.accent_phrases_hello_hiho), speaker_id=1 + ) + + # yukarin_saに渡される値の検証 + yukarin_sa_args = self.yukarin_sa_mock.call_args[1] + list_length = yukarin_sa_args["length"] + vowel_phoneme_list = yukarin_sa_args["vowel_phoneme_list"][0] + consonant_phoneme_list = yukarin_sa_args["consonant_phoneme_list"][0] + start_accent_list = yukarin_sa_args["start_accent_list"][0] + end_accent_list = yukarin_sa_args["end_accent_list"][0] + start_accent_phrase_list = yukarin_sa_args["start_accent_phrase_list"][0] + end_accent_phrase_list = yukarin_sa_args["end_accent_phrase_list"][0] + self.assertEqual(list_length, 12) + self.assertEqual(list_length, len(vowel_phoneme_list)) + self.assertEqual(list_length, len(consonant_phoneme_list)) + self.assertEqual(list_length, len(start_accent_list)) + self.assertEqual(list_length, len(end_accent_list)) + self.assertEqual(list_length, len(start_accent_phrase_list)) + self.assertEqual(list_length, len(end_accent_phrase_list)) + self.assertEqual(yukarin_sa_args["speaker_id"], 1) + + numpy.testing.assert_array_equal( + vowel_phoneme_list, + numpy.array( + [ + 0, + 30, + 4, + 21, + 21, + 7, + 0, + 21, + 30, + 14, + 6, + 0, + ] + ), + ) + numpy.testing.assert_array_equal( + consonant_phoneme_list, + numpy.array( + [ + -1, + 23, + -1, + 28, + 10, + 42, + -1, + 19, + 19, + 12, + 35, + -1, + ] + ), + ) + numpy.testing.assert_array_equal( + start_accent_list, numpy.array([0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + ) + numpy.testing.assert_array_equal( + end_accent_list, numpy.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0]) + ) + numpy.testing.assert_array_equal( + start_accent_phrase_list, numpy.array([0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]) + ) + numpy.testing.assert_array_equal( + end_accent_phrase_list, numpy.array([0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0]) + ) + + # flatten_morasを使わずに愚直にaccent_phrasesにデータを反映させてみる + true_result = deepcopy(self.accent_phrases_hello_hiho) + index = 1 + + def result_value(i: int): + # 
unvoiced_mora_phoneme_listのPhoneme ID版 + unvoiced_mora_phoneme_id_list = [ + OjtPhoneme(p, 0, 0).phoneme_id for p in unvoiced_mora_phoneme_list + ] + if vowel_phoneme_list[i] in unvoiced_mora_phoneme_id_list: + return 0 + return ( + vowel_phoneme_list[i] + + consonant_phoneme_list[i] + + start_accent_list[i] + + end_accent_list[i] + + start_accent_phrase_list[i] + + end_accent_phrase_list[i] + ) * 0.5 + 1 + + for accent_phrase in true_result: + moras = accent_phrase.moras + for mora in moras: + mora.pitch = result_value(index) + index += 1 + if accent_phrase.pause_mora is not None: + accent_phrase.pause_mora.pitch = result_value(index) + index += 1 + + self.assertEqual(result, true_result) + + def synthesis_test_base(self, audio_query: AudioQuery): + accent_phrases = audio_query.accent_phrases + + # decode forwardのために適当にpitchとlengthを設定し、リストで持っておく + phoneme_length_list = [0.0] + phoneme_id_list = [0] + f0_list = [0.0] + for accent_phrase in accent_phrases: + moras = accent_phrase.moras + for mora in moras: + if mora.consonant is not None: + mora.consonant_length = 0.1 + phoneme_length_list.append(0.1) + phoneme_id_list.append(OjtPhoneme(mora.consonant, 0, 0).phoneme_id) + mora.vowel_length = 0.2 + phoneme_length_list.append(0.2) + phoneme_id_list.append(OjtPhoneme(mora.vowel, 0, 0).phoneme_id) + if mora.vowel not in unvoiced_mora_phoneme_list: + mora.pitch = 5.0 + random() + f0_list.append(mora.pitch) + if accent_phrase.pause_mora is not None: + accent_phrase.pause_mora.vowel_length = 0.2 + phoneme_length_list.append(0.2) + phoneme_id_list.append(OjtPhoneme("pau", 0, 0).phoneme_id) + f0_list.append(0.0) + phoneme_length_list.append(0.0) + phoneme_id_list.append(0) + f0_list.append(0.0) + + phoneme_length_list[0] = audio_query.prePhonemeLength + phoneme_length_list[-1] = audio_query.postPhonemeLength + + for i in range(len(phoneme_length_list)): + phoneme_length_list[i] /= audio_query.speedScale + + result = self.synthesis_engine.synthesis(query=audio_query, 
speaker_id=1) + + # decodeに渡される値の検証 + decode_args = self.decode_mock.call_args[1] + list_length = decode_args["length"] + self.assertEqual( + list_length, + int(sum([round(p * 24000 / 256) for p in phoneme_length_list])), + ) + + num_phoneme = OjtPhoneme.num_phoneme + # mora_phoneme_listのPhoneme ID版 + mora_phoneme_id_list = [ + OjtPhoneme(p, 0, 0).phoneme_id for p in mora_phoneme_list + ] + + # numpy.repeatをfor文でやる + f0 = [] + phoneme = [] + f0_index = 0 + mean_f0 = [] + for i, phoneme_length in enumerate(phoneme_length_list): + f0_single = numpy.array(f0_list[f0_index], dtype=numpy.float32) * ( + 2**audio_query.pitchScale + ) + for _ in range(int(round(phoneme_length * (24000 / 256)))): + f0.append([f0_single]) + phoneme_s = [] + for _ in range(num_phoneme): + phoneme_s.append(0) + # one hot + phoneme_s[phoneme_id_list[i]] = 1 + phoneme.append(phoneme_s) + # consonantとvowelを判別し、vowelであればf0_indexを一つ進める + if phoneme_id_list[i] in mora_phoneme_id_list: + if f0_single > 0: + mean_f0.append(f0_single) + f0_index += 1 + + mean_f0 = numpy.array(mean_f0, dtype=numpy.float32).mean() + f0 = numpy.array(f0, dtype=numpy.float32) + for i in range(len(f0)): + if f0[i][0] != 0.0: + f0[i][0] = (f0[i][0] - mean_f0) * audio_query.intonationScale + mean_f0 + + phoneme = numpy.array(phoneme, dtype=numpy.float32) + + # 乱数の影響で数値の位置がずれが生じるので、大半(4/5)があっていればよしとする + # また、上の部分のint(round(phoneme_length * (24000 / 256)))の影響で + # 本来のf0/phonemeとテスト生成したf0/phonemeの長さが変わることがあり、 + # テスト生成したものが若干長くなることがあるので、本来のものの長さを基準にassertする + assert_f0_count = 0 + decode_f0 = decode_args["f0"] + for i in range(len(decode_f0)): + # 乱数の影響等で数値にずれが生じるので、10の-5乗までの近似値であれば許容する + assert_f0_count += math.isclose(f0[i][0], decode_f0[i][0], rel_tol=10e-5) + self.assertTrue(assert_f0_count >= int(len(decode_f0) / 5) * 4) + assert_phoneme_count = 0 + decode_phoneme = decode_args["phoneme"] + for i in range(len(decode_phoneme)): + assert_true_count = 0 + for j in range(len(decode_phoneme[i])): + assert_true_count += 
bool(phoneme[i][j] == decode_phoneme[i][j]) + assert_phoneme_count += assert_true_count == num_phoneme + self.assertTrue(assert_phoneme_count >= int(len(decode_phoneme) / 5) * 4) + self.assertEqual(decode_args["speaker_id"], 1) + + # decode forwarderのmockを使う + true_result = decode_mock(list_length, num_phoneme, f0, phoneme, 1) + + true_result *= audio_query.volumeScale + + # TODO: resampyの部分は値の検証しようがないので、パスする + if audio_query.outputSamplingRate != 24000: + return + + assert_result_count = 0 + for i in range(len(true_result)): + if audio_query.outputStereo: + assert_result_count += math.isclose( + true_result[i], result[i][0], rel_tol=10e-5 + ) and math.isclose(true_result[i], result[i][1], rel_tol=10e-5) + else: + assert_result_count += math.isclose( + true_result[i], result[i], rel_tol=10e-5 + ) + self.assertTrue(assert_result_count >= int(len(true_result) / 5) * 4) + + def test_synthesis(self): + audio_query = AudioQuery( + accent_phrases=deepcopy(self.accent_phrases_hello_hiho), + speedScale=1.0, + pitchScale=1.0, + intonationScale=1.0, + volumeScale=1.0, + prePhonemeLength=0.1, + postPhonemeLength=0.1, + outputSamplingRate=24000, + outputStereo=False, + # このテスト内では使わないので生成不要 + kana="", + ) + + self.synthesis_test_base(audio_query) + + # speed scaleのテスト + audio_query.speedScale = 1.2 + self.synthesis_test_base(audio_query) + + # pitch scaleのテスト + audio_query.pitchScale = 1.5 + audio_query.speedScale = 1.0 + self.synthesis_test_base(audio_query) + + # intonation scaleのテスト + audio_query.pitchScale = 1.0 + audio_query.intonationScale = 1.4 + self.synthesis_test_base(audio_query) + + # volume scaleのテスト + audio_query.intonationScale = 1.0 + audio_query.volumeScale = 2.0 + self.synthesis_test_base(audio_query) + + # pre/post phoneme lengthのテスト + audio_query.volumeScale = 1.0 + audio_query.prePhonemeLength = 0.5 + audio_query.postPhonemeLength = 0.5 + self.synthesis_test_base(audio_query) + + # output sampling rateのテスト + audio_query.prePhonemeLength = 0.1 + 
audio_query.postPhonemeLength = 0.1 + audio_query.outputSamplingRate = 48000 + self.synthesis_test_base(audio_query) + + # output stereoのテスト + audio_query.outputSamplingRate = 24000 + audio_query.outputStereo = True + self.synthesis_test_base(audio_query) diff --git a/test/test_synthesis_engine_base.py b/test/test_synthesis_engine_base.py new file mode 100644 index 0000000000000000000000000000000000000000..63f976a0ee5ec012c2ce832e014fb5ee960ebecb --- /dev/null +++ b/test/test_synthesis_engine_base.py @@ -0,0 +1,411 @@ +from typing import List, Union +from unittest import TestCase +from unittest.mock import Mock + +import numpy + +from voicevox_engine.model import AccentPhrase, AudioQuery, Mora +from voicevox_engine.synthesis_engine import SynthesisEngine + + +def yukarin_s_mock(length: int, phoneme_list: numpy.ndarray, speaker_id: numpy.ndarray): + result = [] + # mockとしての適当な処理、特に意味はない + for i in range(length): + result.append(round(float(phoneme_list[i] * 0.0625 + speaker_id), 2)) + return numpy.array(result) + + +def yukarin_sa_mock( + length: int, + vowel_phoneme_list: numpy.ndarray, + consonant_phoneme_list: numpy.ndarray, + start_accent_list: numpy.ndarray, + end_accent_list: numpy.ndarray, + start_accent_phrase_list: numpy.ndarray, + end_accent_phrase_list: numpy.ndarray, + speaker_id: numpy.ndarray, +): + result = [] + # mockとしての適当な処理、特に意味はない + for i in range(length): + result.append( + round( + float( + ( + vowel_phoneme_list[0][i] + + consonant_phoneme_list[0][i] + + start_accent_list[0][i] + + end_accent_list[0][i] + + start_accent_phrase_list[0][i] + + end_accent_phrase_list[0][i] + ) + * 0.0625 + + speaker_id + ), + 2, + ) + ) + return numpy.array(result)[numpy.newaxis] + + +def decode_mock( + length: int, + phoneme_size: int, + f0: numpy.ndarray, + phoneme: numpy.ndarray, + speaker_id: Union[numpy.ndarray, int], +): + result = [] + # mockとしての適当な処理、特に意味はない + for i in range(length): + # decode forwardはデータサイズがlengthの256倍になるのでとりあえず256回データをresultに入れる + for 
_ in range(256): + result.append( + float( + f0[i][0] * (numpy.where(phoneme[i] == 1)[0] / phoneme_size) + + speaker_id + ) + ) + return numpy.array(result) + + +def koreha_arimasuka_base_expected(): + return [ + AccentPhrase( + moras=[ + Mora( + text="コ", + consonant="k", + consonant_length=2.44, + vowel="o", + vowel_length=2.88, + pitch=4.38, + ), + Mora( + text="レ", + consonant="r", + consonant_length=3.06, + vowel="e", + vowel_length=1.88, + pitch=4.0, + ), + Mora( + text="ワ", + consonant="w", + consonant_length=3.62, + vowel="a", + vowel_length=1.44, + pitch=4.19, + ), + ], + accent=3, + pause_mora=None, + is_interrogative=False, + ), + AccentPhrase( + moras=[ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=1.44, + pitch=1.44, + ), + Mora( + text="リ", + consonant="r", + consonant_length=3.06, + vowel="i", + vowel_length=2.31, + pitch=4.44, + ), + Mora( + text="マ", + consonant="m", + consonant_length=2.62, + vowel="a", + vowel_length=1.44, + pitch=3.12, + ), + Mora( + text="ス", + consonant="s", + consonant_length=3.19, + vowel="U", + vowel_length=1.38, + pitch=0.0, + ), + Mora( + text="カ", + consonant="k", + consonant_length=2.44, + vowel="a", + vowel_length=1.44, + pitch=2.94, + ), + ], + accent=3, + pause_mora=None, + is_interrogative=False, + ), + ] + + +def create_mock_query(accent_phrases): + return AudioQuery( + accent_phrases=accent_phrases, + speedScale=1, + pitchScale=0, + intonationScale=1, + volumeScale=1, + prePhonemeLength=0.1, + postPhonemeLength=0.1, + outputSamplingRate=24000, + outputStereo=False, + kana="", + ) + + +class MockCore: + yukarin_s_forward = Mock(side_effect=yukarin_s_mock) + yukarin_sa_forward = Mock(side_effect=yukarin_sa_mock) + decode_forward = Mock(side_effect=decode_mock) + + def metas(self): + return "" + + def supported_devices(self): + return "" + + def is_model_loaded(self, speaker_id): + return True + + +class TestSynthesisEngineBase(TestCase): + def setUp(self): + 
super().setUp() + self.synthesis_engine = SynthesisEngine( + core=MockCore(), + ) + self.synthesis_engine._synthesis_impl = Mock() + + def create_accent_phrases_test_base(self, text: str, expected: List[AccentPhrase]): + actual = self.synthesis_engine.create_accent_phrases(text, 1) + self.assertEqual( + expected, + actual, + "case(text:" + text + ")", + ) + + def create_synthesis_test_base( + self, + text: str, + expected: List[AccentPhrase], + enable_interrogative_upspeak: bool, + ): + """音声合成時に疑問文モーラ処理を行っているかどうかを検証 + (https://github.com/VOICEVOX/voicevox_engine/issues/272#issuecomment-1022610866) + """ + accent_phrases = self.synthesis_engine.create_accent_phrases(text, 1) + query = create_mock_query(accent_phrases=accent_phrases) + self.synthesis_engine.synthesis( + query, 0, enable_interrogative_upspeak=enable_interrogative_upspeak + ) + # _synthesis_implの第一引数に与えられたqueryを検証 + actual = self.synthesis_engine._synthesis_impl.call_args[0][0].accent_phrases + + self.assertEqual( + expected, + actual, + "case(text:" + text + ")", + ) + + def test_create_accent_phrases(self): + """accent_phrasesの作成時では疑問文モーラ処理を行わない + (https://github.com/VOICEVOX/voicevox_engine/issues/272#issuecomment-1022610866) + """ + expected = koreha_arimasuka_base_expected() + expected[-1].is_interrogative = True + self.create_accent_phrases_test_base(text="これはありますか?", expected=expected) + + def test_synthesis_interrogative(self): + expected = koreha_arimasuka_base_expected() + expected[-1].is_interrogative = True + expected[-1].moras += [ + Mora( + text="ア", + consonant=None, + consonant_length=None, + vowel="a", + vowel_length=0.15, + pitch=expected[-1].moras[-1].pitch + 0.3, + ) + ] + self.create_synthesis_test_base( + text="これはありますか?", + expected=expected, + enable_interrogative_upspeak=True, + ) + + expected = koreha_arimasuka_base_expected() + expected[-1].is_interrogative = True + self.create_synthesis_test_base( + text="これはありますか?", + expected=expected, + 
enable_interrogative_upspeak=False, + ) + + expected = koreha_arimasuka_base_expected() + self.create_synthesis_test_base( + text="これはありますか", + expected=expected, + enable_interrogative_upspeak=True, + ) + + def nn_base_expected(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ン", + consonant=None, + consonant_length=None, + vowel="N", + vowel_length=1.25, + pitch=1.44, + ) + ], + accent=1, + pause_mora=None, + is_interrogative=False, + ) + ] + + expected = nn_base_expected() + self.create_synthesis_test_base( + text="ん", + expected=expected, + enable_interrogative_upspeak=True, + ) + + expected = nn_base_expected() + expected[-1].is_interrogative = True + expected[-1].moras += [ + Mora( + text="ン", + consonant=None, + consonant_length=None, + vowel="N", + vowel_length=0.15, + pitch=expected[-1].moras[-1].pitch + 0.3, + ) + ] + self.create_synthesis_test_base( + text="ん?", + expected=expected, + enable_interrogative_upspeak=True, + ) + + expected = nn_base_expected() + expected[-1].is_interrogative = True + self.create_synthesis_test_base( + text="ん?", + expected=expected, + enable_interrogative_upspeak=False, + ) + + def ltu_base_expected(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ッ", + consonant=None, + consonant_length=None, + vowel="cl", + vowel_length=1.69, + pitch=0.0, + ) + ], + accent=1, + pause_mora=None, + is_interrogative=False, + ) + ] + + expected = ltu_base_expected() + self.create_synthesis_test_base( + text="っ", + expected=expected, + enable_interrogative_upspeak=True, + ) + + expected = ltu_base_expected() + expected[-1].is_interrogative = True + self.create_synthesis_test_base( + text="っ?", + expected=expected, + enable_interrogative_upspeak=True, + ) + + expected = ltu_base_expected() + expected[-1].is_interrogative = True + self.create_synthesis_test_base( + text="っ?", + expected=expected, + enable_interrogative_upspeak=False, + ) + + def su_base_expected(): + return [ + AccentPhrase( + moras=[ + Mora( + text="ス", + 
consonant="s", + consonant_length=3.19, + vowel="u", + vowel_length=3.5, + pitch=5.94, + ) + ], + accent=1, + pause_mora=None, + is_interrogative=False, + ) + ] + + expected = su_base_expected() + self.create_synthesis_test_base( + text="す", + expected=expected, + enable_interrogative_upspeak=True, + ) + + expected = su_base_expected() + expected[-1].is_interrogative = True + expected[-1].moras += [ + Mora( + text="ウ", + consonant=None, + consonant_length=None, + vowel="u", + vowel_length=0.15, + pitch=expected[-1].moras[-1].pitch + 0.3, + ) + ] + self.create_synthesis_test_base( + text="す?", + expected=expected, + enable_interrogative_upspeak=True, + ) + + expected = su_base_expected() + expected[-1].is_interrogative = True + self.create_synthesis_test_base( + text="す?", + expected=expected, + enable_interrogative_upspeak=False, + ) diff --git a/test/test_user_dict.py b/test/test_user_dict.py new file mode 100644 index 0000000000000000000000000000000000000000..4280bbe53e9b2d71df7b9c996f56ade7b802d561 --- /dev/null +++ b/test/test_user_dict.py @@ -0,0 +1,348 @@ +import json +from copy import deepcopy +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Dict +from unittest import TestCase + +from fastapi import HTTPException +from pyopenjtalk import g2p, unset_user_dict + +from voicevox_engine.model import UserDictWord, WordTypes +from voicevox_engine.part_of_speech_data import MAX_PRIORITY, part_of_speech_data +from voicevox_engine.user_dict import ( + apply_word, + create_word, + delete_word, + import_user_dict, + read_dict, + rewrite_word, + update_dict, +) + +# jsonとして保存される正しい形式の辞書データ +valid_dict_dict_json = { + "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": { + "surface": "test", + "cost": part_of_speech_data[WordTypes.PROPER_NOUN].cost_candidates[5], + "part_of_speech": "名詞", + "part_of_speech_detail_1": "固有名詞", + "part_of_speech_detail_2": "一般", + "part_of_speech_detail_3": "*", + "inflectional_type": "*", + "inflectional_form": 
"*", + "stem": "*", + "yomi": "テスト", + "pronunciation": "テスト", + "accent_type": 1, + "accent_associative_rule": "*", + }, +} + +# APIでやり取りされる正しい形式の辞書データ +valid_dict_dict_api = deepcopy(valid_dict_dict_json) +del valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]["cost"] +valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]["priority"] = 5 + +import_word = UserDictWord( + surface="test2", + priority=5, + part_of_speech="名詞", + part_of_speech_detail_1="固有名詞", + part_of_speech_detail_2="一般", + part_of_speech_detail_3="*", + inflectional_type="*", + inflectional_form="*", + stem="*", + yomi="テストツー", + pronunciation="テストツー", + accent_type=1, + accent_associative_rule="*", +) + + +def get_new_word(user_dict: Dict[str, UserDictWord]): + assert len(user_dict) == 2 or ( + len(user_dict) == 1 and "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e" not in user_dict + ) + for word_uuid in user_dict.keys(): + if word_uuid == "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": + continue + return user_dict[word_uuid] + raise AssertionError + + +class TestUserDict(TestCase): + def setUp(self): + self.tmp_dir = TemporaryDirectory() + self.tmp_dir_path = Path(self.tmp_dir.name) + + def tearDown(self): + unset_user_dict() + self.tmp_dir.cleanup() + + def test_read_not_exist_json(self): + self.assertEqual( + read_dict(user_dict_path=(self.tmp_dir_path / "not_exist.json")), + {}, + ) + + def test_create_word(self): + # 将来的に品詞などが追加された時にテストを増やす + self.assertEqual( + create_word(surface="test", pronunciation="テスト", accent_type=1), + UserDictWord( + surface="test", + priority=5, + part_of_speech="名詞", + part_of_speech_detail_1="固有名詞", + part_of_speech_detail_2="一般", + part_of_speech_detail_3="*", + inflectional_type="*", + inflectional_form="*", + stem="*", + yomi="テスト", + pronunciation="テスト", + accent_type=1, + accent_associative_rule="*", + ), + ) + + def test_apply_word_without_json(self): + user_dict_path = self.tmp_dir_path / "test_apply_word_without_json.json" + apply_word( + surface="test", 
+ pronunciation="テスト", + accent_type=1, + user_dict_path=user_dict_path, + compiled_dict_path=(self.tmp_dir_path / "test_apply_word_without_json.dic"), + ) + res = read_dict(user_dict_path=user_dict_path) + self.assertEqual(len(res), 1) + new_word = get_new_word(res) + self.assertEqual( + ( + new_word.surface, + new_word.pronunciation, + new_word.accent_type, + ), + ("test", "テスト", 1), + ) + + def test_apply_word_with_json(self): + user_dict_path = self.tmp_dir_path / "test_apply_word_with_json.json" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + apply_word( + surface="test2", + pronunciation="テストツー", + accent_type=3, + user_dict_path=user_dict_path, + compiled_dict_path=(self.tmp_dir_path / "test_apply_word_with_json.dic"), + ) + res = read_dict(user_dict_path=user_dict_path) + self.assertEqual(len(res), 2) + new_word = get_new_word(res) + self.assertEqual( + ( + new_word.surface, + new_word.pronunciation, + new_word.accent_type, + ), + ("test2", "テストツー", 3), + ) + + def test_rewrite_word_invalid_id(self): + user_dict_path = self.tmp_dir_path / "test_rewrite_word_invalid_id.json" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + self.assertRaises( + HTTPException, + rewrite_word, + word_uuid="c2be4dc5-d07d-4767-8be1-04a1bb3f05a9", + surface="test2", + pronunciation="テストツー", + accent_type=2, + user_dict_path=user_dict_path, + compiled_dict_path=(self.tmp_dir_path / "test_rewrite_word_invalid_id.dic"), + ) + + def test_rewrite_word_valid_id(self): + user_dict_path = self.tmp_dir_path / "test_rewrite_word_valid_id.json" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + rewrite_word( + word_uuid="aab7dda2-0d97-43c8-8cb7-3f440dab9b4e", + surface="test2", + pronunciation="テストツー", + accent_type=2, + user_dict_path=user_dict_path, + compiled_dict_path=(self.tmp_dir_path / 
"test_rewrite_word_valid_id.dic"), + ) + new_word = read_dict(user_dict_path=user_dict_path)[ + "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e" + ] + self.assertEqual( + (new_word.surface, new_word.pronunciation, new_word.accent_type), + ("test2", "テストツー", 2), + ) + + def test_delete_word_invalid_id(self): + user_dict_path = self.tmp_dir_path / "test_delete_word_invalid_id.json" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + self.assertRaises( + HTTPException, + delete_word, + word_uuid="c2be4dc5-d07d-4767-8be1-04a1bb3f05a9", + user_dict_path=user_dict_path, + compiled_dict_path=(self.tmp_dir_path / "test_delete_word_invalid_id.dic"), + ) + + def test_delete_word_valid_id(self): + user_dict_path = self.tmp_dir_path / "test_delete_word_valid_id.json" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + delete_word( + word_uuid="aab7dda2-0d97-43c8-8cb7-3f440dab9b4e", + user_dict_path=user_dict_path, + compiled_dict_path=(self.tmp_dir_path / "test_delete_word_valid_id.dic"), + ) + self.assertEqual(len(read_dict(user_dict_path=user_dict_path)), 0) + + def test_priority(self): + for pos in part_of_speech_data: + for i in range(MAX_PRIORITY + 1): + self.assertEqual( + create_word( + surface="test", + pronunciation="テスト", + accent_type=1, + word_type=pos, + priority=i, + ).priority, + i, + ) + + def test_import_dict(self): + user_dict_path = self.tmp_dir_path / "test_import_dict.json" + compiled_dict_path = self.tmp_dir_path / "test_import_dict.dic" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + import_user_dict( + {"b1affe2a-d5f0-4050-926c-f28e0c1d9a98": import_word}, + override=False, + user_dict_path=user_dict_path, + compiled_dict_path=compiled_dict_path, + ) + self.assertEqual( + read_dict(user_dict_path)["b1affe2a-d5f0-4050-926c-f28e0c1d9a98"], + import_word, + ) + self.assertEqual( + 
read_dict(user_dict_path)["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"], + UserDictWord(**valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]), + ) + + def test_import_dict_no_override(self): + user_dict_path = self.tmp_dir_path / "test_import_dict_no_override.json" + compiled_dict_path = self.tmp_dir_path / "test_import_dict_no_override.dic" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + import_user_dict( + {"aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": import_word}, + override=False, + user_dict_path=user_dict_path, + compiled_dict_path=compiled_dict_path, + ) + self.assertEqual( + read_dict(user_dict_path)["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"], + UserDictWord(**valid_dict_dict_api["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"]), + ) + + def test_import_dict_override(self): + user_dict_path = self.tmp_dir_path / "test_import_dict_override.json" + compiled_dict_path = self.tmp_dir_path / "test_import_dict_override.dic" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + import_user_dict( + {"aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": import_word}, + override=True, + user_dict_path=user_dict_path, + compiled_dict_path=compiled_dict_path, + ) + self.assertEqual( + read_dict(user_dict_path)["aab7dda2-0d97-43c8-8cb7-3f440dab9b4e"], + import_word, + ) + + def test_import_invalid_word(self): + user_dict_path = self.tmp_dir_path / "test_import_invalid_dict.json" + compiled_dict_path = self.tmp_dir_path / "test_import_invalid_dict.dic" + invalid_accent_associative_rule_word = deepcopy(import_word) + invalid_accent_associative_rule_word.accent_associative_rule = "invalid" + user_dict_path.write_text( + json.dumps(valid_dict_dict_json, ensure_ascii=False), encoding="utf-8" + ) + self.assertRaises( + AssertionError, + import_user_dict, + { + "aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": invalid_accent_associative_rule_word + }, + override=True, + user_dict_path=user_dict_path, + 
compiled_dict_path=compiled_dict_path, + ) + invalid_pos_word = deepcopy(import_word) + invalid_pos_word.context_id = 2 + invalid_pos_word.part_of_speech = "フィラー" + invalid_pos_word.part_of_speech_detail_1 = "*" + invalid_pos_word.part_of_speech_detail_2 = "*" + invalid_pos_word.part_of_speech_detail_3 = "*" + self.assertRaises( + ValueError, + import_user_dict, + {"aab7dda2-0d97-43c8-8cb7-3f440dab9b4e": invalid_pos_word}, + override=True, + user_dict_path=user_dict_path, + compiled_dict_path=compiled_dict_path, + ) + + def test_update_dict(self): + user_dict_path = self.tmp_dir_path / "test_update_dict.json" + compiled_dict_path = self.tmp_dir_path / "test_update_dict.dic" + update_dict( + user_dict_path=user_dict_path, compiled_dict_path=compiled_dict_path + ) + test_text = "テスト用の文字列" + success_pronunciation = "デフォルトノジショデハゼッタイニセイセイサレナイヨミ" + + # 既に辞書に登録されていないか確認する + self.assertNotEqual(g2p(text=test_text, kana=True), success_pronunciation) + + apply_word( + surface=test_text, + pronunciation=success_pronunciation, + accent_type=1, + priority=10, + user_dict_path=user_dict_path, + compiled_dict_path=compiled_dict_path, + ) + self.assertEqual(g2p(text=test_text, kana=True), success_pronunciation) + + # 疑似的にエンジンを再起動する + unset_user_dict() + update_dict( + user_dict_path=user_dict_path, compiled_dict_path=compiled_dict_path + ) + + self.assertEqual(g2p(text=test_text, kana=True), success_pronunciation) diff --git a/test/test_user_dict_model.py b/test/test_user_dict_model.py new file mode 100644 index 0000000000000000000000000000000000000000..9a3a49021d0c74ff15507d259ac2437eeaa03557 --- /dev/null +++ b/test/test_user_dict_model.py @@ -0,0 +1,108 @@ +from copy import deepcopy +from unittest import TestCase + +from pydantic import ValidationError + +from voicevox_engine.kana_parser import parse_kana +from voicevox_engine.model import UserDictWord + + +class TestUserDictWords(TestCase): + def setUp(self): + self.test_model = { + "surface": "テスト", + "priority": 0, + 
"part_of_speech": "名詞", + "part_of_speech_detail_1": "固有名詞", + "part_of_speech_detail_2": "一般", + "part_of_speech_detail_3": "*", + "inflectional_type": "*", + "inflectional_form": "*", + "stem": "*", + "yomi": "テスト", + "pronunciation": "テスト", + "accent_type": 0, + "accent_associative_rule": "*", + } + + def test_valid_word(self): + test_value = deepcopy(self.test_model) + try: + UserDictWord(**test_value) + except ValidationError as e: + self.fail(f"Unexpected Validation Error\n{str(e)}") + + def test_convert_to_zenkaku(self): + test_value = deepcopy(self.test_model) + test_value["surface"] = "test" + self.assertEqual(UserDictWord(**test_value).surface, "test") + + def test_count_mora(self): + test_value = deepcopy(self.test_model) + self.assertEqual(UserDictWord(**test_value).mora_count, 3) + + def test_count_mora_x(self): + test_value = deepcopy(self.test_model) + for s in [chr(i) for i in range(12449, 12533)]: + if s in ["ァ", "ィ", "ゥ", "ェ", "ォ", "ッ", "ャ", "ュ", "ョ", "ヮ"]: + continue + for x in "ァィゥェォャュョ": + expected_count = 0 + test_value["pronunciation"] = s + x + for accent_phrase in parse_kana( + test_value["pronunciation"] + "'", + ): + expected_count += len(accent_phrase.moras) + with self.subTest(s=s, x=x): + self.assertEqual( + UserDictWord(**test_value).mora_count, + expected_count, + ) + + def test_count_mora_xwa(self): + test_value = deepcopy(self.test_model) + test_value["pronunciation"] = "クヮンセイ" + expected_count = 0 + for accent_phrase in parse_kana( + test_value["pronunciation"] + "'", + ): + expected_count += len(accent_phrase.moras) + self.assertEqual( + UserDictWord(**test_value).mora_count, + expected_count, + ) + + def test_invalid_pronunciation_not_katakana(self): + test_value = deepcopy(self.test_model) + test_value["pronunciation"] = "ぼいぼ" + with self.assertRaises(ValidationError): + UserDictWord(**test_value) + + def test_invalid_pronunciation_invalid_sutegana(self): + test_value = deepcopy(self.test_model) + test_value["pronunciation"] = 
"アィウェォ" + with self.assertRaises(ValidationError): + UserDictWord(**test_value) + + def test_invalid_pronunciation_invalid_xwa(self): + test_value = deepcopy(self.test_model) + test_value["pronunciation"] = "アヮ" + with self.assertRaises(ValidationError): + UserDictWord(**test_value) + + def test_count_mora_voiced_sound(self): + test_value = deepcopy(self.test_model) + test_value["pronunciation"] = "ボイボ" + self.assertEqual(UserDictWord(**test_value).mora_count, 3) + + def test_invalid_accent_type(self): + test_value = deepcopy(self.test_model) + test_value["accent_type"] = 4 + with self.assertRaises(ValidationError): + UserDictWord(**test_value) + + def test_invalid_accent_type_2(self): + test_value = deepcopy(self.test_model) + test_value["accent_type"] = -1 + with self.assertRaises(ValidationError): + UserDictWord(**test_value) diff --git a/test/test_word_types.py b/test/test_word_types.py new file mode 100644 index 0000000000000000000000000000000000000000..1f2635b680e9b82d23ae3825f2a746b171d6ed3a --- /dev/null +++ b/test/test_word_types.py @@ -0,0 +1,9 @@ +from unittest import TestCase + +from voicevox_engine.model import WordTypes +from voicevox_engine.part_of_speech_data import part_of_speech_data + + +class TestWordTypes(TestCase): + def test_word_types(self): + self.assertCountEqual(list(WordTypes), list(part_of_speech_data.keys())) diff --git a/ui_template/ui.html b/ui_template/ui.html new file mode 100644 index 0000000000000000000000000000000000000000..a37b9e1040cf1952564f4507ee55ade0384c90a7 --- /dev/null +++ b/ui_template/ui.html @@ -0,0 +1,120 @@ + + + + + VOICEVOX Engine 設定 + + + + + + + +
+
+ + +
+ + +
+

+ allまたはlocalappsを指定。allはすべてを許可します。 +

+

+ localappsはオリジン間リソース共有ポリシーを、app://.とlocalhost関連に限定します。 +

+

+ その他のオリジンはallow_originオプションで追加できます。デフォルトはlocalapps。 +

+
+
+ +
+ + +
+ 許可するオリジンを指定します。複数指定する場合は、直後にスペースで区切って追加できます。 +
+
+ + + + +
+
# --- voicevox_engine/__init__.py ---
__version__ = "latest"


# --- voicevox_engine/acoustic_feature_extractor.py ---
from abc import abstractmethod
from enum import Enum
from pathlib import Path
from typing import List, Sequence

import numpy


class BasePhoneme(object):
    """
    Abstract base class for the concrete phoneme classes.

    Attributes
    ----------
    phoneme_list : Sequence[str]
        Ordered sequence of valid phoneme labels (defined by subclasses).
    num_phoneme : int
        Number of entries in ``phoneme_list``.
    space_phoneme : str
        Label of the phoneme that represents a pause (silence).
    """

    phoneme_list: Sequence[str]
    num_phoneme: int
    space_phoneme: str

    def __init__(
        self,
        phoneme: str,
        start: float,
        end: float,
    ):
        # Times are rounded to 2 decimals so equality checks are stable.
        self.phoneme = phoneme
        self.start = numpy.round(start, decimals=2)
        self.end = numpy.round(end, decimals=2)

    def __repr__(self):
        return f"Phoneme(phoneme='{self.phoneme}', start={self.start}, end={self.end})"

    def __eq__(self, o: object):
        return isinstance(o, BasePhoneme) and (
            self.phoneme == o.phoneme and self.start == o.start and self.end == o.end
        )

    def verify(self):
        """
        Assert that this phoneme's label is valid for its class.

        NOTE: ``assert`` is stripped under ``python -O``; do not rely on
        this check for untrusted input.
        """
        assert self.phoneme in self.phoneme_list, f"{self.phoneme} is not defined."

    @property
    def phoneme_id(self):
        """
        Index of this phoneme inside ``phoneme_list``.

        Returns
        -------
        id : int
            Position of ``self.phoneme`` in ``phoneme_list``.
        """
        return self.phoneme_list.index(self.phoneme)

    @property
    def duration(self):
        """
        Duration of the phoneme in seconds.

        Returns
        -------
        duration : float
            ``end - start`` (the original doc said ``int``; it is a float).
        """
        return self.end - self.start

    @property
    def onehot(self):
        """
        One-hot encoding of this phoneme.

        Returns
        -------
        onehot : numpy.ndarray
            Boolean array of length ``num_phoneme`` whose only True entry
            is at index ``phoneme_id``.
        """
        array = numpy.zeros(self.num_phoneme, dtype=bool)
        array[self.phoneme_id] = True
        return array

    @classmethod
    def parse(cls, s: str):
        """
        Parse one lab-format line ("start end phoneme") into a phoneme.

        Parameters
        ----------
        s : str
            The string to parse.

        Returns
        -------
        phoneme : BasePhoneme
            Phoneme built from the parsed fields.

        Examples
        --------
        >>> BasePhoneme.parse('1.7425000 1.9125000 o:')
        Phoneme(phoneme='o:', start=1.74, end=1.91)
        """
        words = s.split()
        return cls(
            start=float(words[0]),
            end=float(words[1]),
            phoneme=words[2],
        )

    @classmethod
    @abstractmethod
    def convert(cls, phonemes: List["BasePhoneme"]) -> List["BasePhoneme"]:
        # Subclasses normalize leading/trailing silence here.
        raise NotImplementedError

    @classmethod
    def load_lab_list(cls, path: Path):
        """
        Load, convert, and verify phonemes from a lab file.

        Parameters
        ----------
        path : Path
            Path of the lab file to read.

        Returns
        -------
        phonemes : List[BasePhoneme]
            Parsed and converted phonemes.
        """
        # Encoding pinned for deterministic behavior across platforms
        # (matches the UTF-8 convention used elsewhere in this project).
        phonemes = [
            cls.parse(s)
            for s in path.read_text(encoding="utf-8").split("\n")
            if len(s) > 0
        ]
        phonemes = cls.convert(phonemes)

        for phoneme in phonemes:
            phoneme.verify()
        return phonemes

    @classmethod
    def save_lab_list(cls, phonemes: List["BasePhoneme"], path: Path):
        """
        Save a list of phonemes in lab file format.

        Parameters
        ----------
        phonemes : List[BasePhoneme]
            Phonemes to save.
        path : Path
            Destination path of the lab file.
        """
        text = "\n".join(
            [
                f"{numpy.round(p.start, decimals=2):.2f}\t"
                f"{numpy.round(p.end, decimals=2):.2f}\t"
                f"{p.phoneme}"
                for p in phonemes
            ]
        )
        path.write_text(text, encoding="utf-8")


class JvsPhoneme(BasePhoneme):
    """
    Phoneme set of the JVS (Japanese Versatile Speech) corpus.

    Attributes
    ----------
    phoneme_list : Sequence[str]
        Ordered sequence of valid phoneme labels.
    num_phoneme : int
        Number of entries in ``phoneme_list``.
    space_phoneme : str
        Label of the phoneme that represents a pause.
    """

    phoneme_list = (
        "pau",
        "I",
        "N",
        "U",
        "a",
        "b",
        "by",
        "ch",
        "cl",
        "d",
        "dy",
        "e",
        "f",
        "g",
        "gy",
        "h",
        "hy",
        "i",
        "j",
        "k",
        "ky",
        "m",
        "my",
        "n",
        "ny",
        "o",
        "p",
        "py",
        "r",
        "ry",
        "s",
        "sh",
        "t",
        "ts",
        "u",
        "v",
        "w",
        "y",
        "z",
    )
    num_phoneme = len(phoneme_list)
    space_phoneme = "pau"

    @classmethod
    def convert(cls, phonemes: List["JvsPhoneme"]) -> List["JvsPhoneme"]:
        """
        Replace a leading/trailing silence ("sil") phoneme with
        ``space_phoneme`` ("pau"), in place.

        Parameters
        ----------
        phonemes : List[JvsPhoneme]
            Phonemes to convert.

        Returns
        -------
        phonemes : List[JvsPhoneme]
            The converted phonemes.
        """
        if "sil" in phonemes[0].phoneme:
            phonemes[0].phoneme = cls.space_phoneme
        if "sil" in phonemes[-1].phoneme:
            phonemes[-1].phoneme = cls.space_phoneme
        return phonemes


class OjtPhoneme(BasePhoneme):
    """
    Phoneme set used by OpenJTalk.

    Attributes
    ----------
    phoneme_list : Sequence[str]
        Ordered sequence of valid phoneme labels.
    num_phoneme : int
        Number of entries in ``phoneme_list``.
    space_phoneme : str
        Label of the phoneme that represents a pause.
    """

    phoneme_list = (
        "pau",
        "A",
        "E",
        "I",
        "N",
        "O",
        "U",
        "a",
        "b",
        "by",
        "ch",
        "cl",
        "d",
        "dy",
        "e",
        "f",
        "g",
        "gw",
        "gy",
        "h",
        "hy",
        "i",
        "j",
        "k",
        "kw",
        "ky",
        "m",
        "my",
        "n",
        "ny",
        "o",
        "p",
        "py",
        "r",
        "ry",
        "s",
        "sh",
        "t",
        "ts",
        "ty",
        "u",
        "v",
        "w",
        "y",
        "z",
    )
    num_phoneme = len(phoneme_list)
    space_phoneme = "pau"

    @classmethod
    def convert(cls, phonemes: List["OjtPhoneme"]):
        """
        Replace a leading/trailing silence ("sil") phoneme with
        ``space_phoneme`` ("pau"), in place.
        """
        if "sil" in phonemes[0].phoneme:
            phonemes[0].phoneme = cls.space_phoneme
        # NOTE: the trailing-silence branch and the return statement of this
        # method continue on the next line of the pasted diff.
        if "sil" in phonemes[-1].phoneme:
            phonemes[-1].phoneme = cls.space_phoneme
        return phonemes


class PhonemeType(str, Enum):
    jvs = "jvs"
    openjtalk = "openjtalk"


# Mapping from phoneme-set name to its implementation class.
phoneme_type_to_class = {
    PhonemeType.jvs: JvsPhoneme,
    PhonemeType.openjtalk: OjtPhoneme,
}


# --- voicevox_engine/cancellable_engine.py ---
import argparse
import asyncio
import queue
from multiprocessing import Pipe, Process
from multiprocessing.connection import Connection
from tempfile import NamedTemporaryFile
from typing import List, Optional, Tuple

import soundfile

# FIXME: remove FastAPI dependency
from fastapi import HTTPException, Request

from .model import AudioQuery
from .synthesis_engine import make_synthesis_engines
from .utility import get_latest_core_version


class CancellableEngine:
    """
    Cancellable speech synthesis.

    After initialization, audio can be synthesized with the synthesis
    function (note that it takes more arguments than the original).

    Attributes
    ----------
    watch_con_list: List[Tuple[Request, Process]]
        The Request is used to watch the connection; the Process is killed
        when the connection is dropped.  A tuple is appended when a client
        connects and removed when the connection closes or synthesis ends.
    procs_and_cons: queue.Queue[Tuple[Process, Connection]]
        Processes that are ready for synthesis
        (processes currently synthesizing are not in the queue).
    """

    def __init__(self, args: argparse.Namespace) -> None:
        """
        Initialize fields, start args.init_processes worker processes,
        and store them in procs_and_cons.
        """
        self.args = args
        if not self.args.enable_cancellable_synthesis:
            raise HTTPException(
                status_code=404,
                detail="実験的機能はデフォルトで無効になっています。使用するには引数を指定してください。",
            )

        self.watch_con_list: List[Tuple[Request, Process]] = []
        self.procs_and_cons: queue.Queue[Tuple[Process, Connection]] = queue.Queue()
        for _ in range(self.args.init_processes):
            self.procs_and_cons.put(self.start_new_proc())

    def start_new_proc(
        self,
    ) -> Tuple[Process, Connection]:
        """
        Start and return a new worker process.

        Returns
        -------
        ret_proc: Process
            The newly started process
        sub_proc_con1: Connection
            Pipe for communicating with ret_proc
        """
        sub_proc_con1, sub_proc_con2 = Pipe(True)
        ret_proc = Process(
            target=start_synthesis_subprocess,
            kwargs={
                "args": self.args,
                "sub_proc_con": sub_proc_con2,
            },
            daemon=True,
        )
        ret_proc.start()
        return ret_proc, sub_proc_con1

    def finalize_con(
        self,
        req: Request,
        proc: Process,
        sub_proc_con: Optional[Connection],
    ) -> None:
        """
        Clean up after a connection is closed: remove the pair from
        watch_con_list and dispose of the process.  A live process is
        put back into procs_and_cons; a dead one is replaced by a newly
        started process.

        Parameters
        ----------
        req: fastapi.Request
            The request received when the connection was established
            https://fastapi.tiangolo.com/advanced/using-request-directly/
        proc: Process
            The process that performed the synthesis
        sub_proc_con: Connection, optional
            Pipe to the synthesis process.
            If None, the process is not reused and is terminated.
        """
        try:
            self.watch_con_list.remove((req, proc))
        except ValueError:
            pass
        try:
            if not proc.is_alive() or sub_proc_con is None:
                proc.close()
                raise ValueError
            # The process is still alive, so reuse it
            self.procs_and_cons.put((proc, sub_proc_con))
        except ValueError:
            # The process is dead, so create a fresh one
            self.procs_and_cons.put(self.start_new_proc())

    def _synthesis_impl(
        self,
        query: AudioQuery,
        speaker_id: int,
        request: Request,
        core_version: Optional[str],
    ) -> str:
        """
        Perform speech synthesis.  Unlike the regular engine this takes a
        request argument, and returns a file name instead of a waveform.

        Parameters
        ----------
        query: AudioQuery
        speaker_id: int
        request: fastapi.Request
            The request received when the connection was established
            https://fastapi.tiangolo.com/advanced/using-request-directly/
        core_version: str

        Returns
        -------
        f_name: str
            Name of the generated audio file
        """
        proc, sub_proc_con1 = self.procs_and_cons.get()
        self.watch_con_list.append((request, proc))
        try:
            sub_proc_con1.send((query, speaker_id, core_version))
            f_name = sub_proc_con1.recv()
        except EOFError:
            raise HTTPException(status_code=422, detail="既にサブプロセスは終了されています")
        except Exception:
            self.finalize_con(request, proc, sub_proc_con1)
            raise

        self.finalize_con(request, proc, sub_proc_con1)
        return f_name

    async def catch_disconnection(self):
        """
        Coroutine that watches live connections; when a client disconnects,
        its synthesis process is terminated and cleaned up.
        """
        while True:
            await asyncio.sleep(1)
            for con in self.watch_con_list:
                req, proc = con
                if await req.is_disconnected():
                    try:
                        if proc.is_alive():
                            proc.terminate()
                            proc.join()
                        proc.close()
                    except ValueError:
                        pass
                    finally:
                        self.finalize_con(req, proc, None)


def start_synthesis_subprocess(
    args: argparse.Namespace,
    sub_proc_con: Connection,
):
    """
    Entry point of the synthesis worker process.
    Defined at module level so it can be pickled.

    Parameters
    ----------
    args: argparse.Namespace
        The namespace created at startup, passed through unchanged
    sub_proc_con: Connection
        Pipe to the main process
    """

    synthesis_engines = make_synthesis_engines(
        use_gpu=args.use_gpu,
        voicelib_dirs=args.voicelib_dir,
        voicevox_dir=args.voicevox_dir,
        runtime_dirs=args.runtime_dir,
        cpu_num_threads=args.cpu_num_threads,
        enable_mock=args.enable_mock,
    )
    assert len(synthesis_engines) != 0, "音声合成エンジンがありません。"
    latest_core_version = get_latest_core_version(versions=synthesis_engines.keys())
    while True:
        try:
            query, speaker_id, core_version = sub_proc_con.recv()
            if core_version is None:
                _engine = synthesis_engines[latest_core_version]
            elif core_version in synthesis_engines:
                _engine = synthesis_engines[core_version]
            else:
                # Requested core version is not available
                sub_proc_con.send("")
                continue
            wave = _engine._synthesis_impl(query, speaker_id)
            with NamedTemporaryFile(delete=False) as f:
                soundfile.write(
                    file=f, data=wave, samplerate=query.outputSamplingRate, format="WAV"
                )
            sub_proc_con.send(f.name)
        except Exception:
            sub_proc_con.close()
            raise
# --- voicevox_engine/dev/core/__init__.py ---
from .mock import (
    decode_forward,
    initialize,
    metas,
    supported_devices,
    yukarin_s_forward,
    yukarin_sa_forward,
)

__all__ = [
    "decode_forward",
    "initialize",
    "yukarin_s_forward",
    "yukarin_sa_forward",
    "metas",
    "supported_devices",
]


# --- voicevox_engine/dev/core/mock.py ---
import json
from logging import getLogger
from typing import Any, Dict, List

import numpy as np
from pyopenjtalk import tts
from scipy.signal import resample

# Fixed text the mock always reads, regardless of input.
DUMMY_TEXT = "これはダミーのテキストです"


def initialize(path: str, use_gpu: bool, *args: List[Any]) -> None:
    # The mock has nothing to initialize.
    pass


def yukarin_s_forward(length: int, **kwargs: Dict[str, Any]) -> np.ndarray:
    """Mock phoneme-length model: returns a constant 0.2 per entry."""
    logger = getLogger("uvicorn")  # used from within FastAPI / Uvicorn
    logger.info(
        "Sorry, yukarin_s_forward() is a mock. Return values are incorrect.",
    )
    return np.ones(length) / 5


def yukarin_sa_forward(length: int, **kwargs: Dict[str, Any]) -> np.ndarray:
    """Mock pitch model: returns a constant 5.0 per entry (shape (1, length))."""
    logger = getLogger("uvicorn")  # used from within FastAPI / Uvicorn
    logger.info(
        "Sorry, yukarin_sa_forward() is a mock. Return values are incorrect.",
    )
    return np.ones((1, length)) * 5


def decode_forward(length: int, **kwargs: Dict[str, Any]) -> np.ndarray:
    """
    Return a synthesized waveform as a NumPy array; always reads the fixed
    text DUMMY_TEXT.
    See the SynthesisEngine docstring [Mock].

    Parameters
    ----------
    length : int
        Frame length

    Returns
    -------
    wave : np.ndarray
        Synthesized waveform

    Note
    -------
    The synthesis performed here does not reflect prosody (pitch etc.),
    and always reads the same fixed text regardless of input.

    # pyopenjtalk.tts() output spec:
    dtype=np.float64, 16 bit, mono 48000 Hz

    # resample:
    The output is converted to 24 kHz to match the non-mock decode_forward.
    """
    logger = getLogger("uvicorn")  # used from within FastAPI / Uvicorn
    logger.info(
        "Sorry, decode_forward() is a mock. Return values are incorrect.",
    )
    wave, sr = tts(DUMMY_TEXT)
    # NOTE(review): `sr` is unused, and astype("int16") happens *before*
    # resample, so the returned array is float — confirm callers expect that.
    wave = resample(
        wave.astype("int16"),
        24000 * len(wave) // 48000,
    )
    return wave


def metas() -> str:
    """Return mock speaker metadata as a JSON string."""
    return json.dumps(
        [
            {
                "name": "dummy1",
                "styles": [
                    {"name": "style0", "id": 0},
                    {"name": "style1", "id": 2},
                    {"name": "style2", "id": 4},
                    {"name": "style3", "id": 6},
                ],
                "speaker_uuid": "7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff",
                "version": "mock",
            },
            {
                "name": "dummy2",
                "styles": [
                    {"name": "style0", "id": 1},
                    {"name": "style1", "id": 3},
                    {"name": "style2", "id": 5},
                    {"name": "style3", "id": 7},
                ],
                "speaker_uuid": "388f246b-8c41-4ac1-8e2d-5d79f3ff56d9",
                "version": "mock",
            },
            {
                "name": "dummy3",
                "styles": [
                    {"name": "style0", "id": 8},
                ],
                "speaker_uuid": "35b2c544-660e-401e-b503-0e14c635303a",
                "version": "mock",
            },
            {
                "name": "dummy4",
                "styles": [
                    {"name": "style0", "id": 9},
                ],
                "speaker_uuid": "b1a81618-b27b-40d2-b0ea-27a9ad408c4b",
                "version": "mock",
            },
        ]
    )


def supported_devices() -> str:
    """Return mock device-support info (CPU only) as a JSON string."""
    return json.dumps(
        {
            "cpu": True,
            "cuda": False,
        }
    )
# --- voicevox_engine/dev/synthesis_engine/__init__.py ---
from .mock import MockSynthesisEngine

__all__ = ["MockSynthesisEngine"]


# --- voicevox_engine/dev/synthesis_engine/mock.py ---
from logging import getLogger
from typing import Any, Dict, List, Optional

import numpy as np
from pyopenjtalk import tts
from scipy.signal import resample

from ...model import AccentPhrase, AudioQuery
from ...synthesis_engine import SynthesisEngineBase
from ...synthesis_engine.synthesis_engine import to_flatten_moras


class MockSynthesisEngine(SynthesisEngineBase):
    """
    SynthesisEngine [Mock]

    Synthesizes audio via pyopenjtalk instead of VOICEVOX Core;
    prosody-related queries are passed through unchanged.
    """

    def __init__(
        self,
        speakers: str,
        supported_devices: Optional[str] = None,
    ):
        """
        __init__ [Mock]

        # NOTE(review): presumably `speakers` / `supported_devices` are the
        # JSON strings produced by the core's metas()/supported_devices()
        # mocks — confirm against make_synthesis_engines.
        """
        super().__init__()

        self._speakers = speakers
        self._supported_devices = supported_devices
        self.default_sampling_rate = 24000

    @property
    def speakers(self) -> str:
        # Speaker metadata, as given at construction time.
        return self._speakers

    @property
    def supported_devices(self) -> Optional[str]:
        # Device-support info, as given at construction time (may be None).
        return self._supported_devices

    def replace_phoneme_length(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        replace_phoneme_length: returns the input accent_phrases unchanged [Mock]

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            List of accent phrases
        speaker_id : int
            Speaker

        Returns
        -------
        List[AccentPhrase]
            List of accent phrases (unchanged)
        """
        return accent_phrases

    def replace_mora_pitch(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        replace_mora_pitch: returns the input accent_phrases unchanged [Mock]

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            List of accent phrases
        speaker_id : int
            Speaker

        Returns
        -------
        List[AccentPhrase]
            List of accent phrases (unchanged)
        """
        return accent_phrases

    def _synthesis_impl(self, query: AudioQuery, speaker_id: int) -> np.ndarray:
        """
        synthesis: synthesize audio without using VOICEVOX Core [Mock]

        Parameters
        ----------
        query : AudioQuery
            JSON obtained from the /audio_query API
        speaker_id : int
            Speaker

        Returns
        -------
        wave [npt.NDArray[np.int16]]
            Waveform data as a NumPy array
        """
        # recall text in katakana
        flatten_moras = to_flatten_moras(query.accent_phrases)
        kana_text = "".join([mora.text for mora in flatten_moras])

        wave = self.forward(kana_text)

        # volume
        wave *= query.volumeScale

        return wave.astype("int16")

    def forward(self, text: str, **kwargs: Dict[str, Any]) -> np.ndarray:
        """
        forward: tts via pyopenjtalk.tts()
        See the SynthesisEngine docstring [Mock]

        Parameters
        ----------
        text : str
            Input string (e.g. the sentence to read, converted to katakana)

        Returns
        -------
        wave [npt.NDArray[np.int16]]
            Waveform data as a NumPy array

        Note
        -------
        The synthesis performed here does not reflect prosody (pitch etc.).

        # pyopenjtalk.tts() output spec:
        dtype=np.float64, 16 bit, mono 48000 Hz

        # resample:
        The output is converted to 24 kHz to match the non-mock
        implementation (decode_forward).
        """
        logger = getLogger("uvicorn")  # used from within FastAPI / Uvicorn
        logger.info("[Mock] input text: %s" % text)
        wave, sr = tts(text)
        wave = resample(wave, 24000 * len(wave) // 48000)
        return wave


# --- voicevox_engine/downloadable_library.py ---
# NOTE: this file is truncated by the paste; the body of LibraryManager
# (its method definitions) continues on the next pasted line.
import base64
import json
import zipfile
from io import BytesIO
from pathlib import Path
from typing import List

from fastapi import HTTPException

from voicevox_engine.model import DownloadableLibrary

__all__ = ["LibraryManager"]

# File name under each installed-library directory holding its metadata.
INFO_FILE = "metas.json"


class LibraryManager:
__init__(self, library_root_dir: Path): + self.library_root_dir = library_root_dir + self.library_root_dir.mkdir(exist_ok=True) + + def downloadable_libraries(self): + # == ダウンロード情報をネットワーク上から取得する場合 + # url = "https://example.com/downloadable_libraries.json" + # response = requests.get(url) + # return list(map(DownloadableLibrary.parse_obj, response.json())) + + # == ダウンロード情報をjsonファイルから取得する場合 + # with open( + # self.root_dir / "engine_manifest_assets" / "downloadable_libraries.json", + # encoding="utf-8", + # ) as f: + # return list(map(DownloadableLibrary.parse_obj, json.load(f))) + + # ダミーとして、speaker_infoのアセットを読み込む + with open( + "./engine_manifest_assets/downloadable_libraries.json", + encoding="utf-8", + ) as f: + libraries = json.load(f) + speaker_info = libraries[0]["speakers"][0]["speaker_info"] + mock_root_dir = Path("./speaker_info/7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff") + speaker_info["policy"] = (mock_root_dir / "policy.md").read_text() + speaker_info["portrait"] = base64.b64encode( + (mock_root_dir / "portrait.png").read_bytes() + ) + for style_info in speaker_info["style_infos"]: + style_id = style_info["id"] + style_info["icon"] = base64.b64encode( + (mock_root_dir / "icons" / f"{style_id}.png").read_bytes() + ) + style_info["voice_samples"] = [ + base64.b64encode( + ( + mock_root_dir / "voice_samples" / f"{style_id}_{i:0>3}.wav" + ).read_bytes() + ) + for i in range(1, 4) + ] + return list(map(DownloadableLibrary.parse_obj, libraries)) + + def installed_libraries(self) -> List[DownloadableLibrary]: + library = [] + for library_dir in self.library_root_dir.iterdir(): + if library_dir.is_dir(): + with open(library_dir / INFO_FILE, encoding="utf-8") as f: + library.append(json.load(f)) + return library + + def install_library(self, library_id: str, file: BytesIO): + for downloadable_library in self.downloadable_libraries(): + if downloadable_library.uuid == library_id: + library_info = downloadable_library.dict() + break + else: + raise 
HTTPException(status_code=404, detail="指定された音声ライブラリが見つかりません。") + library_dir = self.library_root_dir / library_id + library_dir.mkdir(exist_ok=True) + with open(library_dir / INFO_FILE, "w", encoding="utf-8") as f: + json.dump(library_info, f, indent=4, ensure_ascii=False) + with zipfile.ZipFile(file) as zf: + if zf.testzip() is not None: + raise HTTPException(status_code=422, detail="不正なZIPファイルです。") + + zf.extractall(library_dir) + return library_dir diff --git a/voicevox_engine/engine_manifest/EngineManifest.py b/voicevox_engine/engine_manifest/EngineManifest.py new file mode 100644 index 0000000000000000000000000000000000000000..44a9329b40658999fda3f369887ab4455d86372d --- /dev/null +++ b/voicevox_engine/engine_manifest/EngineManifest.py @@ -0,0 +1,58 @@ +from typing import List, Optional + +from pydantic import BaseModel, Field + + +class UpdateInfo(BaseModel): + """ + エンジンのアップデート情報 + """ + + version: str = Field(title="エンジンのバージョン名") + descriptions: List[str] = Field(title="アップデートの詳細についての説明") + contributors: Optional[List[str]] = Field(title="貢献者名") + + +class LicenseInfo(BaseModel): + """ + 依存ライブラリのライセンス情報 + """ + + name: str = Field(title="依存ライブラリ名") + version: Optional[str] = Field(title="依存ライブラリのバージョン") + license: Optional[str] = Field(title="依存ライブラリのライセンス名") + text: str = Field(title="依存ライブラリのライセンス本文") + + +class SupportedFeatures(BaseModel): + """ + エンジンが持つ機能の一覧 + """ + + adjust_mora_pitch: bool = Field(title="モーラごとの音高の調整") + adjust_phoneme_length: bool = Field(title="音素ごとの長さの調整") + adjust_speed_scale: bool = Field(title="全体の話速の調整") + adjust_pitch_scale: bool = Field(title="全体の音高の調整") + adjust_intonation_scale: bool = Field(title="全体の抑揚の調整") + adjust_volume_scale: bool = Field(title="全体の音量の調整") + interrogative_upspeak: bool = Field(title="疑問文の自動調整") + synthesis_morphing: bool = Field(title="2人の話者でモーフィングした音声を合成") + manage_library: bool = Field(title="音声ライブラリのインストール・アンインストール") + + +class EngineManifest(BaseModel): + """ + エンジン自体に関する情報 + """ + + 
manifest_version: str = Field(title="マニフェストのバージョン") + name: str = Field(title="エンジン名") + brand_name: str = Field(title="ブランド名") + uuid: str = Field(title="エンジンのUUID") + url: str = Field(title="エンジンのURL") + icon: str = Field(title="エンジンのアイコンをBASE64エンコードしたもの") + default_sampling_rate: int = Field(title="デフォルトのサンプリング周波数") + terms_of_service: str = Field(title="エンジンの利用規約") + update_infos: List[UpdateInfo] = Field(title="エンジンのアップデート情報") + dependency_licenses: List[LicenseInfo] = Field(title="依存関係のライセンス情報") + supported_features: SupportedFeatures = Field(title="エンジンが持つ機能") diff --git a/voicevox_engine/engine_manifest/EngineManifestLoader.py b/voicevox_engine/engine_manifest/EngineManifestLoader.py new file mode 100644 index 0000000000000000000000000000000000000000..bec6a2a7b6ec7f7316eace37db102070ba437a21 --- /dev/null +++ b/voicevox_engine/engine_manifest/EngineManifestLoader.py @@ -0,0 +1,46 @@ +import json +from base64 import b64encode +from pathlib import Path + +from .EngineManifest import EngineManifest, LicenseInfo, UpdateInfo + + +class EngineManifestLoader: + def __init__(self, manifest_path: Path, root_dir: Path): + self.manifest_path = manifest_path + self.root_dir = root_dir + + def load_manifest(self) -> EngineManifest: + manifest = json.loads(self.manifest_path.read_text(encoding="utf-8")) + + manifest = EngineManifest( + manifest_version=manifest["manifest_version"], + name=manifest["name"], + brand_name=manifest["brand_name"], + uuid=manifest["uuid"], + url=manifest["url"], + default_sampling_rate=manifest["default_sampling_rate"], + icon=b64encode((self.root_dir / manifest["icon"]).read_bytes()).decode( + "utf-8" + ), + terms_of_service=(self.root_dir / manifest["terms_of_service"]).read_text( + "utf-8" + ), + update_infos=[ + UpdateInfo(**update_info) + for update_info in json.loads( + (self.root_dir / manifest["update_infos"]).read_text("utf-8") + ) + ], + dependency_licenses=[ + LicenseInfo(**license_info) + for license_info in json.loads( + 
(self.root_dir / manifest["dependency_licenses"]).read_text("utf-8") + ) + ], + supported_features={ + key: item["value"] + for key, item in manifest["supported_features"].items() + }, + ) + return manifest diff --git a/voicevox_engine/engine_manifest/__init__.py b/voicevox_engine/engine_manifest/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..02c293179a59cbae3f28ce79ab8a31616fa69d19 --- /dev/null +++ b/voicevox_engine/engine_manifest/__init__.py @@ -0,0 +1,7 @@ +from .EngineManifest import EngineManifest +from .EngineManifestLoader import EngineManifestLoader + +__all__ = [ + "EngineManifest", + "EngineManifestLoader", +] diff --git a/voicevox_engine/full_context_label.py b/voicevox_engine/full_context_label.py new file mode 100644 index 0000000000000000000000000000000000000000..894a56751ad95a979487cf1cbf4e846f8e163d04 --- /dev/null +++ b/voicevox_engine/full_context_label.py @@ -0,0 +1,525 @@ +import re +from dataclasses import dataclass +from itertools import chain +from typing import Dict, List, Optional + +import pyopenjtalk + + +@dataclass +class Phoneme: + """ + 音素(母音・子音)クラス、音素の元となるcontextを保持する + 音素には、母音や子音以外にも無音(silent/pause)も含まれる + + Attributes + ---------- + contexts: Dict[str, str] + 音素の元 + """ + + contexts: Dict[str, str] + + @classmethod + def from_label(cls, label: str): + """ + pyopenjtalk.extract_fullcontextで得られる音素の元(ラベル)から、Phonemeクラスを作成する + Parameters + ---------- + label : str + pyopenjtalk.extract_fullcontextで得られるラベルを渡す + + Returns + ------- + phoneme: Phoneme + Phonemeクラスを返す + """ + + # フルコンテキストラベルの仕様は、 + # http://hts.sp.nitech.ac.jp/?Download の HTS-2.3のJapanese tar.bz2 (126 MB)をダウンロードして、data/lab_format.pdfを見るとリストが見つかります。 # noqa + contexts = re.search( + r"^(?P<p1>.+?)\^(?P<p2>.+?)\-(?P<p3>.+?)\+(?P<p4>.+?)\=(?P<p5>.+?)" + r"/A\:(?P<a1>.+?)\+(?P<a2>.+?)\+(?P<a3>.+?)" + r"/B\:(?P<b1>.+?)\-(?P<b2>.+?)\_(?P<b3>.+?)" + r"/C\:(?P<c1>.+?)\_(?P<c2>.+?)\+(?P<c3>.+?)" + r"/D\:(?P<d1>.+?)\+(?P<d2>.+?)\_(?P<d3>.+?)" + r"/E\:(?P<e1>.+?)\_(?P<e2>.+?)\!(?P<e3>.+?)\_(?P<e4>.+?)\-(?P<e5>.+?)" + 
r"/F\:(?P<f1>.+?)\_(?P<f2>.+?)\#(?P<f3>.+?)\_(?P<f4>.+?)\@(?P<f5>.+?)\_(?P<f6>.+?)\|(?P<f7>.+?)\_(?P<f8>.+?)" # noqa + r"/G\:(?P<g1>.+?)\_(?P<g2>.+?)\%(?P<g3>.+?)\_(?P<g4>.+?)\_(?P<g5>.+?)" + r"/H\:(?P<h1>.+?)\_(?P<h2>
.+?)" + r"/I\:(?P<i1>.+?)\-(?P<i2>.+?)\@(?P<i3>.+?)\+(?P<i4>.+?)\&(?P<i5>.+?)\-(?P<i6>.+?)\|(?P<i7>.+?)\+(?P<i8>.+?)" # noqa + r"/J\:(?P<j1>.+?)\_(?P<j2>.+?)" + r"/K\:(?P<k1>.+?)\+(?P<k2>.+?)\-(?P<k3>.+?)$", + label, + ).groupdict() + return cls(contexts=contexts) + + @property + def label(self): + """ + pyopenjtalk.extract_fullcontextで得られるラベルと等しい + Returns + ------- + label: str + ラベルを返す + """ + return ( + "{p1}^{p2}-{p3}+{p4}={p5}" + "/A:{a1}+{a2}+{a3}" + "/B:{b1}-{b2}_{b3}" + "/C:{c1}_{c2}+{c3}" + "/D:{d1}+{d2}_{d3}" + "/E:{e1}_{e2}!{e3}_{e4}-{e5}" + "/F:{f1}_{f2}#{f3}_{f4}@{f5}_{f6}|{f7}_{f8}" + "/G:{g1}_{g2}%{g3}_{g4}_{g5}" + "/H:{h1}_{h2}" + "/I:{i1}-{i2}@{i3}+{i4}&{i5}-{i6}|{i7}+{i8}" + "/J:{j1}_{j2}" + "/K:{k1}+{k2}-{k3}" + ).format(**self.contexts) + + @property + def phoneme(self): + """ + 音素クラスの中で、発声に必要な要素を返す + Returns + ------- + phoneme : str + 発声に必要な要素を返す + """ + return self.contexts["p3"] + + def is_pause(self): + """ + 音素がポーズ(無音、silent/pause)であるかを返す + Returns + ------- + is_pause : bool + 音素がポーズ(無音、silent/pause)であるか(True)否か(False) + """ + return self.contexts["f1"] == "xx" + + def __repr__(self): + return f"<Phoneme phoneme='{self.phoneme}'>" + + +@dataclass +class Mora: + """ + モーラクラス + モーラは1音素(母音や促音「っ」、撥音「ん」など)か、2音素(母音と子音の組み合わせ)で成り立つ + + Attributes + ---------- + consonant : Optional[Phoneme] + 子音 + vowel : Phoneme + 母音 + """ + + consonant: Optional[Phoneme] + vowel: Phoneme + + def set_context(self, key: str, value: str): + """ + Moraクラス内に含まれるPhonemeのcontextのうち、指定されたキーの値を変更する + consonantが存在する場合は、vowelと同じようにcontextを変更する + Parameters + ---------- + key : str + 変更したいcontextのキー + value : str + 変更したいcontextの値 + """ + self.vowel.contexts[key] = value + if self.consonant is not None: + self.consonant.contexts[key] = value + + @property + def phonemes(self): + """ + 音素群を返す + Returns + ------- + phonemes : List[Phoneme] + 母音しかない場合は母音のみ、子音もある場合は子音、母音の順番でPhonemeのリストを返す + """ + if self.consonant is not None: + return [self.consonant, self.vowel] + else: + return [self.vowel] + + @property + def labels(self): + """ + ラベル群を返す + Returns + -------
+ labels : List[str] + Moraに含まれるすべてのラベルを返す + """ + return [p.label for p in self.phonemes] + + +@dataclass +class AccentPhrase: + """ + アクセント句クラス + 同じアクセントのMoraを複数保持する + Attributes + ---------- + moras : List[Mora] + 音韻のリスト + accent : int + アクセント + """ + + moras: List[Mora] + accent: int + is_interrogative: bool + + @classmethod + def from_phonemes(cls, phonemes: List[Phoneme]): + """ + PhonemeのリストからAccentPhraseクラスを作成する + Parameters + ---------- + phonemes : List[Phoneme] + phonemeのリストを渡す + + Returns + ------- + accent_phrase : AccentPhrase + AccentPhraseクラスを返す + """ + moras: List[Mora] = [] + + mora_phonemes: List[Phoneme] = [] + for phoneme, next_phoneme in zip(phonemes, phonemes[1:] + [None]): + # workaround for Hihosiba/voicevox_engine#57 + # (py)openjtalk によるアクセント句内のモーラへの附番は 49 番目まで + # 49 番目のモーラについて、続く音素のモーラ番号を単一モーラの特定に使えない + if int(phoneme.contexts["a2"]) == 49: + break + + mora_phonemes.append(phoneme) + + if ( + next_phoneme is None + or phoneme.contexts["a2"] != next_phoneme.contexts["a2"] + ): + if len(mora_phonemes) == 1: + consonant, vowel = None, mora_phonemes[0] + elif len(mora_phonemes) == 2: + consonant, vowel = mora_phonemes[0], mora_phonemes[1] + else: + raise ValueError(mora_phonemes) + mora = Mora(consonant=consonant, vowel=vowel) + moras.append(mora) + mora_phonemes = [] + + accent = int(moras[0].vowel.contexts["f2"]) + # workaround for Hihosiba/voicevox_engine#55 + # アクセント位置とするキー f2 の値がアクセント句内のモーラ数を超える場合がある + accent = accent if accent <= len(moras) else len(moras) + is_interrogative = moras[-1].vowel.contexts["f3"] == "1" + return cls(moras=moras, accent=accent, is_interrogative=is_interrogative) + + def set_context(self, key: str, value: str): + """ + AccentPhraseに間接的に含まれる全てのPhonemeのcontextの、指定されたキーの値を変更する + Parameters + ---------- + key : str + 変更したいcontextのキー + value : str + 変更したいcontextの値 + """ + for mora in self.moras: + mora.set_context(key, value) + + @property + def phonemes(self): + """ + 音素群を返す + Returns + ------- + phonemes : 
List[Phoneme] + AccentPhraseに間接的に含まれる全てのPhonemeを返す + """ + return list(chain.from_iterable(m.phonemes for m in self.moras)) + + @property + def labels(self): + """ + ラベル群を返す + Returns + ------- + labels : List[str] + AccentPhraseに間接的に含まれる全てのラベルを返す + """ + return [p.label for p in self.phonemes] + + def merge(self, accent_phrase: "AccentPhrase"): + """ + AccentPhraseを合成する + (このクラスが保持するmorasの後ろに、引数として渡されたAccentPhraseのmorasを合成する) + Parameters + ---------- + accent_phrase : AccentPhrase + 合成したいAccentPhraseを渡す + + Returns + ------- + accent_phrase : AccentPhrase + 合成されたAccentPhraseを返す + """ + return AccentPhrase( + moras=self.moras + accent_phrase.moras, + accent=self.accent, + is_interrogative=accent_phrase.is_interrogative, + ) + + +@dataclass +class BreathGroup: + """ + 発声の区切りクラス + アクセントの異なるアクセント句を複数保持する + Attributes + ---------- + accent_phrases : List[AccentPhrase] + アクセント句のリスト + """ + + accent_phrases: List[AccentPhrase] + + @classmethod + def from_phonemes(cls, phonemes: List[Phoneme]): + """ + PhonemeのリストからBreathGroupクラスを作成する + Parameters + ---------- + phonemes : List[Phoneme] + phonemeのリストを渡す + + Returns + ------- + breath_group : BreathGroup + BreathGroupクラスを返す + """ + accent_phrases: List[AccentPhrase] = [] + accent_phonemes: List[Phoneme] = [] + for phoneme, next_phoneme in zip(phonemes, phonemes[1:] + [None]): + accent_phonemes.append(phoneme) + + if ( + next_phoneme is None + or phoneme.contexts["i3"] != next_phoneme.contexts["i3"] + or phoneme.contexts["f5"] != next_phoneme.contexts["f5"] + ): + accent_phrase = AccentPhrase.from_phonemes(accent_phonemes) + accent_phrases.append(accent_phrase) + accent_phonemes = [] + + return cls(accent_phrases=accent_phrases) + + def set_context(self, key: str, value: str): + """ + BreathGroupに間接的に含まれる全てのPhonemeのcontextの、指定されたキーの値を変更する + Parameters + ---------- + key : str + 変更したいcontextのキー + value : str + 変更したいcontextの値 + """ + for accent_phrase in self.accent_phrases: + accent_phrase.set_context(key, value) + + 
@property + def phonemes(self): + """ + 音素群を返す + Returns + ------- + phonemes : List[Phoneme] + BreathGroupに間接的に含まれる全てのPhonemeを返す + """ + return list( + chain.from_iterable( + accent_phrase.phonemes for accent_phrase in self.accent_phrases + ) + ) + + @property + def labels(self): + """ + ラベル群を返す + Returns + ------- + labels : List[str] + BreathGroupに間接的に含まれる全てのラベルを返す + """ + return [p.label for p in self.phonemes] + + +@dataclass +class Utterance: + """ + 発声クラス + 発声の区切りと無音を複数保持する + Attributes + ---------- + breath_groups : List[BreathGroup] + 発声の区切りのリスト + pauses : List[Phoneme] + 無音のリスト + """ + + breath_groups: List[BreathGroup] + pauses: List[Phoneme] + + @classmethod + def from_phonemes(cls, phonemes: List[Phoneme]): + """ + Phonemeの完全なリストからUtteranceクラスを作成する + Parameters + ---------- + phonemes : List[Phoneme] + phonemeのリストを渡す + + Returns + ------- + utterance : Utterance + Utteranceクラスを返す + """ + pauses: List[Phoneme] = [] + + breath_groups: List[BreathGroup] = [] + group_phonemes: List[Phoneme] = [] + for phoneme in phonemes: + if not phoneme.is_pause(): + group_phonemes.append(phoneme) + + else: + pauses.append(phoneme) + + if len(group_phonemes) > 0: + breath_group = BreathGroup.from_phonemes(group_phonemes) + breath_groups.append(breath_group) + group_phonemes = [] + + return cls(breath_groups=breath_groups, pauses=pauses) + + def set_context(self, key: str, value: str): + """ + Utteranceに間接的に含まれる全てのPhonemeのcontextの、指定されたキーの値を変更する + Parameters + ---------- + key : str + 変更したいcontextのキー + value : str + 変更したいcontextの値 + """ + for breath_group in self.breath_groups: + breath_group.set_context(key, value) + + @property + def phonemes(self): + """ + 音素群を返す + Returns + ------- + phonemes : List[Phoneme] + Utteranceクラスに直接的・間接的に含まれる、全てのPhonemeを返す + """ + accent_phrases = list( + chain.from_iterable( + breath_group.accent_phrases for breath_group in self.breath_groups + ) + ) + for prev, cent, post in zip( + [None] + accent_phrases[:-1], + accent_phrases, + 
accent_phrases[1:] + [None], + ): + mora_num = len(cent.moras) + accent = cent.accent + + if prev is not None: + prev.set_context("g1", str(mora_num)) + prev.set_context("g2", str(accent)) + + if post is not None: + post.set_context("e1", str(mora_num)) + post.set_context("e2", str(accent)) + + cent.set_context("f1", str(mora_num)) + cent.set_context("f2", str(accent)) + for i_mora, mora in enumerate(cent.moras): + mora.set_context("a1", str(i_mora - accent + 1)) + mora.set_context("a2", str(i_mora + 1)) + mora.set_context("a3", str(mora_num - i_mora)) + + for prev, cent, post in zip( + [None] + self.breath_groups[:-1], + self.breath_groups, + self.breath_groups[1:] + [None], + ): + accent_phrase_num = len(cent.accent_phrases) + + if prev is not None: + prev.set_context("j1", str(accent_phrase_num)) + + if post is not None: + post.set_context("h1", str(accent_phrase_num)) + + cent.set_context("i1", str(accent_phrase_num)) + cent.set_context( + "i5", str(accent_phrases.index(cent.accent_phrases[0]) + 1) + ) + cent.set_context( + "i6", + str(len(accent_phrases) - accent_phrases.index(cent.accent_phrases[0])), + ) + + self.set_context( + "k2", + str( + sum( + [ + len(breath_group.accent_phrases) + for breath_group in self.breath_groups + ] + ) + ), + ) + + phonemes: List[Phoneme] = [] + for i in range(len(self.pauses)): + if self.pauses[i] is not None: + phonemes += [self.pauses[i]] + + if i < len(self.pauses) - 1: + phonemes += self.breath_groups[i].phonemes + + return phonemes + + @property + def labels(self): + """ + ラベル群を返す + Returns + ------- + labels : List[str] + Utteranceクラスに直接的・間接的に含まれる全てのラベルを返す + """ + return [p.label for p in self.phonemes] + + +def extract_full_context_label(text: str): + labels = pyopenjtalk.extract_fullcontext(text) + phonemes = [Phoneme.from_label(label=label) for label in labels] + utterance = Utterance.from_phonemes(phonemes) + return utterance diff --git a/voicevox_engine/kana_parser.py b/voicevox_engine/kana_parser.py new file mode 
100644 index 0000000000000000000000000000000000000000..7aa9e9d82c8e48195c6993dea267708b42012c5a --- /dev/null +++ b/voicevox_engine/kana_parser.py @@ -0,0 +1,146 @@ +from typing import List, Optional + +from .model import AccentPhrase, Mora, ParseKanaError, ParseKanaErrorCode +from .mora_list import openjtalk_text2mora + +LOOP_LIMIT = 300 +UNVOICE_SYMBOL = "_" +ACCENT_SYMBOL = "'" +NOPAUSE_DELIMITER = "/" +PAUSE_DELIMITER = "、" +WIDE_INTERROGATION_MARK = "?" + +text2mora_with_unvoice = {} +for text, (consonant, vowel) in openjtalk_text2mora.items(): + text2mora_with_unvoice[text] = Mora( + text=text, + consonant=consonant if len(consonant) > 0 else None, + consonant_length=0 if len(consonant) > 0 else None, + vowel=vowel, + vowel_length=0, + pitch=0, + is_interrogative=False, + ) + if vowel in ["a", "i", "u", "e", "o"]: + text2mora_with_unvoice[UNVOICE_SYMBOL + text] = Mora( + text=text, + consonant=consonant if len(consonant) > 0 else None, + consonant_length=0 if len(consonant) > 0 else None, + vowel=vowel.upper(), + vowel_length=0, + pitch=0, + is_interrogative=False, + ) + + +def _text_to_accent_phrase(phrase: str) -> AccentPhrase: + """ + longest matchにより読み仮名からAccentPhraseを生成 + 入力長Nに対し計算量O(N^2) + """ + accent_index: Optional[int] = None + moras: List[Mora] = [] + + base_index = 0 # パース開始位置。ここから右の文字列をstackに詰めていく。 + stack = "" # 保留中の文字列 + matched_text: Optional[str] = None # 保留中の文字列内で最後にマッチした仮名 + + outer_loop = 0 + while base_index < len(phrase): + outer_loop += 1 + if phrase[base_index] == ACCENT_SYMBOL: + if len(moras) == 0: + raise ParseKanaError(ParseKanaErrorCode.ACCENT_TOP, text=phrase) + if accent_index is not None: + raise ParseKanaError(ParseKanaErrorCode.ACCENT_TWICE, text=phrase) + accent_index = len(moras) + base_index += 1 + continue + for watch_index in range(base_index, len(phrase)): + if phrase[watch_index] == ACCENT_SYMBOL: + break + # 普通の文字の場合 + stack += phrase[watch_index] + if stack in text2mora_with_unvoice: + matched_text = stack + # push 
mora + if matched_text is None: + raise ParseKanaError(ParseKanaErrorCode.UNKNOWN_TEXT, text=stack) + else: + moras.append(text2mora_with_unvoice[matched_text].copy(deep=True)) + base_index += len(matched_text) + stack = "" + matched_text = None + if outer_loop > LOOP_LIMIT: + raise ParseKanaError(ParseKanaErrorCode.INFINITE_LOOP) + if accent_index is None: + raise ParseKanaError(ParseKanaErrorCode.ACCENT_NOTFOUND, text=phrase) + else: + return AccentPhrase(moras=moras, accent=accent_index, pause_mora=None) + + +def parse_kana(text: str) -> List[AccentPhrase]: + """ + AquesTalkライクな読み仮名をパースして音長・音高未指定のaccent phraseに変換 + """ + + parsed_results: List[AccentPhrase] = [] + phrase_base = 0 + if len(text) == 0: + raise ParseKanaError(ParseKanaErrorCode.EMPTY_PHRASE, position=1) + + for i in range(len(text) + 1): + if i == len(text) or text[i] in [PAUSE_DELIMITER, NOPAUSE_DELIMITER]: + phrase = text[phrase_base:i] + if len(phrase) == 0: + raise ParseKanaError( + ParseKanaErrorCode.EMPTY_PHRASE, + position=str(len(parsed_results) + 1), + ) + phrase_base = i + 1 + + is_interrogative = WIDE_INTERROGATION_MARK in phrase + if is_interrogative: + if WIDE_INTERROGATION_MARK in phrase[:-1]: + raise ParseKanaError( + ParseKanaErrorCode.INTERROGATION_MARK_NOT_AT_END, text=phrase + ) + phrase = phrase.replace(WIDE_INTERROGATION_MARK, "") + + accent_phrase: AccentPhrase = _text_to_accent_phrase(phrase) + if i < len(text) and text[i] == PAUSE_DELIMITER: + accent_phrase.pause_mora = Mora( + text="、", + consonant=None, + consonant_length=None, + vowel="pau", + vowel_length=0, + pitch=0, + ) + accent_phrase.is_interrogative = is_interrogative + + parsed_results.append(accent_phrase) + + return parsed_results + + +def create_kana(accent_phrases: List[AccentPhrase]) -> str: + text = "" + for i, phrase in enumerate(accent_phrases): + for j, mora in enumerate(phrase.moras): + if mora.vowel in ["A", "I", "U", "E", "O"]: + text += UNVOICE_SYMBOL + + text += mora.text + if j + 1 == phrase.accent: 
+ text += ACCENT_SYMBOL + + if phrase.is_interrogative: + text += WIDE_INTERROGATION_MARK + + if i < len(accent_phrases) - 1: + if phrase.pause_mora is None: + text += NOPAUSE_DELIMITER + else: + text += PAUSE_DELIMITER + return text diff --git a/voicevox_engine/metas/Metas.py b/voicevox_engine/metas/Metas.py new file mode 100644 index 0000000000000000000000000000000000000000..58c42f06765c3554a138471d83fc90800e6a8540 --- /dev/null +++ b/voicevox_engine/metas/Metas.py @@ -0,0 +1,83 @@ +from enum import Enum +from typing import List, Optional + +from pydantic import BaseModel, Field + + +class SpeakerStyle(BaseModel): + """ + スピーカーのスタイル情報 + """ + + name: str = Field(title="スタイル名") + id: int = Field(title="スタイルID") + + +class SpeakerSupportPermittedSynthesisMorphing(str, Enum): + ALL = "ALL" # 全て許可 + SELF_ONLY = "SELF_ONLY" # 同じ話者内でのみ許可 + NOTHING = "NOTHING" # 全て禁止 + + @classmethod + def _missing_(cls, value: object) -> "SpeakerSupportPermittedSynthesisMorphing": + return SpeakerSupportPermittedSynthesisMorphing.ALL + + +class SpeakerSupportedFeatures(BaseModel): + """ + 話者の対応機能の情報 + """ + + permitted_synthesis_morphing: SpeakerSupportPermittedSynthesisMorphing = Field( + title="モーフィング機能への対応", default=SpeakerSupportPermittedSynthesisMorphing(None) + ) + + +class CoreSpeaker(BaseModel): + """ + コアに含まれるスピーカー情報 + """ + + name: str = Field(title="名前") + speaker_uuid: str = Field(title="スピーカーのUUID") + styles: List[SpeakerStyle] = Field(title="スピーカースタイルの一覧") + version: str = Field("スピーカーのバージョン") + + +class EngineSpeaker(BaseModel): + """ + エンジンに含まれるスピーカー情報 + """ + + supported_features: SpeakerSupportedFeatures = Field( + title="スピーカーの対応機能", default_factory=SpeakerSupportedFeatures + ) + + +class Speaker(CoreSpeaker, EngineSpeaker): + """ + スピーカー情報 + """ + + pass + + +class StyleInfo(BaseModel): + """ + スタイルの追加情報 + """ + + id: int = Field(title="スタイルID") + icon: str = Field(title="当該スタイルのアイコンをbase64エンコードしたもの") + portrait: Optional[str] = 
Field(title="当該スタイルのportrait.pngをbase64エンコードしたもの") + voice_samples: List[str] = Field(title="voice_sampleのwavファイルをbase64エンコードしたもの") + + +class SpeakerInfo(BaseModel): + """ + 話者の追加情報 + """ + + policy: str = Field(title="policy.md") + portrait: str = Field(title="portrait.pngをbase64エンコードしたもの") + style_infos: List[StyleInfo] = Field(title="スタイルの追加情報") diff --git a/voicevox_engine/metas/MetasStore.py b/voicevox_engine/metas/MetasStore.py new file mode 100644 index 0000000000000000000000000000000000000000..88a7bc37daad4ab70f1e7af07d7beab7eaa06e46 --- /dev/null +++ b/voicevox_engine/metas/MetasStore.py @@ -0,0 +1,72 @@ +import json +from pathlib import Path +from typing import TYPE_CHECKING, Dict, List, Tuple + +from voicevox_engine.metas.Metas import CoreSpeaker, EngineSpeaker, Speaker, StyleInfo + +if TYPE_CHECKING: + from voicevox_engine.synthesis_engine.synthesis_engine_base import ( + SynthesisEngineBase, + ) + + +class MetasStore: + """ + 話者やスタイルのメタ情報を管理する + """ + + def __init__(self, engine_speakers_path: Path) -> None: + self._engine_speakers_path = engine_speakers_path + self._loaded_metas: Dict[str, EngineSpeaker] = { + folder.name: EngineSpeaker( + **json.loads((folder / "metas.json").read_text(encoding="utf-8")) + ) + for folder in engine_speakers_path.iterdir() + } + + def speaker_engine_metas(self, speaker_uuid: str) -> EngineSpeaker: + return self.loaded_metas[speaker_uuid] + + def combine_metas(self, core_metas: List[CoreSpeaker]) -> List[Speaker]: + """ + 与えられたmetaにエンジンのコア情報を付加して返す + core_metas: コアのmetas()が返すJSONのModel + """ + + return [ + Speaker( + **self.speaker_engine_metas(speaker_meta.speaker_uuid).dict(), + **speaker_meta.dict(), + ) + for speaker_meta in core_metas + ] + + # FIXME: engineではなくList[CoreSpeaker]を渡す形にすることで + # SynthesisEngineBaseによる循環importを修正する + def load_combined_metas(self, engine: "SynthesisEngineBase") -> List[Speaker]: + """ + 与えられたエンジンから、コア・エンジン両方の情報を含んだMetasを返す + """ + + core_metas = [CoreSpeaker(**speaker) for speaker in 
json.loads(engine.speakers)] + return self.combine_metas(core_metas) + + @property + def engine_speakers_path(self) -> Path: + return self._engine_speakers_path + + @property + def loaded_metas(self) -> Dict[str, EngineSpeaker]: + return self._loaded_metas + + +def construct_lookup(speakers: List[Speaker]) -> Dict[int, Tuple[Speaker, StyleInfo]]: + """ + `{style.id: StyleInfo}`の変換テーブル + """ + + lookup_table = dict() + for speaker in speakers: + for style in speaker.styles: + lookup_table[style.id] = (speaker, style) + return lookup_table diff --git a/voicevox_engine/metas/__init__.py b/voicevox_engine/metas/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4907fdf38d604dc7949dd361812938afd9db0abb --- /dev/null +++ b/voicevox_engine/metas/__init__.py @@ -0,0 +1,6 @@ +from . import Metas, MetasStore + +__all__ = [ + "Metas", + "MetasStore", +] diff --git a/voicevox_engine/model.py b/voicevox_engine/model.py new file mode 100644 index 0000000000000000000000000000000000000000..fa5c23e26b5571a33b913cb12c9abfe3dcf34135 --- /dev/null +++ b/voicevox_engine/model.py @@ -0,0 +1,282 @@ +from enum import Enum +from re import findall, fullmatch +from typing import Dict, List, Optional + +from pydantic import BaseModel, Field, conint, validator + +from .metas.Metas import Speaker, SpeakerInfo + + +class Mora(BaseModel): + """ + モーラ(子音+母音)ごとの情報 + """ + + text: str = Field(title="文字") + consonant: Optional[str] = Field(title="子音の音素") + consonant_length: Optional[float] = Field(title="子音の音長") + vowel: str = Field(title="母音の音素") + vowel_length: float = Field(title="母音の音長") + pitch: float = Field(title="音高") # デフォルト値をつけるとts側のOpenAPIで生成されたコードの型がOptionalになる + + def __hash__(self): + items = [ + (k, tuple(v)) if isinstance(v, List) else (k, v) + for k, v in self.__dict__.items() + ] + return hash(tuple(sorted(items))) + + +class AccentPhrase(BaseModel): + """ + アクセント句ごとの情報 + """ + + moras: List[Mora] = Field(title="モーラのリスト") + accent: int = 
Field(title="アクセント箇所") + pause_mora: Optional[Mora] = Field(title="後ろに無音を付けるかどうか") + is_interrogative: bool = Field(default=False, title="疑問系かどうか") + + def __hash__(self): + items = [ + (k, tuple(v)) if isinstance(v, List) else (k, v) + for k, v in self.__dict__.items() + ] + return hash(tuple(sorted(items))) + + +class AudioQuery(BaseModel): + """ + 音声合成用のクエリ + """ + + accent_phrases: List[AccentPhrase] = Field(title="アクセント句のリスト") + speedScale: float = Field(title="全体の話速") + pitchScale: float = Field(title="全体の音高") + intonationScale: float = Field(title="全体の抑揚") + volumeScale: float = Field(title="全体の音量") + prePhonemeLength: float = Field(title="音声の前の無音時間") + postPhonemeLength: float = Field(title="音声の後の無音時間") + outputSamplingRate: int = Field(title="音声データの出力サンプリングレート") + outputStereo: bool = Field(title="音声データをステレオ出力するか否か") + kana: Optional[str] = Field(title="[読み取り専用]AquesTalkライクな読み仮名。音声合成クエリとしては無視される") + + def __hash__(self): + items = [ + (k, tuple(v)) if isinstance(v, List) else (k, v) + for k, v in self.__dict__.items() + ] + return hash(tuple(sorted(items))) + + +class ParseKanaErrorCode(Enum): + UNKNOWN_TEXT = "判別できない読み仮名があります: {text}" + ACCENT_TOP = "句頭にアクセントは置けません: {text}" + ACCENT_TWICE = "1つのアクセント句に二つ以上のアクセントは置けません: {text}" + ACCENT_NOTFOUND = "アクセントを指定していないアクセント句があります: {text}" + EMPTY_PHRASE = "{position}番目のアクセント句が空白です" + INTERROGATION_MARK_NOT_AT_END = "アクセント句末以外に「?」は置けません: {text}" + INFINITE_LOOP = "処理時に無限ループになってしまいました...バグ報告をお願いします。" + + +class ParseKanaError(Exception): + def __init__(self, errcode: ParseKanaErrorCode, **kwargs): + self.errcode = errcode + self.errname = errcode.name + self.kwargs: Dict[str, str] = kwargs + err_fmt: str = errcode.value + self.text = err_fmt.format(**kwargs) + + +class ParseKanaBadRequest(BaseModel): + text: str = Field(title="エラーメッセージ") + error_name: str = Field( + title="エラー名", + description="|name|description|\n|---|---|\n" + + "\n".join( + [ + "| {} | {} |".format(err.name, err.value) + for err in 
class MorphableTargetInfo(BaseModel):
    """Whether morphing toward a given target speaker is permitted."""

    is_morphable: bool = Field(title="指定した話者に対してモーフィングの可否")
    # FIXME: add reason property
    # reason: Optional[str] = Field(title="is_morphableがfalseである場合、その理由")


class SpeakerNotFoundError(LookupError):
    """Raised when a style id does not resolve to any known speaker."""

    def __init__(self, speaker: int, *args: object, **kywrds: object) -> None:
        self.speaker = speaker
        super().__init__(f"speaker {speaker} is not found.", *args, **kywrds)


class LibrarySpeaker(BaseModel):
    """A speaker bundled inside a voice library."""

    speaker: Speaker = Field(title="話者情報")
    speaker_info: SpeakerInfo = Field(title="話者の追加情報")


class DownloadableLibrary(BaseModel):
    """Metadata describing a downloadable voice library."""

    name: str = Field(title="音声ライブラリの名前")
    uuid: str = Field(title="音声ライブラリのUUID")
    version: str = Field(title="音声ライブラリのバージョン")
    download_url: str = Field(title="音声ライブラリのダウンロードURL")
    bytes: int = Field(title="音声ライブラリのバイト数")
    speakers: List[LibrarySpeaker] = Field(title="音声ライブラリに含まれる話者のリスト")


# Valid range for user-dictionary word priority.
USER_DICT_MIN_PRIORITY = 0
USER_DICT_MAX_PRIORITY = 10


class UserDictWord(BaseModel):
    """
    A word entry used when compiling the user dictionary.
    """

    surface: str = Field(title="表層形")
    priority: conint(ge=USER_DICT_MIN_PRIORITY, le=USER_DICT_MAX_PRIORITY) = Field(
        title="優先度"
    )
    context_id: int = Field(title="文脈ID", default=1348)
    part_of_speech: str = Field(title="品詞")
    part_of_speech_detail_1: str = Field(title="品詞細分類1")
    part_of_speech_detail_2: str = Field(title="品詞細分類2")
    part_of_speech_detail_3: str = Field(title="品詞細分類3")
    inflectional_type: str = Field(title="活用型")
    inflectional_form: str = Field(title="活用形")
    stem: str = Field(title="原形")
    yomi: str = Field(title="読み")
    pronunciation: str = Field(title="発音")
    accent_type: int = Field(title="アクセント型")
    mora_count: Optional[int] = Field(title="モーラ数")
    accent_associative_rule: str = Field(title="アクセント結合規則")

    class Config:
        validate_assignment = True

    @validator("surface")
    def convert_to_zenkaku(cls, surface):
        # Map the ASCII printable range (U+0021..U+007E) onto the
        # corresponding full-width forms (U+FF01..U+FF5E).
        halfwidth = "".join(chr(0x21 + i) for i in range(94))
        fullwidth = "".join(chr(0xFF01 + i) for i in range(94))
        return surface.translate(str.maketrans(halfwidth, fullwidth))

    @validator("pronunciation", pre=True)
    def check_is_katakana(cls, pronunciation):
        # Pronunciation must be katakana (plus the long-vowel mark).
        if not fullmatch(r"[ァ-ヴー]+", pronunciation):
            raise ValueError("発音は有効なカタカナでなくてはいけません。")
        sutegana = ["ァ", "ィ", "ゥ", "ェ", "ォ", "ャ", "ュ", "ョ", "ヮ", "ッ"]
        for i, char in enumerate(pronunciation):
            if char in sutegana:
                # Consecutive small kana are invalid, except that "ッ" may be
                # followed by a small kana other than another "ッ"
                # (e.g. "キャット" is valid).
                if i < len(pronunciation) - 1:
                    nxt = pronunciation[i + 1]
                    if nxt in sutegana[:-1] or (
                        char == sutegana[-1] and nxt == sutegana[-1]
                    ):
                        raise ValueError("無効な発音です。(捨て仮名の連続)")
            if char == "ヮ":
                # "ヮ" is only valid after "ク" or "グ".
                if i != 0 and pronunciation[i - 1] not in ["ク", "グ"]:
                    raise ValueError("無効な発音です。(「くゎ」「ぐゎ」以外の「ゎ」の使用)")
        return pronunciation

    @validator("mora_count", pre=True, always=True)
    def check_mora_count_and_accent_type(cls, mora_count, values):
        if "pronunciation" not in values or "accent_type" not in values:
            # A prior validator already failed; let the error surface there.
            return mora_count

        if mora_count is None:
            # Count moras by matching multi-character moras first, falling
            # back to a single katakana character per mora.
            rule_others = "[イ][ェ]|[ヴ][ャュョ]|[トド][ゥ]|[テデ][ィャュョ]|[デ][ェ]|[クグ][ヮ]"
            rule_line_i = "[キシチニヒミリギジビピ][ェャュョ]"
            rule_line_u = "[ツフヴ][ァ]|[ウスツフヴズ][ィ]|[ウツフヴ][ェォ]"
            rule_one_mora = "[ァ-ヴー]"
            pattern = f"(?:{rule_others}|{rule_line_i}|{rule_line_u}|{rule_one_mora})"
            mora_count = len(findall(pattern, values["pronunciation"]))

        if not 0 <= values["accent_type"] <= mora_count:
            raise ValueError(
                "誤ったアクセント型です({})。 expect: 0 <= accent_type <= {}".format(
                    values["accent_type"], mora_count
                )
            )
        return mora_count


class PartOfSpeechDetail(BaseModel):
    """
    Part-of-speech information for one word type.
    """

    part_of_speech: str = Field(title="品詞")
    part_of_speech_detail_1: str = Field(title="品詞細分類1")
    part_of_speech_detail_2: str = Field(title="品詞細分類2")
    part_of_speech_detail_3: str = Field(title="品詞細分類3")
    # context_id is the left/right context id of the dictionary
    # https://github.com/VOICEVOX/open_jtalk/blob/427cfd761b78efb6094bea3c5bb8c968f0d711ab/src/mecab-naist-jdic/_left-id.def # noqa
    context_id: int = Field(title="文脈ID")
    cost_candidates: List[int] = Field(title="コストのパーセンタイル")
    accent_associative_rules: List[str] = Field(title="アクセント結合規則の一覧")


class WordTypes(str, Enum):
    """
    Word-type values accepted by the fastapi ``word_type`` parameter.
    """

    PROPER_NOUN = "PROPER_NOUN"
    COMMON_NOUN = "COMMON_NOUN"
    VERB = "VERB"
    ADJECTIVE = "ADJECTIVE"
    SUFFIX = "SUFFIX"


class SupportedDevicesInfo(BaseModel):
    """
    Devices supported by the loaded core.
    """

    cpu: bool = Field(title="CPUに対応しているか")
    cuda: bool = Field(title="CUDA(Nvidia GPU)に対応しているか")
    dml: bool = Field(title="DirectML(Nvidia GPU/Radeon GPU等)に対応しているか")


class SupportedFeaturesInfo(BaseModel):
    """
    Features supported by the engine.
    """

    support_adjusting_mora: bool = Field(title="モーラが調整可能かどうか")
    support_adjusting_speed_scale: bool = Field(title="話速が調整可能かどうか")
    support_adjusting_pitch_scale: bool = Field(title="音高が調整可能かどうか")
    support_adjusting_intonation_scale: bool = Field(title="抑揚が調整可能かどうか")
    support_adjusting_volume_scale: bool = Field(title="音量が調整可能かどうか")
    support_adjusting_silence_scale: bool = Field(title="前後の無音時間が調節可能かどうか")
    support_interrogative_upspeak: bool = Field(title="疑似疑問文に対応しているかどうか")
    support_switching_device: bool = Field(title="CPU/GPUの切り替えが可能かどうか")
developed by HTS Working Group + http://open-jtalk.sourceforge.net/ +----------------------------------------------------------------- + + Copyright (c) 2008-2014 Nagoya Institute of Technology + Department of Computer Science + +All rights reserved. + +Redistribution and use in source and binary forms, with or +without modification, are permitted provided that the following +conditions are met: + +- Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +- Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. +- Neither the name of the HTS working group nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND +CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+""" +_mora_list_minimum = [ + ["ヴォ", "v", "o"], + ["ヴェ", "v", "e"], + ["ヴィ", "v", "i"], + ["ヴァ", "v", "a"], + ["ヴ", "v", "u"], + ["ン", "", "N"], + ["ワ", "w", "a"], + ["ロ", "r", "o"], + ["レ", "r", "e"], + ["ル", "r", "u"], + ["リョ", "ry", "o"], + ["リュ", "ry", "u"], + ["リャ", "ry", "a"], + ["リェ", "ry", "e"], + ["リ", "r", "i"], + ["ラ", "r", "a"], + ["ヨ", "y", "o"], + ["ユ", "y", "u"], + ["ヤ", "y", "a"], + ["モ", "m", "o"], + ["メ", "m", "e"], + ["ム", "m", "u"], + ["ミョ", "my", "o"], + ["ミュ", "my", "u"], + ["ミャ", "my", "a"], + ["ミェ", "my", "e"], + ["ミ", "m", "i"], + ["マ", "m", "a"], + ["ポ", "p", "o"], + ["ボ", "b", "o"], + ["ホ", "h", "o"], + ["ペ", "p", "e"], + ["ベ", "b", "e"], + ["ヘ", "h", "e"], + ["プ", "p", "u"], + ["ブ", "b", "u"], + ["フォ", "f", "o"], + ["フェ", "f", "e"], + ["フィ", "f", "i"], + ["ファ", "f", "a"], + ["フ", "f", "u"], + ["ピョ", "py", "o"], + ["ピュ", "py", "u"], + ["ピャ", "py", "a"], + ["ピェ", "py", "e"], + ["ピ", "p", "i"], + ["ビョ", "by", "o"], + ["ビュ", "by", "u"], + ["ビャ", "by", "a"], + ["ビェ", "by", "e"], + ["ビ", "b", "i"], + ["ヒョ", "hy", "o"], + ["ヒュ", "hy", "u"], + ["ヒャ", "hy", "a"], + ["ヒェ", "hy", "e"], + ["ヒ", "h", "i"], + ["パ", "p", "a"], + ["バ", "b", "a"], + ["ハ", "h", "a"], + ["ノ", "n", "o"], + ["ネ", "n", "e"], + ["ヌ", "n", "u"], + ["ニョ", "ny", "o"], + ["ニュ", "ny", "u"], + ["ニャ", "ny", "a"], + ["ニェ", "ny", "e"], + ["ニ", "n", "i"], + ["ナ", "n", "a"], + ["ドゥ", "d", "u"], + ["ド", "d", "o"], + ["トゥ", "t", "u"], + ["ト", "t", "o"], + ["デョ", "dy", "o"], + ["デュ", "dy", "u"], + ["デャ", "dy", "a"], + ["デェ", "dy", "e"], + ["ディ", "d", "i"], + ["デ", "d", "e"], + ["テョ", "ty", "o"], + ["テュ", "ty", "u"], + ["テャ", "ty", "a"], + ["ティ", "t", "i"], + ["テ", "t", "e"], + ["ツォ", "ts", "o"], + ["ツェ", "ts", "e"], + ["ツィ", "ts", "i"], + ["ツァ", "ts", "a"], + ["ツ", "ts", "u"], + ["ッ", "", "cl"], + ["チョ", "ch", "o"], + ["チュ", "ch", "u"], + ["チャ", "ch", "a"], + ["チェ", "ch", "e"], + ["チ", "ch", "i"], + ["ダ", "d", "a"], + ["タ", "t", "a"], + ["ゾ", "z", "o"], + ["ソ", "s", "o"], + ["ゼ", "z", 
"e"], + ["セ", "s", "e"], + ["ズィ", "z", "i"], + ["ズ", "z", "u"], + ["スィ", "s", "i"], + ["ス", "s", "u"], + ["ジョ", "j", "o"], + ["ジュ", "j", "u"], + ["ジャ", "j", "a"], + ["ジェ", "j", "e"], + ["ジ", "j", "i"], + ["ショ", "sh", "o"], + ["シュ", "sh", "u"], + ["シャ", "sh", "a"], + ["シェ", "sh", "e"], + ["シ", "sh", "i"], + ["ザ", "z", "a"], + ["サ", "s", "a"], + ["ゴ", "g", "o"], + ["コ", "k", "o"], + ["ゲ", "g", "e"], + ["ケ", "k", "e"], + ["グヮ", "gw", "a"], + ["グ", "g", "u"], + ["クヮ", "kw", "a"], + ["ク", "k", "u"], + ["ギョ", "gy", "o"], + ["ギュ", "gy", "u"], + ["ギャ", "gy", "a"], + ["ギェ", "gy", "e"], + ["ギ", "g", "i"], + ["キョ", "ky", "o"], + ["キュ", "ky", "u"], + ["キャ", "ky", "a"], + ["キェ", "ky", "e"], + ["キ", "k", "i"], + ["ガ", "g", "a"], + ["カ", "k", "a"], + ["オ", "", "o"], + ["エ", "", "e"], + ["ウォ", "w", "o"], + ["ウェ", "w", "e"], + ["ウィ", "w", "i"], + ["ウ", "", "u"], + ["イェ", "y", "e"], + ["イ", "", "i"], + ["ア", "", "a"], +] +_mora_list_additional = [ + ["ヴョ", "by", "o"], + ["ヴュ", "by", "u"], + ["ヴャ", "by", "a"], + ["ヲ", "", "o"], + ["ヱ", "", "e"], + ["ヰ", "", "i"], + ["ヮ", "w", "a"], + ["ョ", "y", "o"], + ["ュ", "y", "u"], + ["ヅ", "z", "u"], + ["ヂ", "j", "i"], + ["ヶ", "k", "e"], + ["ャ", "y", "a"], + ["ォ", "", "o"], + ["ェ", "", "e"], + ["ゥ", "", "u"], + ["ィ", "", "i"], + ["ァ", "", "a"], +] + +openjtalk_mora2text = { + consonant + vowel: text for [text, consonant, vowel] in _mora_list_minimum +} +openjtalk_text2mora = { + text: (consonant, vowel) + for [text, consonant, vowel] in _mora_list_minimum + _mora_list_additional +} diff --git a/voicevox_engine/morphing.py b/voicevox_engine/morphing.py new file mode 100644 index 0000000000000000000000000000000000000000..d857aa11d8857772c4e119edfd57730932ced6fa --- /dev/null +++ b/voicevox_engine/morphing.py @@ -0,0 +1,208 @@ +from copy import deepcopy +from dataclasses import dataclass +from itertools import chain +from typing import Dict, List, Tuple + +import numpy as np +import pyworld as pw +from scipy.signal import resample + +from 
# FIXME: ndarray type hint, https://github.com/JeremyCCHsu/Python-Wrapper-for-World-Vocoder/blob/2b64f86197573497c685c785c6e0e743f407b63e/pyworld/pyworld.pyx#L398 # noqa
@dataclass(frozen=True)
class MorphingParameter:
    """WORLD analysis parameters shared by base/target for morphing."""

    fs: int
    frame_period: float
    base_f0: np.ndarray
    base_aperiodicity: np.ndarray
    base_spectrogram: np.ndarray
    target_spectrogram: np.ndarray


def create_morphing_parameter(
    base_wave: np.ndarray,
    target_wave: np.ndarray,
    fs: int,
) -> MorphingParameter:
    """
    Analyze two waveforms with WORLD and build a MorphingParameter.

    Parameters
    ----------
    base_wave : np.ndarray
        Waveform of the base speaker (mono, float).
    target_wave : np.ndarray
        Waveform of the target speaker (mono, float).
    fs : int
        Sampling rate shared by both waveforms.
    """
    frame_period = 1.0
    base_f0, base_time_axis = pw.harvest(base_wave, fs, frame_period=frame_period)
    base_spectrogram = pw.cheaptrick(base_wave, base_f0, base_time_axis, fs)
    base_aperiodicity = pw.d4c(base_wave, base_f0, base_time_axis, fs)

    target_f0, morph_time_axis = pw.harvest(target_wave, fs, frame_period=frame_period)
    target_spectrogram = pw.cheaptrick(target_wave, target_f0, morph_time_axis, fs)
    # Align the target spectrogram to the base's frame count so the two can
    # be blended element-wise.
    target_spectrogram.resize(base_spectrogram.shape)

    return MorphingParameter(
        fs=fs,
        frame_period=frame_period,
        base_f0=base_f0,
        base_aperiodicity=base_aperiodicity,
        base_spectrogram=base_spectrogram,
        target_spectrogram=target_spectrogram,
    )


def get_morphable_targets(
    speakers: List[Speaker],
    base_speakers: List[int],
) -> List[Dict[int, MorphableTargetInfo]]:
    """
    For each base style id, report which styles it may morph toward.

    Parameters
    ----------
    speakers : List[Speaker]
        All speakers.
    base_speakers : List[int]
        Base style ids to evaluate.

    Returns
    -------
    List[Dict[int, MorphableTargetInfo]]
        One dict per base style id, mapping every style id to its
        morphability.
    """
    speaker_lookup = construct_lookup(speakers)

    morphable_targets_arr = []
    for base_speaker in base_speakers:
        morphable_targets = dict()
        for style in chain.from_iterable(speaker.styles for speaker in speakers):
            morphable_targets[style.id] = MorphableTargetInfo(
                is_morphable=is_synthesis_morphing_permitted(
                    speaker_lookup=speaker_lookup,
                    base_speaker=base_speaker,
                    target_speaker=style.id,
                )
            )
        morphable_targets_arr.append(morphable_targets)

    return morphable_targets_arr


def is_synthesis_morphing_permitted(
    speaker_lookup: Dict[int, Tuple[Speaker, StyleInfo]],
    base_speaker: int,
    target_speaker: int,
) -> bool:
    """
    Return whether morphing between the two given style ids is permitted.

    Raises
    ------
    SpeakerNotFoundError
        If either style id is not present in ``speaker_lookup``.
    """
    # BUGFIX: plain indexing (speaker_lookup[...]) raised KeyError before the
    # `is None` checks below could run, so the documented SpeakerNotFoundError
    # was unreachable; use .get() so missing ids fall through to the check.
    base_speaker_data = speaker_lookup.get(base_speaker)
    target_speaker_data = speaker_lookup.get(target_speaker)

    if base_speaker_data is None or target_speaker_data is None:
        raise SpeakerNotFoundError(
            base_speaker if base_speaker_data is None else target_speaker
        )

    base_speaker_info, _ = base_speaker_data
    target_speaker_info, _ = target_speaker_data

    base_speaker_uuid = base_speaker_info.speaker_uuid
    target_speaker_uuid = target_speaker_info.speaker_uuid

    base_speaker_morphing_info: SpeakerSupportPermittedSynthesisMorphing = (
        base_speaker_info.supported_features.permitted_synthesis_morphing
    )

    target_speaker_morphing_info: SpeakerSupportPermittedSynthesisMorphing = (
        target_speaker_info.supported_features.permitted_synthesis_morphing
    )

    # Forbidden if either side disallows morphing entirely.
    if (
        base_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.NOTHING
        or target_speaker_morphing_info
        == SpeakerSupportPermittedSynthesisMorphing.NOTHING
    ):
        return False
    # If either side is SELF_ONLY, both styles must belong to the same speaker.
    if (
        base_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.SELF_ONLY
        or target_speaker_morphing_info
        == SpeakerSupportPermittedSynthesisMorphing.SELF_ONLY
    ):
        return base_speaker_uuid == target_speaker_uuid
    # Defensive: require explicit ALL on both sides.
    return (
        base_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.ALL
        and target_speaker_morphing_info == SpeakerSupportPermittedSynthesisMorphing.ALL
    )
def synthesis_morphing(
    morph_param: MorphingParameter,
    morph_rate: float,
    output_fs: int,
    output_stereo: bool = False,
) -> np.ndarray:
    """
    Generate a morphed waveform from precomputed WORLD parameters.

    Parameters
    ----------
    morph_param : MorphingParameter
        Parameters produced by ``synthesis_morphing_parameter`` or
        ``create_morphing_parameter``.

    morph_rate : float
        Blend ratio: 0.0 is the base speaker, 1.0 the target speaker.

    Returns
    -------
    generated : np.ndarray
        The morphed waveform.

    Raises
    -------
    ValueError
        morph_rate ∈ [0, 1]
    """
    if morph_rate < 0.0 or morph_rate > 1.0:
        raise ValueError("morph_rateは0.0から1.0の範囲で指定してください")

    # Linear blend of the two spectrograms.
    morph_spectrogram = (
        morph_param.base_spectrogram * (1.0 - morph_rate)
        + morph_param.target_spectrogram * morph_rate
    )

    wave = pw.synthesize(
        morph_param.base_f0,
        morph_spectrogram,
        morph_param.base_aperiodicity,
        morph_param.fs,
        morph_param.frame_period,
    )

    # TODO: share this resampling logic with synthesis_engine.py
    if output_fs != morph_param.fs:
        wave = resample(wave, output_fs * len(wave) // morph_param.fs)

    if output_stereo:
        # Duplicate the mono channel into a (samples, 2) stereo array.
        wave = np.array([wave, wave]).T

    return wave
PartOfSpeechDetail, + WordTypes, +) + +MIN_PRIORITY = USER_DICT_MIN_PRIORITY +MAX_PRIORITY = USER_DICT_MAX_PRIORITY + +part_of_speech_data: Dict[WordTypes, PartOfSpeechDetail] = { + WordTypes.PROPER_NOUN: PartOfSpeechDetail( + part_of_speech="名詞", + part_of_speech_detail_1="固有名詞", + part_of_speech_detail_2="一般", + part_of_speech_detail_3="*", + context_id=1348, + cost_candidates=[ + -988, + 3488, + 4768, + 6048, + 7328, + 8609, + 8734, + 8859, + 8984, + 9110, + 14176, + ], + accent_associative_rules=[ + "*", + "C1", + "C2", + "C3", + "C4", + "C5", + ], + ), + WordTypes.COMMON_NOUN: PartOfSpeechDetail( + part_of_speech="名詞", + part_of_speech_detail_1="一般", + part_of_speech_detail_2="*", + part_of_speech_detail_3="*", + context_id=1345, + cost_candidates=[ + -4445, + 49, + 1473, + 2897, + 4321, + 5746, + 6554, + 7362, + 8170, + 8979, + 15001, + ], + accent_associative_rules=[ + "*", + "C1", + "C2", + "C3", + "C4", + "C5", + ], + ), + WordTypes.VERB: PartOfSpeechDetail( + part_of_speech="動詞", + part_of_speech_detail_1="自立", + part_of_speech_detail_2="*", + part_of_speech_detail_3="*", + context_id=642, + cost_candidates=[ + 3100, + 6160, + 6360, + 6561, + 6761, + 6962, + 7414, + 7866, + 8318, + 8771, + 13433, + ], + accent_associative_rules=[ + "*", + ], + ), + WordTypes.ADJECTIVE: PartOfSpeechDetail( + part_of_speech="形容詞", + part_of_speech_detail_1="自立", + part_of_speech_detail_2="*", + part_of_speech_detail_3="*", + context_id=20, + cost_candidates=[ + 1527, + 3266, + 3561, + 3857, + 4153, + 4449, + 5149, + 5849, + 6549, + 7250, + 10001, + ], + accent_associative_rules=[ + "*", + ], + ), + WordTypes.SUFFIX: PartOfSpeechDetail( + part_of_speech="名詞", + part_of_speech_detail_1="接尾", + part_of_speech_detail_2="一般", + part_of_speech_detail_3="*", + context_id=1358, + cost_candidates=[ + 4399, + 5373, + 6041, + 6710, + 7378, + 8047, + 9440, + 10834, + 12228, + 13622, + 15847, + ], + accent_associative_rules=[ + "*", + "C1", + "C2", + "C3", + "C4", + "C5", + ], + ), +} 
class Preset(BaseModel):
    """
    A saved set of synthesis parameters for one speaker style.
    """

    id: int = Field(title="プリセットID")
    name: str = Field(title="プリセット名")
    speaker_uuid: str = Field(title="スピーカーのUUID")
    style_id: int = Field(title="スタイルID")
    speedScale: float = Field(title="全体の話速")
    pitchScale: float = Field(title="全体の音高")
    intonationScale: float = Field(title="全体の抑揚")
    volumeScale: float = Field(title="全体の音量")
    prePhonemeLength: float = Field(title="音声の前の無音時間")
    postPhonemeLength: float = Field(title="音声の後の無音時間")


class PresetError(Exception):
    """Raised for any preset load/save failure."""

    pass
return self.presets + except OSError: + raise PresetError("プリセットの設定ファイルが見つかりません") + + with open(self.preset_path, mode="r", encoding="utf-8") as f: + obj = yaml.safe_load(f) + if obj is None: + raise PresetError("プリセットの設定ファイルが空の内容です") + + try: + _presets = parse_obj_as(List[Preset], obj) + except ValidationError: + raise PresetError("プリセットの設定ファイルにミスがあります") + + # idが一意か確認 + if len([preset.id for preset in _presets]) != len( + {preset.id for preset in _presets} + ): + raise PresetError("プリセットのidに重複があります") + + self.presets = _presets + self.last_modified_time = _last_modified_time + return self.presets + + def add_preset(self, preset: Preset): + """ + YAMLファイルに新規のプリセットを追加する + + Parameters + ---------- + preset : Preset + 追加するプリセットを渡す + + Returns + ------- + ret: int + 追加したプリセットのプリセットID + """ + + # 手動でファイルが更新されているかも知れないので、最新のYAMLファイルを読み直す + self.load_presets() + + # IDが0未満、または存在するIDなら新しいIDを決定し、配列に追加 + if preset.id < 0 or preset.id in {preset.id for preset in self.presets}: + preset.id = max([preset.id for preset in self.presets]) + 1 + self.presets.append(preset) + + # ファイルに書き込み + try: + with open(self.preset_path, mode="w", encoding="utf-8") as f: + yaml.safe_dump( + [preset.dict() for preset in self.presets], + f, + allow_unicode=True, + sort_keys=False, + ) + except Exception as err: + self.presets.pop() + if isinstance(err, FileNotFoundError): + raise PresetError("プリセットの設定ファイルに書き込み失敗しました") + else: + raise err + + return preset.id + + def update_preset(self, preset: Preset): + """ + YAMLファイルのプリセットを更新する + + Parameters + ---------- + preset : Preset + 更新するプリセットを渡す + + Returns + ------- + ret: int + 更新したプリセットのプリセットID + """ + + # 手動でファイルが更新されているかも知れないので、最新のYAMLファイルを読み直す + self.load_presets() + + # IDが存在するか探索 + prev_preset = (-1, None) + for i in range(len(self.presets)): + if self.presets[i].id == preset.id: + prev_preset = (i, self.presets[i]) + self.presets[i] = preset + break + else: + raise PresetError("更新先のプリセットが存在しません") + + # ファイルに書き込み + try: + with 
open(self.preset_path, mode="w", encoding="utf-8") as f: + yaml.safe_dump( + [preset.dict() for preset in self.presets], + f, + allow_unicode=True, + sort_keys=False, + ) + except Exception as err: + if prev_preset != (-1, None): + self.presets[prev_preset[0]] = prev_preset[1] + if isinstance(err, FileNotFoundError): + raise PresetError("プリセットの設定ファイルに書き込み失敗しました") + else: + raise err + + return preset.id + + def delete_preset(self, id: int): + """ + YAMLファイルのプリセットを削除する + + Parameters + ---------- + id: int + 削除するプリセットのプリセットIDを渡す + + Returns + ------- + ret: int + 削除したプリセットのプリセットID + """ + + # 手動でファイルが更新されているかも知れないので、最新のYAMLファイルを読み直す + self.load_presets() + + # IDが存在するか探索 + buf = None + buf_index = -1 + for i in range(len(self.presets)): + if self.presets[i].id == id: + buf = self.presets.pop(i) + buf_index = i + break + else: + raise PresetError("削除対象のプリセットが存在しません") + + # ファイルに書き込み + try: + with open(self.preset_path, mode="w", encoding="utf-8") as f: + yaml.safe_dump( + [preset.dict() for preset in self.presets], + f, + allow_unicode=True, + sort_keys=False, + ) + except FileNotFoundError: + self.presets.insert(buf_index, buf) + raise PresetError("プリセットの設定ファイルに書き込み失敗しました") + + return id diff --git a/voicevox_engine/preset/__init__.py b/voicevox_engine/preset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8c485e2fbfbcdd660d869ccc36483d6ace6272ec --- /dev/null +++ b/voicevox_engine/preset/__init__.py @@ -0,0 +1,9 @@ +from .Preset import Preset +from .PresetError import PresetError +from .PresetManager import PresetManager + +__all__ = [ + "Preset", + "PresetManager", + "PresetError", +] diff --git a/voicevox_engine/setting/Setting.py b/voicevox_engine/setting/Setting.py new file mode 100644 index 0000000000000000000000000000000000000000..f8912c6bff9afa959f445d8aa9c89c440b36b8db --- /dev/null +++ b/voicevox_engine/setting/Setting.py @@ -0,0 +1,25 @@ +from enum import Enum +from typing import Optional + +from pydantic import BaseModel, 
class CorsPolicyMode(str, Enum):
    """
    CORS permission mode.
    """

    all = "all"  # allow requests from any origin
    localapps = "localapps"  # allow requests from local applications only


class Setting(BaseModel):
    """
    Engine configuration.
    """

    cors_policy_mode: CorsPolicyMode = Field(title="リソース共有ポリシー")
    allow_origin: Optional[str] = Field(title="許可するオリジン")

    class Config:
        use_enum_values = True


DEFAULT_SETTING_PATH: Path = engine_root() / "default_setting.yml"
USER_SETTING_PATH: Path = get_save_dir() / "setting.yml"


class SettingLoader:
    """Loads and saves the engine setting YAML file."""

    def __init__(self, setting_file_path: Path) -> None:
        self.setting_file_path = setting_file_path

    def load_setting_file(self) -> Setting:
        # Fall back to the bundled defaults when no user file exists yet.
        if self.setting_file_path.is_file():
            source = self.setting_file_path
        else:
            source = DEFAULT_SETTING_PATH
        raw = yaml.safe_load(source.read_text(encoding="utf-8"))

        return Setting(
            cors_policy_mode=raw["cors_policy_mode"],
            allow_origin=raw["allow_origin"],
        )

    def dump_setting_file(self, settings: Setting) -> None:
        with open(self.setting_file_path, mode="w", encoding="utf-8") as f:
            yaml.safe_dump(settings.dict(), f)
class OldCoreError(Exception):
    """Raised when an outdated core is in use."""


class CoreError(Exception):
    """Raised for errors reported by core calls."""


def load_runtime_lib(runtime_dirs: List[Path]):
    """Best-effort preload of runtime shared libraries (torch / onnxruntime).

    Load failures are deliberately ignored: a missing optional runtime is
    not fatal here, and the core load will surface any real problem later.
    """
    system = platform.system()
    if system == "Windows":
        # DirectML.dll: Windows ships a copy incompatible with onnxruntime,
        # which may otherwise be picked up first, so load ours explicitly.
        # ref 1. https://github.com/microsoft/onnxruntime/issues/3360
        # ref 2. https://tadaoyamaoka.hatenablog.com/entry/2020/06/07/113616
        lib_file_names = [
            "torch_cpu.dll",
            "torch_cuda.dll",
            "DirectML.dll",
            "onnxruntime.dll",
        ]
        lib_names = ["torch_cpu", "torch_cuda", "onnxruntime"]
    elif system == "Linux":
        lib_file_names = ["libtorch.so", "libonnxruntime.so"]
        lib_names = ["torch", "onnxruntime"]
    elif system == "Darwin":
        lib_file_names = ["libonnxruntime.dylib"]
        lib_names = ["onnxruntime"]
    else:
        raise RuntimeError("不明なOSです")

    # First try the explicitly supplied directories, then the system paths.
    for lib_path in runtime_dirs:
        for file_name in lib_file_names:
            try:
                CDLL(str((lib_path / file_name).resolve(strict=True)))
            except OSError:
                pass
    for lib_name in lib_names:
        try:
            CDLL(find_library(lib_name))
        except (OSError, TypeError):
            pass


class GPUType(Enum):
    # NONE means the core is CPU-only.
    NONE = auto()
    CUDA = auto()
    DIRECT_ML = auto()


@dataclass(frozen=True)
class CoreInfo:
    """Identifying attributes of one pre-0.12 core shared library."""

    name: str
    platform: str
    arch: str
    core_type: str
    gpu_type: GPUType


# Known cores older than version 0.12
CORE_INFOS = [
    # Windows
    CoreInfo(
        name="core.dll",
        platform="Windows",
        arch="x64",
        core_type="libtorch",
        gpu_type=GPUType.CUDA,
    ),
    CoreInfo(
        name="core_cpu.dll",
        platform="Windows",
        arch="x64",
        core_type="libtorch",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="core_gpu_x64_nvidia.dll",
        platform="Windows",
        arch="x64",
        core_type="onnxruntime",
        gpu_type=GPUType.CUDA,
    ),
    CoreInfo(
        name="core_gpu_x64_directml.dll",
        platform="Windows",
        arch="x64",
        core_type="onnxruntime",
        gpu_type=GPUType.DIRECT_ML,
    ),
    CoreInfo(
        name="core_cpu_x64.dll",
        platform="Windows",
        arch="x64",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="core_cpu_x86.dll",
        platform="Windows",
        arch="x86",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="core_gpu_x86_directml.dll",
        platform="Windows",
        arch="x86",
        core_type="onnxruntime",
        gpu_type=GPUType.DIRECT_ML,
    ),
    CoreInfo(
        name="core_cpu_arm.dll",
        platform="Windows",
        arch="armv7l",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="core_gpu_arm_directml.dll",
        platform="Windows",
        arch="armv7l",
        core_type="onnxruntime",
        gpu_type=GPUType.DIRECT_ML,
    ),
    CoreInfo(
        name="core_cpu_arm64.dll",
        platform="Windows",
        arch="aarch64",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="core_gpu_arm64_directml.dll",
        platform="Windows",
        arch="aarch64",
        core_type="onnxruntime",
        gpu_type=GPUType.DIRECT_ML,
    ),
    # Linux
    CoreInfo(
        name="libcore.so",
        platform="Linux",
        arch="x64",
        core_type="libtorch",
        gpu_type=GPUType.CUDA,
    ),
    CoreInfo(
        name="libcore_cpu.so",
        platform="Linux",
        arch="x64",
        core_type="libtorch",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="libcore_gpu_x64_nvidia.so",
        platform="Linux",
        arch="x64",
        core_type="onnxruntime",
        gpu_type=GPUType.CUDA,
    ),
    CoreInfo(
        name="libcore_cpu_x64.so",
        platform="Linux",
        arch="x64",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="libcore_cpu_armhf.so",
        platform="Linux",
        arch="armv7l",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
    CoreInfo(
        name="libcore_cpu_arm64.so",
        platform="Linux",
        arch="aarch64",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
    # macOS
    CoreInfo(
        name="libcore_cpu_universal2.dylib",
        platform="Darwin",
        arch="universal",
        core_type="onnxruntime",
        gpu_type=GPUType.NONE,
    ),
]


# Library names for cores of version 0.12 and later
# - version 0.12, 0.13: core
# - version 0.14 onward: voicevox_core
CORENAME_DICT = {
    "Windows": ("voicevox_core.dll", "core.dll"),
    "Linux": ("libvoicevox_core.so", "libcore.so"),
    "Darwin": ("libvoicevox_core.dylib", "libcore.dylib"),
}


def find_version_0_12_core_or_later(core_dir: Path) -> Optional[str]:
    """
    If the core library in ``core_dir`` is version 0.12 or later, return the
    name of the shared library found there; otherwise return None.

    A core is judged to be 0.12+ when both hold:

    - ``core_dir`` contains no metas.json
    - the library name follows CORENAME_DICT

    cf. https://github.com/VOICEVOX/voicevox_engine/issues/385
    """
    if (core_dir / "metas.json").exists():
        return None

    return next(
        (
            core_name
            for core_name in CORENAME_DICT[platform.system()]
            if (core_dir / core_name).is_file()
        ),
        None,
    )


def get_arch_name() -> Optional[str]:
    """
    Normalize platform.machine(), which can report several strings for the
    same architecture, into a single canonical name.
    Returns None for unsupported architectures.
    """
    machine = platform.machine()
    if machine in ("x86_64", "x64", "AMD64"):
        return "x64"
    if machine in ("i386", "x86"):
        return "x86"
    if machine == "arm64":
        return "aarch64"
    if machine in ("armv7l", "aarch64"):
        return machine
    return None


def get_core_name(
    arch_name: str,
    platform_name: str,
    model_type: str,
    gpu_type: GPUType,
) -> Optional[str]:
    """Return the library file name matching the given attributes, or None."""
    if platform_name == "Darwin":
        # macOS ships only a CPU universal2 binary.
        if gpu_type != GPUType.NONE or arch_name not in ("x64", "aarch64"):
            return None
        arch_name = "universal"
    return next(
        (
            core_info.name
            for core_info in CORE_INFOS
            if core_info.platform == platform_name
            and core_info.arch == arch_name
            and core_info.core_type == model_type
            and core_info.gpu_type == gpu_type
        ),
        None,
    )


def get_suitable_core_name(
    model_type: str,
    gpu_type: GPUType,
) -> Optional[str]:
    """Return the core library name for the current machine, or None."""
    arch_name = get_arch_name()
    if arch_name is None:
        return None
    return get_core_name(arch_name, platform.system(), model_type, gpu_type)
get_suitable_core_name("onnxruntime", gpu_type=GPUType.DIRECT_ML), + get_suitable_core_name("onnxruntime", gpu_type=GPUType.NONE), + ] + if any([(core_dir / name).is_file() for name in libtorch_core_names if name]): + return "libtorch" + elif any([(core_dir / name).is_file() for name in onnxruntime_core_names if name]): + return "onnxruntime" + else: + return None + + +def load_core(core_dir: Path, use_gpu: bool) -> CDLL: + core_name = find_version_0_12_core_or_later(core_dir) + if core_name: + try: + # NOTE: CDLL クラスのコンストラクタの引数 name には文字列を渡す必要がある。 + # Windows 環境では PathLike オブジェクトを引数として渡すと初期化に失敗する。 + return CDLL(str((core_dir / core_name).resolve(strict=True))) + except OSError as err: + raise RuntimeError(f"コアの読み込みに失敗しました:{err}") + + model_type = check_core_type(core_dir) + if model_type is None: + raise RuntimeError("コアが見つかりません") + if use_gpu or model_type == "onnxruntime": + core_name = get_suitable_core_name(model_type, gpu_type=GPUType.CUDA) + if core_name: + try: + return CDLL(str((core_dir / core_name).resolve(strict=True))) + except OSError: + pass + core_name = get_suitable_core_name(model_type, gpu_type=GPUType.DIRECT_ML) + if core_name: + try: + return CDLL(str((core_dir / core_name).resolve(strict=True))) + except OSError: + pass + core_name = get_suitable_core_name(model_type, gpu_type=GPUType.NONE) + if core_name: + try: + return CDLL(str((core_dir / core_name).resolve(strict=True))) + except OSError as err: + if model_type == "libtorch": + core_name = get_suitable_core_name(model_type, gpu_type=GPUType.CUDA) + if core_name: + try: + return CDLL(str((core_dir / core_name).resolve(strict=True))) + except OSError as err_: + err = err_ + raise RuntimeError(f"コアの読み込みに失敗しました:{err}") + else: + raise RuntimeError(f"このコンピュータのアーキテクチャ {platform.machine()} で利用可能なコアがありません") + + +class CoreWrapper: + def __init__( + self, + use_gpu: bool, + core_dir: Path, + cpu_num_threads: int = 0, + load_all_models: bool = False, + ) -> None: + + self.core = load_core(core_dir, 
use_gpu) + + self.core.initialize.restype = c_bool + self.core.metas.restype = c_char_p + self.core.yukarin_s_forward.restype = c_bool + self.core.yukarin_sa_forward.restype = c_bool + self.core.decode_forward.restype = c_bool + self.core.last_error_message.restype = c_char_p + + self.exist_supported_devices = False + self.exist_finalize = False + exist_cpu_num_threads = False + self.exist_load_model = False + self.exist_is_model_loaded = False + + is_version_0_12_core_or_later = ( + find_version_0_12_core_or_later(core_dir) is not None + ) + if is_version_0_12_core_or_later: + model_type = "onnxruntime" + self.exist_load_model = True + self.exist_is_model_loaded = True + self.core.load_model.argtypes = (c_long,) + self.core.load_model.restype = c_bool + self.core.is_model_loaded.argtypes = (c_long,) + self.core.is_model_loaded.restype = c_bool + else: + model_type = check_core_type(core_dir) + assert model_type is not None + + if model_type == "onnxruntime": + self.core.supported_devices.restype = c_char_p + self.core.finalize.restype = None + self.exist_supported_devices = True + self.exist_finalize = True + exist_cpu_num_threads = True + + self.core.yukarin_s_forward.argtypes = ( + c_int, + POINTER(c_long), + POINTER(c_long), + POINTER(c_float), + ) + self.core.yukarin_sa_forward.argtypes = ( + c_int, + POINTER(c_long), + POINTER(c_long), + POINTER(c_long), + POINTER(c_long), + POINTER(c_long), + POINTER(c_long), + POINTER(c_long), + POINTER(c_float), + ) + self.core.decode_forward.argtypes = ( + c_int, + c_int, + POINTER(c_float), + POINTER(c_float), + POINTER(c_long), + POINTER(c_float), + ) + + cwd = os.getcwd() + os.chdir(core_dir) + try: + if is_version_0_12_core_or_later: + self.assert_core_success( + self.core.initialize(use_gpu, cpu_num_threads, load_all_models) + ) + elif exist_cpu_num_threads: + self.assert_core_success( + self.core.initialize(".", use_gpu, cpu_num_threads) + ) + else: + self.assert_core_success(self.core.initialize(".", use_gpu)) + 
finally: + os.chdir(cwd) + + def metas(self) -> str: + return self.core.metas().decode("utf-8") + + def yukarin_s_forward( + self, + length: int, + phoneme_list: np.ndarray, + speaker_id: np.ndarray, + ) -> np.ndarray: + output = np.zeros((length,), dtype=np.float32) + self.assert_core_success( + self.core.yukarin_s_forward( + c_int(length), + phoneme_list.ctypes.data_as(POINTER(c_long)), + speaker_id.ctypes.data_as(POINTER(c_long)), + output.ctypes.data_as(POINTER(c_float)), + ) + ) + return output + + def yukarin_sa_forward( + self, + length: int, + vowel_phoneme_list: np.ndarray, + consonant_phoneme_list: np.ndarray, + start_accent_list: np.ndarray, + end_accent_list: np.ndarray, + start_accent_phrase_list: np.ndarray, + end_accent_phrase_list: np.ndarray, + speaker_id: np.ndarray, + ) -> np.ndarray: + output = np.empty( + ( + len(speaker_id), + length, + ), + dtype=np.float32, + ) + self.assert_core_success( + self.core.yukarin_sa_forward( + c_int(length), + vowel_phoneme_list.ctypes.data_as(POINTER(c_long)), + consonant_phoneme_list.ctypes.data_as(POINTER(c_long)), + start_accent_list.ctypes.data_as(POINTER(c_long)), + end_accent_list.ctypes.data_as(POINTER(c_long)), + start_accent_phrase_list.ctypes.data_as(POINTER(c_long)), + end_accent_phrase_list.ctypes.data_as(POINTER(c_long)), + speaker_id.ctypes.data_as(POINTER(c_long)), + output.ctypes.data_as(POINTER(c_float)), + ) + ) + return output + + def decode_forward( + self, + length: int, + phoneme_size: int, + f0: np.ndarray, + phoneme: np.ndarray, + speaker_id: np.ndarray, + ) -> np.ndarray: + output = np.empty((length * 256,), dtype=np.float32) + self.assert_core_success( + self.core.decode_forward( + c_int(length), + c_int(phoneme_size), + f0.ctypes.data_as(POINTER(c_float)), + phoneme.ctypes.data_as(POINTER(c_float)), + speaker_id.ctypes.data_as(POINTER(c_long)), + output.ctypes.data_as(POINTER(c_float)), + ) + ) + return output + + def supported_devices(self) -> str: + if self.exist_supported_devices: + 
return self.core.supported_devices().decode("utf-8") + raise OldCoreError + + def finalize(self) -> None: + if self.exist_finalize: + self.core.finalize() + return + raise OldCoreError + + def load_model(self, speaker_id: int) -> None: + if self.exist_load_model: + self.assert_core_success(self.core.load_model(c_long(speaker_id))) + raise OldCoreError + + def is_model_loaded(self, speaker_id: int) -> bool: + if self.exist_is_model_loaded: + return self.core.is_model_loaded(c_long(speaker_id)) + raise OldCoreError + + def assert_core_success(self, result: bool) -> None: + if not result: + raise CoreError( + self.core.last_error_message().decode("utf-8", "backslashreplace") + ) diff --git a/voicevox_engine/synthesis_engine/make_synthesis_engines.py b/voicevox_engine/synthesis_engine/make_synthesis_engines.py new file mode 100644 index 0000000000000000000000000000000000000000..3027516a122c7382d54dfea1ea2b00b6d801023f --- /dev/null +++ b/voicevox_engine/synthesis_engine/make_synthesis_engines.py @@ -0,0 +1,122 @@ +import json +import sys +from pathlib import Path +from typing import Dict, List, Optional + +from ..utility import engine_root, get_save_dir +from .core_wrapper import CoreWrapper, load_runtime_lib +from .synthesis_engine import SynthesisEngine, SynthesisEngineBase + + +def make_synthesis_engines( + use_gpu: bool, + voicelib_dirs: Optional[List[Path]] = None, + voicevox_dir: Optional[Path] = None, + runtime_dirs: Optional[List[Path]] = None, + cpu_num_threads: Optional[int] = None, + enable_mock: bool = True, + load_all_models: bool = False, +) -> Dict[str, SynthesisEngineBase]: + """ + 音声ライブラリをロードして、音声合成エンジンを生成 + + Parameters + ---------- + use_gpu: bool + 音声ライブラリに GPU を使わせるか否か + voicelib_dirs: List[Path], optional, default=None + 音声ライブラリ自体があるディレクトリのリスト + voicevox_dir: Path, optional, default=None + コンパイル済みのvoicevox、またはvoicevox_engineがあるディレクトリ + runtime_dirs: List[Path], optional, default=None + コアで使用するライブラリのあるディレクトリのリスト + None 
のとき、voicevox_dir、カレントディレクトリになる
    cpu_num_threads: int, optional, default=None
        音声ライブラリが、推論に用いるCPUスレッド数を設定する
        Noneのとき、ライブラリ側の挙動により論理コア数の半分か、物理コア数が指定される
    enable_mock: bool, optional, default=True
        コア読み込みに失敗したとき、代わりにmockを使用するかどうか
    load_all_models: bool, optional, default=False
        起動時に全てのモデルを読み込むかどうか
    """
    if cpu_num_threads == 0 or cpu_num_threads is None:
        print(
            "Warning: cpu_num_threads is set to 0. "
            + "( The library leaves the decision to the synthesis runtime )",
            file=sys.stderr,
        )
        cpu_num_threads = 0

    # voicevox_dir, when given, is appended as both a voice-library and a
    # runtime-library search location; otherwise default to the engine root.
    if voicevox_dir is not None:
        if voicelib_dirs is not None:
            voicelib_dirs.append(voicevox_dir)
        else:
            voicelib_dirs = [voicevox_dir]
        if runtime_dirs is not None:
            runtime_dirs.append(voicevox_dir)
        else:
            runtime_dirs = [voicevox_dir]
    else:
        root_dir = engine_root()
        if voicelib_dirs is None:
            voicelib_dirs = [root_dir]
        if runtime_dirs is None:
            runtime_dirs = [root_dir]

    voicelib_dirs = [p.expanduser() for p in voicelib_dirs]
    runtime_dirs = [p.expanduser() for p in runtime_dirs]

    load_runtime_lib(runtime_dirs)

    # Maps core version string -> engine instance.
    synthesis_engines = {}

    if not enable_mock:

        def load_core_library(core_dir: Path, suppress_error: bool = False):
            """
            Load the core found in the given directory.
            For user directories the core may legitimately be absent, so
            suppressing errors is recommended there.
            """
            try:
                core = CoreWrapper(use_gpu, core_dir, cpu_num_threads, load_all_models)
                metas = json.loads(core.metas())
                core_version = metas[0]["version"]
                if core_version in synthesis_engines:
                    print(
                        "Warning: Core loading is skipped because of version duplication.",
                        file=sys.stderr,
                    )
                else:
                    synthesis_engines[core_version] = SynthesisEngine(core=core)
            except Exception:
                if not suppress_error:
                    raise

        for core_dir in voicelib_dirs:
            load_core_library(core_dir)

        # Also load any cores placed in the user's save directory.
        user_voicelib_dirs = []
        core_libraries_dir = get_save_dir() / "core_libraries"
        core_libraries_dir.mkdir(exist_ok=True)
        user_voicelib_dirs.append(core_libraries_dir)
        for path in core_libraries_dir.glob("*"):
            if not path.is_dir():
                continue
            user_voicelib_dirs.append(path)

        for core_dir in user_voicelib_dirs:
            load_core_library(core_dir, suppress_error=True)

    else:
        # Register the mock engine instead of a real core.
        from ..dev.core import metas as mock_metas
        from ..dev.core import supported_devices as mock_supported_devices
        from ..dev.synthesis_engine import MockSynthesisEngine

        if "0.0.0" not in synthesis_engines:
            synthesis_engines["0.0.0"] = MockSynthesisEngine(
                speakers=mock_metas(), supported_devices=mock_supported_devices()
            )

    return synthesis_engines
diff --git a/voicevox_engine/synthesis_engine/synthesis_engine.py b/voicevox_engine/synthesis_engine/synthesis_engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..f617e94a2589e5bb1ce1210af6a24178070b24c7
--- /dev/null
+++ b/voicevox_engine/synthesis_engine/synthesis_engine.py
@@ -0,0 +1,502 @@
import threading
from itertools import chain
from typing import List, Optional, Tuple

import numpy
from scipy.signal import resample

from ..acoustic_feature_extractor import OjtPhoneme
from ..model import AccentPhrase, AudioQuery, Mora
from .core_wrapper import CoreWrapper, OldCoreError
from .synthesis_engine_base import SynthesisEngineBase

# Phonemes representing devoiced vowels and silence.
unvoiced_mora_phoneme_list = ["A", "I", "U", "E", "O", "cl", "pau"]
# All phonemes that can terminate a mora (voiced vowels + the above).
mora_phoneme_list = ["a", "i", "u", "e", "o", "N"] + unvoiced_mora_phoneme_list


# TODO: move mora utility to mora module
def to_flatten_moras(accent_phrases: List[AccentPhrase]) -> List[Mora]:
    """
    Flatten every Mora contained in accent_phrases (including each
    pause_mora when present) into a single list.

    Parameters
    ----------
    accent_phrases : List[AccentPhrase]
        List of accent phrases.

    Returns
    -------
    moras : List[Mora]
        The concatenated list of moras.
    """
    return list(
        chain.from_iterable(
            accent_phrase.moras
            + (
                [accent_phrase.pause_mora]
                if accent_phrase.pause_mora is not None
                else []
            )
            for accent_phrase in accent_phrases
        )
    )


def to_phoneme_data_list(phoneme_str_list: List[str]):
+ """ + phoneme文字列のリストを、OjtPhonemeクラスのリストに変換する + Parameters + ---------- + phoneme_str_list : List[str] + phoneme文字列のリスト + Returns + ------- + phoneme_list : List[OjtPhoneme] + 変換されたOjtPhonemeクラスのリスト + """ + phoneme_data_list = [ + OjtPhoneme(phoneme=p, start=i, end=i + 1) + for i, p in enumerate(phoneme_str_list) + ] + phoneme_data_list = OjtPhoneme.convert(phoneme_data_list) + return phoneme_data_list + + +def split_mora(phoneme_list: List[OjtPhoneme]): + """ + OjtPhonemeのリストから、 + 母音の位置(vowel_indexes) + 母音の音素列(vowel_phoneme_list) + 子音の音素列(consonant_phoneme_list) + を生成し、返す + Parameters + ---------- + phoneme_list : List[OjtPhoneme] + phonemeクラスのリスト + Returns + ------- + consonant_phoneme_list : List[OjtPhoneme] + 子音の音素列 + vowel_phoneme_list : List[OjtPhoneme] + 母音の音素列 + vowel_indexes : : List[int] + 母音の位置 + """ + vowel_indexes = [ + i for i, p in enumerate(phoneme_list) if p.phoneme in mora_phoneme_list + ] + vowel_phoneme_list = [phoneme_list[i] for i in vowel_indexes] + # postとprevのvowel_indexの差として考えられる値は1か2 + # 理由としてはphoneme_listは、consonant、vowelの組み合わせか、vowel一つの連続であるから + # 1の場合はconsonant(子音)が存在しない=母音のみ(a/i/u/e/o/N/cl/pau)で構成されるモーラ(音)である + # 2の場合はconsonantが存在するモーラである + # なので、2の場合(else)でphonemeを取り出している + consonant_phoneme_list: List[Optional[OjtPhoneme]] = [None] + [ + None if post - prev == 1 else phoneme_list[post - 1] + for prev, post in zip(vowel_indexes[:-1], vowel_indexes[1:]) + ] + return consonant_phoneme_list, vowel_phoneme_list, vowel_indexes + + +def pre_process( + accent_phrases: List[AccentPhrase], +) -> Tuple[List[Mora], List[OjtPhoneme]]: + """ + AccentPhraseモデルのリストを整形し、処理に必要なデータの原型を作り出す + Parameters + ---------- + accent_phrases : List[AccentPhrase] + AccentPhraseモデルのリスト + Returns + ------- + flatten_moras : List[Mora] + AccentPhraseモデルのリスト内に含まれるすべてのMoraをリスト化したものを返す + phoneme_data_list : List[OjtPhoneme] + flatten_morasから取り出したすべてのPhonemeをOjtPhonemeに変換したものを返す + """ + flatten_moras = to_flatten_moras(accent_phrases) + + phoneme_each_mora = [ + 
([mora.consonant] if mora.consonant is not None else []) + [mora.vowel] + for mora in flatten_moras + ] + phoneme_str_list = list(chain.from_iterable(phoneme_each_mora)) + phoneme_str_list = ["pau"] + phoneme_str_list + ["pau"] + + phoneme_data_list = to_phoneme_data_list(phoneme_str_list) + + return flatten_moras, phoneme_data_list + + +class SynthesisEngine(SynthesisEngineBase): + def __init__( + self, + core: CoreWrapper, + ): + """ + core.yukarin_s_forward: 音素列から、音素ごとの長さを求める関数 + length: 音素列の長さ + phoneme_list: 音素列 + speaker_id: 話者番号 + return: 音素ごとの長さ + + core.yukarin_sa_forward: モーラごとの音素列とアクセント情報から、モーラごとの音高を求める関数 + length: モーラ列の長さ + vowel_phoneme_list: 母音の音素列 + consonant_phoneme_list: 子音の音素列 + start_accent_list: アクセントの開始位置 + end_accent_list: アクセントの終了位置 + start_accent_phrase_list: アクセント句の開始位置 + end_accent_phrase_list: アクセント句の終了位置 + speaker_id: 話者番号 + return: モーラごとの音高 + + core.decode_forward: フレームごとの音素と音高から波形を求める関数 + length: フレームの長さ + phoneme_size: 音素の種類数 + f0: フレームごとの音高 + phoneme: フレームごとの音素 + speaker_id: 話者番号 + return: 音声波形 + + speakers: coreから取得したspeakersに関するjsonデータの文字列 + + supported_devices: + coreから取得した対応デバイスに関するjsonデータの文字列 + Noneの場合はコアが情報の取得に対応していないため、対応デバイスは不明 + """ + super().__init__() + self.core = core + self._speakers = self.core.metas() + self.mutex = threading.Lock() + try: + self._supported_devices = self.core.supported_devices() + except OldCoreError: + self._supported_devices = None + self.default_sampling_rate = 24000 + + @property + def speakers(self) -> str: + return self._speakers + + @property + def supported_devices(self) -> Optional[str]: + return self._supported_devices + + def initialize_speaker_synthesis(self, speaker_id: int, skip_reinit: bool): + try: + with self.mutex: + # 以下の条件のいずれかを満たす場合, 初期化を実行する + # 1. 引数 skip_reinit が False の場合 + # 2. 
                if (not skip_reinit) or (not self.core.is_model_loaded(speaker_id)):
                    self.core.load_model(speaker_id)
        except OldCoreError:
            pass  # nothing we can do when the core is too old

    def is_initialized_speaker_synthesis(self, speaker_id: int) -> bool:
        try:
            return self.core.is_model_loaded(speaker_id)
        except OldCoreError:
            return True  # old cores cannot report this, so assume loaded

    def replace_phoneme_length(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        Fill in the vowel/consonant durations of accent_phrases.

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            List of accent phrase models.
        speaker_id : int
            Speaker id.

        Returns
        -------
        accent_phrases : List[AccentPhrase]
            The accent phrases with vowel/consonant durations set.
        """
        # Load the model unless it is already loaded.
        self.initialize_speaker_synthesis(speaker_id, skip_reinit=True)
        # phoneme
        # Decompose the AccentPhrases into Mora / OjtPhoneme form.
        flatten_moras, phoneme_data_list = pre_process(accent_phrases)
        # Extract the vowel positions from the decomposed phonemes.
        _, _, vowel_indexes_data = split_mora(phoneme_data_list)

        # yukarin_s
        # Build the list of OpenJTalk phoneme ids from the OjtPhoneme list.
        phoneme_list_s = numpy.array(
            [p.phoneme_id for p in phoneme_data_list], dtype=numpy.int64
        )
        # Run yukarin_s_forward to infer a duration for every phoneme.
        with self.mutex:
            phoneme_length = self.core.yukarin_s_forward(
                length=len(phoneme_list_s),
                phoneme_list=phoneme_list_s,
                speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
            )

        # Write the results back into accent_phrases: mutating the moras in
        # flatten_moras also updates the objects inside accent_phrases.
        for i, mora in enumerate(flatten_moras):
            mora.consonant_length = (
                phoneme_length[vowel_indexes_data[i + 1] - 1]
                if mora.consonant is not None
                else None
            )
            mora.vowel_length = phoneme_length[vowel_indexes_data[i + 1]]

        return accent_phrases

    def replace_mora_pitch(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        Fill in the per-mora pitch of accent_phrases.

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            List of accent phrase models.
        speaker_id : int
            Speaker id.

        Returns
        -------
        accent_phrases : List[AccentPhrase]
            The accent phrases with pitch set.
        """
        # Load the model unless it is already loaded.
        self.initialize_speaker_synthesis(speaker_id, skip_reinit=True)
        # numpy.concatenate raises on an empty list, so guard first.
        if len(accent_phrases) == 0:
            return []

        # phoneme
        # Decompose the AccentPhrases into Mora / OjtPhoneme form.
        flatten_moras, phoneme_data_list = pre_process(accent_phrases)

        # accent
        def _create_one_hot(accent_phrase: AccentPhrase, position: int):
            """
            Build a one-hot array over this accent phrase's moras using an
            identity matrix (numpy.eye); e.g. with 12 moras and position 1:
            [0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            and with position -1:
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
            When the phrase has a pause_mora, one extra 0 is appended.

            Parameters
            ----------
            accent_phrase : AccentPhrase
                The accent phrase model.
            position : int
                The index to set hot.

            Returns
            -------
            one_hot : numpy.ndarray
                The one-hot array.
            """
            return numpy.r_[
                numpy.eye(len(accent_phrase.moras))[position],
                (0 if accent_phrase.pause_mora is not None else []),
            ]

        # Accent start positions.
        start_accent_list = numpy.concatenate(
            [
                # accent is 1-origin (unlike programming indexes), so
                # accent == 1 selects index 0; any other accent value is
                # only used for end_accent_list.
                _create_one_hot(accent_phrase, 0 if accent_phrase.accent == 1 else 1)
                for accent_phrase in accent_phrases
            ]
        )

        # Accent end positions.
        end_accent_list = numpy.concatenate(
            [
                # accent is 1-origin, hence the -1.
                _create_one_hot(accent_phrase, accent_phrase.accent - 1)
                for accent_phrase in accent_phrases
            ]
        )

        # Accent-phrase start positions — lets yukarin_sa tell phrases apart.
        start_accent_phrase_list = numpy.concatenate(
            [_create_one_hot(accent_phrase, 0) for accent_phrase in accent_phrases]
        )

        # Accent-phrase end positions.
        end_accent_phrase_list = numpy.concatenate(
            [_create_one_hot(accent_phrase, -1) for accent_phrase in accent_phrases]
        )

        # Pad both ends with 0, accounting for the leading/trailing pau.
        start_accent_list = numpy.r_[0, start_accent_list, 0]
        end_accent_list = numpy.r_[0, end_accent_list, 0]
        start_accent_phrase_list = numpy.r_[0, start_accent_phrase_list, 0]
        end_accent_phrase_list = numpy.r_[0, end_accent_phrase_list, 0]

        # Final casts to int64 for yukarin_sa_forward.
        start_accent_list = numpy.array(start_accent_list, dtype=numpy.int64)
        end_accent_list = numpy.array(end_accent_list, dtype=numpy.int64)
        start_accent_phrase_list = numpy.array(
            start_accent_phrase_list, dtype=numpy.int64
        )
        end_accent_phrase_list = numpy.array(end_accent_phrase_list, dtype=numpy.int64)

        # Extract the phoneme-related data.
        (
            consonant_phoneme_data_list,
            vowel_phoneme_data_list,
            _,
        ) = split_mora(phoneme_data_list)

        # yukarin_sa
        # Cast phoneme ids to int64; moras without a consonant get -1.
        vowel_phoneme_list = numpy.array(
            [p.phoneme_id for p in vowel_phoneme_data_list], dtype=numpy.int64
        )
        consonant_phoneme_list = numpy.array(
            [
                p.phoneme_id if p is not None else -1
                for p in consonant_phoneme_data_list
            ],
            dtype=numpy.int64,
        )

        # Run yukarin_sa_forward to infer a pitch for every mora.
        with self.mutex:
            f0_list = self.core.yukarin_sa_forward(
                length=vowel_phoneme_list.shape[0],
                vowel_phoneme_list=vowel_phoneme_list[numpy.newaxis],
                consonant_phoneme_list=consonant_phoneme_list[numpy.newaxis],
                start_accent_list=start_accent_list[numpy.newaxis],
                end_accent_list=end_accent_list[numpy.newaxis],
                start_accent_phrase_list=start_accent_phrase_list[numpy.newaxis],
                end_accent_phrase_list=end_accent_phrase_list[numpy.newaxis],
                speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
            )[0]

        # Zero the pitch of moras whose vowel is unvoiced.
        for i, p in enumerate(vowel_phoneme_data_list):
            if p.phoneme in unvoiced_mora_phoneme_list:
                f0_list[i] = 0

        # Write the results back into accent_phrases: mutating the moras in
        # flatten_moras also updates the objects inside accent_phrases.
        for i, mora in enumerate(flatten_moras):
            mora.pitch = f0_list[i + 1]

        return accent_phrases

    def _synthesis_impl(self, query: AudioQuery, speaker_id: int):
        """
        Build the inputs needed for synthesis from an AudioQuery and run
        the actual synthesis.

        Parameters
        ----------
        query : AudioQuery
            The synthesis query.
        speaker_id : int
            Speaker id.

        Returns
        -------
        wave : numpy.ndarray
            The synthesized waveform.
        """
        # Load the model unless it is already loaded.
        self.initialize_speaker_synthesis(speaker_id, skip_reinit=True)
        # phoneme
        # Decompose the AccentPhrases into Mora / OjtPhoneme form.
        flatten_moras, phoneme_data_list = pre_process(query.accent_phrases)

        # Build the list of OpenJTalk phoneme ids from the OjtPhoneme list.
        phoneme_list_s = numpy.array(
            [p.phoneme_id for p in phoneme_data_list], dtype=numpy.int64
        )

        # length
        # Flatten all phoneme durations, including pre/post silence.
        phoneme_length_list = (
            [query.prePhonemeLength]
            + [
                length
                for mora in flatten_moras
                for length in (
                    [mora.consonant_length] if mora.consonant is not None else []
                )
                + [mora.vowel_length]
            ]
            + [query.postPhonemeLength]
        )
        # Cast to float32.
        phoneme_length = numpy.array(phoneme_length_list, dtype=numpy.float32)

        # Apply the speed scale to durations.
        phoneme_length /= query.speedScale

        # pitch
        # Flatten the per-mora pitches and cast to float32.
        f0_list = [0] + [mora.pitch for mora in flatten_moras] + [0]
        f0 = numpy.array(f0_list, dtype=numpy.float32)
        # Apply the pitch scale (multiply by 2 ** pitchScale).
        f0 *= 2**query.pitchScale

        # Voiced phonemes are those with pitch > 0.
        voiced = f0 > 0
        # Mean pitch over the voiced phonemes.
        mean_f0 = f0[voiced].mean()
        # When the mean is not NaN, apply intonation:
        # (f0 - mean_f0) * intonationScale + mean_f0
        if not numpy.isnan(mean_f0):
            f0[voiced] = (f0[voiced] - mean_f0) * query.intonationScale + mean_f0

        # Extract the vowel positions and convert them to a numpy array.
        _, _, vowel_indexes_data = split_mora(phoneme_data_list)
        vowel_indexes = numpy.array(vowel_indexes_data)

        # forward decode
        # Convert durations to frame counts (256 samples per frame at 24kHz).
        rate = 24000 / 256
        phoneme_bin_num = numpy.round(phoneme_length * rate).astype(numpy.int32)

        # Repeat each phoneme id for the length of its phoneme.
        phoneme = numpy.repeat(phoneme_list_s, phoneme_bin_num)
        # Repeat each f0 for the combined consonant+vowel length of its mora.
        f0 = numpy.repeat(
            f0,
            [a.sum() for a in numpy.split(phoneme_bin_num, vowel_indexes[:-1] + 1)],
        )

        # Allocate a zeroed (frames x num_phoneme(45)) matrix...
        array = numpy.zeros((len(phoneme), OjtPhoneme.num_phoneme), dtype=numpy.float32)
        # ...and set one-hot rows for each frame's phoneme.
        array[numpy.arange(len(phoneme)), phoneme] = 1
        phoneme = array

        # Run decode_forward to synthesize the waveform.
        with self.mutex:
            wave = self.core.decode_forward(
                length=phoneme.shape[0],
                phoneme_size=phoneme.shape[1],
                f0=f0[:, numpy.newaxis],
                phoneme=phoneme,
                speaker_id=numpy.array(speaker_id, dtype=numpy.int64).reshape(-1),
            )

        # volume: apply gain.
        wave *= query.volumeScale

        # Resample when the requested output rate differs from the decoder's
        # default (24kHz).
        if query.outputSamplingRate != self.default_sampling_rate:
            wave = resample(
                wave,
                query.outputSamplingRate * len(wave) // self.default_sampling_rate,
            )

        # Duplicate to two channels when stereo output was requested.
        if query.outputStereo:
            wave = numpy.array([wave, wave]).T

        return wave
diff --git a/voicevox_engine/synthesis_engine/synthesis_engine_base.py b/voicevox_engine/synthesis_engine/synthesis_engine_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..aaf4fc4a10e35b85c794793424a1e1f10698838b
--- /dev/null
+++ b/voicevox_engine/synthesis_engine/synthesis_engine_base.py
@@ -0,0 +1,259 @@
import copy
from abc import ABCMeta, abstractmethod
from typing import List, Optional

import numpy as np

from ..
# --- voicevox_engine/synthesis_engine/synthesis_engine_base.py ---


def mora_to_text(mora: str) -> str:
    """Convert a mora's phoneme string to its kana text form."""
    # Devoiced vowels are written upper-case; lower-case them for lookup.
    if mora[-1:] in ["A", "I", "U", "E", "O"]:
        mora = mora[:-1] + mora[-1].lower()
    return openjtalk_mora2text.get(mora, mora)


def adjust_interrogative_accent_phrases(
    accent_phrases: List[AccentPhrase],
) -> List[AccentPhrase]:
    """
    When interrogative upspeak is enabled and an accent phrase is a question,
    append a slightly higher-pitched trailing mora so the phrase sounds like
    a question.
    NOTE: move this to a more appropriate place during refactoring.
    """
    adjusted = []
    for accent_phrase in accent_phrases:
        adjusted.append(
            AccentPhrase(
                moras=adjust_interrogative_moras(accent_phrase),
                accent=accent_phrase.accent,
                pause_mora=accent_phrase.pause_mora,
                is_interrogative=accent_phrase.is_interrogative,
            )
        )
    return adjusted


def adjust_interrogative_moras(accent_phrase: AccentPhrase) -> List[Mora]:
    """Return the phrase's moras, with an upspeak mora appended when needed."""
    moras = copy.deepcopy(accent_phrase.moras)
    needs_upspeak = (
        accent_phrase.is_interrogative
        and len(moras) > 0
        and moras[-1].pitch != 0
    )
    if needs_upspeak:
        moras.append(make_interrogative_mora(moras[-1]))
    return moras


def make_interrogative_mora(last_mora: Mora) -> Mora:
    """Build the short, slightly raised mora that marks a question ending."""
    fix_vowel_length = 0.15
    adjust_pitch = 0.3
    max_pitch = 6.5
    return Mora(
        text=openjtalk_mora2text[last_mora.vowel],
        consonant=None,
        consonant_length=None,
        vowel=last_mora.vowel,
        vowel_length=fix_vowel_length,
        pitch=min(last_mora.pitch + adjust_pitch, max_pitch),
    )


def full_context_label_moras_to_moras(
    full_context_moras: List[full_context_label.Mora],
) -> List[Mora]:
    """Convert full-context-label moras into engine Mora models.

    Lengths and pitches are zero-initialized; they are filled in later by
    replace_mora_data.
    """
    moras = []
    for mora in full_context_moras:
        has_consonant = mora.consonant is not None
        moras.append(
            Mora(
                text=mora_to_text("".join(p.phoneme for p in mora.phonemes)),
                consonant=mora.consonant.phoneme if has_consonant else None,
                consonant_length=0 if has_consonant else None,
                vowel=mora.vowel.phoneme,
                vowel_length=0,
                pitch=0,
            )
        )
    return moras


class SynthesisEngineBase(metaclass=ABCMeta):
    """Common interface and text-processing pipeline for synthesis engines."""

    # FIXME: return a Model instead of raw json
    @property
    @abstractmethod
    def speakers(self) -> str:
        raise NotImplementedError

    @property
    @abstractmethod
    def supported_devices(self) -> Optional[str]:
        raise NotImplementedError

    def initialize_speaker_synthesis(  # noqa: B027
        self, speaker_id: int, skip_reinit: bool
    ):
        """
        Initialize synthesis for the given speaker; safe to call repeatedly.
        No-op unless a subclass overrides it.

        Parameters
        ----------
        speaker_id : int
            Speaker ID.
        skip_reinit : bool
            When True, skip re-initialization of already-initialized speakers.
        """
        pass

    def is_initialized_speaker_synthesis(self, speaker_id: int) -> bool:
        """
        Report whether synthesis for the given speaker is initialized.

        Parameters
        ----------
        speaker_id : int
            Speaker ID.

        Returns
        -------
        bool
            Whether synthesis is initialized (True unless overridden).
        """
        return True

    @abstractmethod
    def replace_phoneme_length(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        Set vowel/consonant lengths on the given accent phrases.

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            Accent phrase models.
        speaker_id : int
            Speaker ID.

        Returns
        -------
        accent_phrases : List[AccentPhrase]
            Accent phrases with phoneme lengths filled in.
        """
        raise NotImplementedError()

    @abstractmethod
    def replace_mora_pitch(
        self, accent_phrases: List[AccentPhrase], speaker_id: int
    ) -> List[AccentPhrase]:
        """
        Set mora pitches on the given accent phrases.

        Parameters
        ----------
        accent_phrases : List[AccentPhrase]
            Accent phrase models.
        speaker_id : int
            Speaker ID.

        Returns
        -------
        accent_phrases : List[AccentPhrase]
            Accent phrases with pitches filled in.
        """
        raise NotImplementedError()

    def replace_mora_data(
        self,
        accent_phrases: List[AccentPhrase],
        speaker_id: int,
    ) -> List[AccentPhrase]:
        """Fill in phoneme lengths, then pitches, for the given accent phrases."""
        with_lengths = self.replace_phoneme_length(
            accent_phrases=accent_phrases,
            speaker_id=speaker_id,
        )
        return self.replace_mora_pitch(
            accent_phrases=with_lengths,
            speaker_id=speaker_id,
        )

    def create_accent_phrases(self, text: str, speaker_id: int) -> List[AccentPhrase]:
        """Turn raw text into fully populated accent phrases for a speaker."""
        if len(text.strip()) == 0:
            return []

        utterance = extract_full_context_label(text)
        if len(utterance.breath_groups) == 0:
            return []

        raw_phrases: List[AccentPhrase] = []
        n_breath_groups = len(utterance.breath_groups)
        for i_breath_group, breath_group in enumerate(utterance.breath_groups):
            n_phrases = len(breath_group.accent_phrases)
            for i_accent_phrase, accent_phrase in enumerate(
                breath_group.accent_phrases
            ):
                # A pause mora separates breath groups: it is attached to the
                # last phrase of every breath group except the final one.
                if (
                    i_accent_phrase == n_phrases - 1
                    and i_breath_group != n_breath_groups - 1
                ):
                    pause_mora = Mora(
                        text="、",
                        consonant=None,
                        consonant_length=None,
                        vowel="pau",
                        vowel_length=0,
                        pitch=0,
                    )
                else:
                    pause_mora = None
                raw_phrases.append(
                    AccentPhrase(
                        moras=full_context_label_moras_to_moras(accent_phrase.moras),
                        accent=accent_phrase.accent,
                        pause_mora=pause_mora,
                        is_interrogative=accent_phrase.is_interrogative,
                    )
                )

        return self.replace_mora_data(
            accent_phrases=raw_phrases,
            speaker_id=speaker_id,
        )

    def synthesis(
        self,
        query: AudioQuery,
        speaker_id: int,
        enable_interrogative_upspeak: bool = True,
    ) -> np.ndarray:
        """
        Apply the interrogative-upspeak transformation to the query, then
        delegate to the subclass implementation `_synthesis_impl`.

        Parameters
        ----------
        query : AudioQuery
            Synthesis query.
        speaker_id : int
            Speaker ID.
        enable_interrogative_upspeak : bool
            Whether to automatically raise the pitch at question endings.

        Returns
        -------
        wave : numpy.ndarray
            Synthesized waveform.
        """
        # The same query object may be passed multiple times (e.g. morphing),
        # so never mutate the caller's query.
        query = copy.deepcopy(query)
        if enable_interrogative_upspeak:
            query.accent_phrases = adjust_interrogative_accent_phrases(
                query.accent_phrases
            )
        return self._synthesis_impl(query, speaker_id)

    @abstractmethod
    def _synthesis_impl(self, query: AudioQuery, speaker_id: int) -> np.ndarray:
        """
        Build the inputs required for synthesis from a query and produce the
        waveform.

        Parameters
        ----------
        query : AudioQuery
            Synthesis query.
        speaker_id : int
            Speaker ID.

        Returns
        -------
        wave : numpy.ndarray
            Synthesized waveform.
        """
        raise NotImplementedError()
# --- voicevox_engine/user_dict.py (module setup, persistence, compilation) ---

root_dir = engine_root()
save_dir = get_save_dir()

if not save_dir.is_dir():
    save_dir.mkdir(parents=True)

# Bundled default dictionary (csv), user dictionary (json), compiled output.
default_dict_path = root_dir / "default.csv"
user_dict_path = save_dir / "user_dict.json"
compiled_dict_path = save_dir / "user.dic"


mutex_user_dict = threading.Lock()
mutex_openjtalk_dict = threading.Lock()


@mutex_wrapper(mutex_user_dict)
def write_to_json(user_dict: Dict[str, UserDictWord], user_dict_path: Path):
    """Serialize the user dictionary to JSON, storing cost instead of priority."""
    converted = {}
    for word_uuid, word in user_dict.items():
        word_dict = word.dict()
        word_dict["cost"] = priority2cost(
            word_dict["context_id"], word_dict["priority"]
        )
        del word_dict["priority"]
        converted[word_uuid] = word_dict
    # Serialize up-front so a JSON error cannot leave a half-written file.
    user_dict_json = json.dumps(converted, ensure_ascii=False)
    user_dict_path.write_text(user_dict_json, encoding="utf-8")


@mutex_wrapper(mutex_openjtalk_dict)
def update_dict(
    default_dict_path: Path = default_dict_path,
    user_dict_path: Path = user_dict_path,
    compiled_dict_path: Path = compiled_dict_path,
):
    """Rebuild and load the OpenJTalk user dictionary (default csv + user words)."""
    random_string = uuid4()
    tmp_csv_path = save_dir / f".tmp.dict_csv-{random_string}"
    tmp_compiled_path = save_dir / f".tmp.dict_compiled-{random_string}"

    try:
        # Assemble the dictionary csv.
        if not default_dict_path.is_file():
            print("Warning: Cannot find default dictionary.", file=sys.stderr)
            return
        default_dict = default_dict_path.read_text(encoding="utf-8")
        if default_dict == default_dict.rstrip():
            default_dict += "\n"
        rows = [default_dict]
        for word in read_dict(user_dict_path=user_dict_path).values():
            rows.append(
                f"{word.surface},{word.context_id},{word.context_id},"
                f"{priority2cost(word.context_id, word.priority)},"
                f"{word.part_of_speech},{word.part_of_speech_detail_1},"
                f"{word.part_of_speech_detail_2},{word.part_of_speech_detail_3},"
                f"{word.inflectional_type},{word.inflectional_form},"
                f"{word.stem},{word.yomi},{word.pronunciation},"
                f"{word.accent_type}/{word.mora_count},"
                f"{word.accent_associative_rule}\n"
            )
        tmp_csv_path.write_text("".join(rows), encoding="utf-8")

        # Compile the csv for OpenJTalk.
        pyopenjtalk.create_user_dict(str(tmp_csv_path), str(tmp_compiled_path))
        if not tmp_compiled_path.is_file():
            raise RuntimeError("辞書のコンパイル時にエラーが発生しました。")

        # Swap in and (re)load the compiled dictionary.
        pyopenjtalk.unset_user_dict()
        tmp_compiled_path.replace(compiled_dict_path)
        if compiled_dict_path.is_file():
            pyopenjtalk.set_user_dict(str(compiled_dict_path.resolve(strict=True)))

    except Exception as e:
        print("Error: Failed to update dictionary.", file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        raise e

    finally:
        # Clean up temporary files.
        if tmp_csv_path.exists():
            tmp_csv_path.unlink()
        if tmp_compiled_path.exists():
            tmp_compiled_path.unlink()
# --- voicevox_engine/user_dict.py (word CRUD and priority/cost conversion) ---


@mutex_wrapper(mutex_user_dict)
def read_dict(user_dict_path: Path = user_dict_path) -> Dict[str, UserDictWord]:
    """Read the user dictionary JSON file into UserDictWord models."""
    if not user_dict_path.is_file():
        return {}
    with user_dict_path.open(encoding="utf-8") as f:
        result: Dict[str, UserDictWord] = {}
        for word_uuid, word in json.load(f).items():
            # cost2priority needs a context_id, but dictionaries written by
            # engine <= 0.12 hard-coded it and did not store it. Those entries
            # were all proper nouns, so backfill the proper-noun context_id.
            if word.get("context_id") is None:
                word["context_id"] = part_of_speech_data[
                    WordTypes.PROPER_NOUN
                ].context_id
            word["priority"] = cost2priority(word["context_id"], word["cost"])
            del word["cost"]
            result[str(UUID(word_uuid))] = UserDictWord(**word)

    return result


def create_word(
    surface: str,
    pronunciation: str,
    accent_type: int,
    word_type: Optional[WordTypes] = None,
    priority: Optional[int] = None,
) -> UserDictWord:
    """
    Build a UserDictWord from user input.

    Raises
    ------
    HTTPException
        422 when the word type is unknown or the priority is out of range.
    """
    if word_type is None:
        word_type = WordTypes.PROPER_NOUN
    if word_type not in part_of_speech_data:
        raise HTTPException(status_code=422, detail="不明な品詞です")
    if priority is None:
        priority = 5
    if not MIN_PRIORITY <= priority <= MAX_PRIORITY:
        raise HTTPException(status_code=422, detail="優先度の値が無効です")
    pos_detail = part_of_speech_data[word_type]
    return UserDictWord(
        surface=surface,
        context_id=pos_detail.context_id,
        priority=priority,
        part_of_speech=pos_detail.part_of_speech,
        part_of_speech_detail_1=pos_detail.part_of_speech_detail_1,
        part_of_speech_detail_2=pos_detail.part_of_speech_detail_2,
        part_of_speech_detail_3=pos_detail.part_of_speech_detail_3,
        inflectional_type="*",
        inflectional_form="*",
        stem="*",
        yomi=pronunciation,
        pronunciation=pronunciation,
        accent_type=accent_type,
        accent_associative_rule="*",
    )


def _write_and_compile(
    user_dict: Dict[str, UserDictWord],
    user_dict_path: Path,
    compiled_dict_path: Path,
):
    # Shared tail of apply/rewrite/delete: persist the JSON, then recompile
    # the OpenJTalk dictionary.
    write_to_json(user_dict, user_dict_path)
    update_dict(user_dict_path=user_dict_path, compiled_dict_path=compiled_dict_path)


def apply_word(
    surface: str,
    pronunciation: str,
    accent_type: int,
    word_type: Optional[WordTypes] = None,
    priority: Optional[int] = None,
    user_dict_path: Path = user_dict_path,
    compiled_dict_path: Path = compiled_dict_path,
) -> str:
    """Add a new word to the user dictionary and return its UUID."""
    word = create_word(
        surface=surface,
        pronunciation=pronunciation,
        accent_type=accent_type,
        word_type=word_type,
        priority=priority,
    )
    user_dict = read_dict(user_dict_path=user_dict_path)
    word_uuid = str(uuid4())
    user_dict[word_uuid] = word
    _write_and_compile(user_dict, user_dict_path, compiled_dict_path)
    return word_uuid


def rewrite_word(
    word_uuid: str,
    surface: str,
    pronunciation: str,
    accent_type: int,
    word_type: Optional[WordTypes] = None,
    priority: Optional[int] = None,
    user_dict_path: Path = user_dict_path,
    compiled_dict_path: Path = compiled_dict_path,
):
    """Replace an existing user-dictionary word identified by its UUID."""
    word = create_word(
        surface=surface,
        pronunciation=pronunciation,
        accent_type=accent_type,
        word_type=word_type,
        priority=priority,
    )
    user_dict = read_dict(user_dict_path=user_dict_path)
    if word_uuid not in user_dict:
        raise HTTPException(status_code=422, detail="UUIDに該当するワードが見つかりませんでした")
    user_dict[word_uuid] = word
    _write_and_compile(user_dict, user_dict_path, compiled_dict_path)


def delete_word(
    word_uuid: str,
    user_dict_path: Path = user_dict_path,
    compiled_dict_path: Path = compiled_dict_path,
):
    """Remove a word from the user dictionary by UUID."""
    user_dict = read_dict(user_dict_path=user_dict_path)
    if word_uuid not in user_dict:
        raise HTTPException(status_code=422, detail="IDに該当するワードが見つかりませんでした")
    del user_dict[word_uuid]
    _write_and_compile(user_dict, user_dict_path, compiled_dict_path)


def import_user_dict(
    dict_data: Dict[str, UserDictWord],
    override: bool = False,
    user_dict_path: Path = user_dict_path,
    default_dict_path: Path = default_dict_path,
    compiled_dict_path: Path = compiled_dict_path,
):
    """
    Import a whole user dictionary, merging with the existing one.

    Parameters
    ----------
    dict_data : Dict[str, UserDictWord]
        UUID -> word mapping to import.
    override : bool
        When True, imported entries win over existing ones on UUID conflict.
    """
    # Validate the imported data. Explicit raises are used instead of the
    # original `assert` statements, which are stripped under `python -O`.
    for word_uuid, word in dict_data.items():
        UUID(word_uuid)
        if not isinstance(word, UserDictWord):
            raise ValueError("dict_data values must be UserDictWord instances")
        for pos_detail in part_of_speech_data.values():
            if word.context_id == pos_detail.context_id:
                if (
                    word.part_of_speech != pos_detail.part_of_speech
                    or word.part_of_speech_detail_1
                    != pos_detail.part_of_speech_detail_1
                    or word.part_of_speech_detail_2
                    != pos_detail.part_of_speech_detail_2
                    or word.part_of_speech_detail_3
                    != pos_detail.part_of_speech_detail_3
                ):
                    raise ValueError(
                        "part-of-speech fields do not match the word's context_id"
                    )
                if (
                    word.accent_associative_rule
                    not in pos_detail.accent_associative_rules
                ):
                    raise ValueError(
                        "accent_associative_rule is not allowed for this part of speech"
                    )
                break
        else:
            raise ValueError("対応していない品詞です")
    old_dict = read_dict(user_dict_path=user_dict_path)
    if override:
        new_dict = {**old_dict, **dict_data}
    else:
        new_dict = {**dict_data, **old_dict}
    write_to_json(user_dict=new_dict, user_dict_path=user_dict_path)
    update_dict(
        default_dict_path=default_dict_path,
        user_dict_path=user_dict_path,
        compiled_dict_path=compiled_dict_path,
    )


def search_cost_candidates(context_id: int) -> List[int]:
    """Return the cost candidates for a part of speech, keyed by context_id."""
    for value in part_of_speech_data.values():
        if value.context_id == context_id:
            return value.cost_candidates
    raise HTTPException(status_code=422, detail="品詞IDが不正です")


def cost2priority(context_id: int, cost: conint(ge=-32768, le=32767)) -> int:
    """Map a MeCab cost to the nearest supported priority for this POS."""
    cost_candidates = search_cost_candidates(context_id)
    # Pick the candidate closest to the stored cost; together with
    # priority2cost this snaps hand-edited costs onto supported values.
    # Reference: https://qiita.com/Krypf/items/2eada91c37161d17621d
    # int() cast: np.argmin returns a numpy integer, not a Python int.
    return MAX_PRIORITY - int(np.argmin(np.abs(np.array(cost_candidates) - cost)))


def priority2cost(
    context_id: int, priority: conint(ge=MIN_PRIORITY, le=MAX_PRIORITY)
) -> int:
    """Map a priority back to its canonical MeCab cost for this POS."""
    cost_candidates = search_cost_candidates(context_id)
    return cost_candidates[MAX_PRIORITY - priority]
# --- voicevox_engine/utility/__init__.py ---
from .connect_base64_waves import (
    ConnectBase64WavesException,
    connect_base64_waves,
    decode_base64_waves,
)
from .core_version_utility import get_latest_core_version, parse_core_version
from .mutex_utility import mutex_wrapper
from .path_utility import delete_file, engine_root, get_save_dir

__all__ = [
    "ConnectBase64WavesException",
    "connect_base64_waves",
    "decode_base64_waves",
    "get_latest_core_version",
    "parse_core_version",
    "delete_file",
    "engine_root",
    "get_save_dir",
    "mutex_wrapper",
]


# --- voicevox_engine/utility/connect_base64_waves.py ---
import base64
import io
from typing import List, Tuple

import numpy as np
import soundfile
from scipy.signal import resample


class ConnectBase64WavesException(Exception):
    """Raised when base64-encoded wav data cannot be decoded or combined."""

    def __init__(self, message: str):
        self.message = message


def decode_base64_waves(waves: List[str]) -> List[Tuple[np.ndarray, int]]:
    """
    Decode a list of base64-encoded wav payloads.

    Parameters
    ----------
    waves : List[str]
        base64-encoded wav data.

    Returns
    -------
    waves_nparray_sr : List[Tuple[np.ndarray, int]]
        (waveform array, sampling rate) tuples, one per input.
    """
    if not waves:
        raise ConnectBase64WavesException("wavファイルが含まれていません")

    decoded = []
    for wave in waves:
        try:
            wav_bin = base64.standard_b64decode(wave)
        except ValueError:
            raise ConnectBase64WavesException("base64デコードに失敗しました")
        try:
            decoded.append(soundfile.read(io.BytesIO(wav_bin)))
        except Exception:
            raise ConnectBase64WavesException("wavファイルを読み込めませんでした")

    return decoded


def connect_base64_waves(waves: List[str]) -> Tuple[np.ndarray, int]:
    """Decode base64 wavs, unify rate/channel count, and concatenate them."""
    waves_nparray_sr = decode_base64_waves(waves)

    max_sampling_rate = max(sr for _, sr in waves_nparray_sr)
    max_channels = max(x.ndim for x, _ in waves_nparray_sr)
    assert 0 < max_channels <= 2

    aligned = []
    for nparray, sr in waves_nparray_sr:
        # Upsample anything below the highest rate found among the inputs.
        if sr != max_sampling_rate:
            nparray = resample(nparray, max_sampling_rate * len(nparray) // sr)
        # Promote mono to stereo when any input is stereo.
        if nparray.ndim < max_channels:
            nparray = np.array([nparray, nparray]).T
        aligned.append(nparray)

    return np.concatenate(aligned), max_sampling_rate
# --- voicevox_engine/utility/core_version_utility.py ---
from typing import Iterable


def parse_core_version(version: str) -> "Version":  # noqa: F821
    """Parse a core version string into a semver Version object."""
    # Imported lazily: semver is only needed when versions are actually parsed.
    from semver.version import Version

    return Version.parse(version)


def get_latest_core_version(versions: Iterable[str]) -> str:
    """
    Return the highest semver version among `versions` as a string.

    Raises
    ------
    Exception
        If `versions` is empty.
    """
    # Materialize first: the original called len() on the argument, which
    # raises TypeError for generic iterables (e.g. generators) even though
    # the annotation promises Iterable support.
    version_list = list(versions)
    if len(version_list) == 0:
        raise Exception("versions must be non-empty.")

    return str(max(map(parse_core_version, version_list)))


# --- voicevox_engine/utility/mutex_utility.py ---
import functools
import threading


def mutex_wrapper(lock: threading.Lock):
    """Decorator factory: run the wrapped function while holding `lock`."""

    def wrap(f):
        # functools.wraps keeps the wrapped function's name/docstring intact.
        @functools.wraps(f)
        def func(*args, **kw):
            with lock:
                return f(*args, **kw)

        return func

    return wrap


# --- voicevox_engine/utility/path_utility.py (module imports) ---
import os
import sys
import traceback
from pathlib import Path

# NOTE(review): the original file also does `from appdirs import user_data_dir`
# here (third-party); get_save_dir() depends on it — confirm the appdirs
# dependency remains declared for this module.
def engine_root() -> Path:
    """Return the engine's root directory (source tree or built binary dir)."""
    if is_development():
        root_dir = Path(__file__).parents[2]
    else:
        # Built with Nuitka/PyInstaller: the executable's directory is the root.
        root_dir = Path(sys.argv[0]).parent

    return root_dir.resolve(strict=True)


def is_development() -> bool:
    """
    Return True when running from source, i.e. not compiled with
    Nuitka or frozen with PyInstaller.
    """
    # Nuitka builds expose a global `__compiled__`.
    if "__compiled__" in globals():
        return False
    # PyInstaller sets sys.frozen on its bootloader.
    if getattr(sys, "frozen", False):
        return False
    return True


def get_save_dir():
    # FIXME: include an engine-specific ID in the save location.
    # FIXME: on Windows this currently saves under
    # `voicevox-engine/voicevox-engine`; move it to `VOICEVOX/voicevox-engine`.
    app_name = "voicevox-engine-dev" if is_development() else "voicevox-engine"
    return Path(user_data_dir(app_name))


def delete_file(file_path: str) -> None:
    """Remove `file_path`, logging (not raising) if removal fails."""
    try:
        os.remove(file_path)
    except OSError:
        traceback.print_exc()