Add application file
Browse files- app.py +38 -0
- requirements.txt +5 -0
app.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import gradio as gr
|
3 |
+
from bark import SAMPLE_RATE, generate_audio, preload_models
|
4 |
+
|
5 |
+
# When True, skip model preloading and real generation so the UI can be
# exercised quickly without GPU/model downloads.
DEBUG_MODE = False

# Preload the Bark models once at startup (heavy: downloads/loads weights).
if not DEBUG_MODE:
    _ = preload_models()

# Default prompt shown in the textbox; demonstrates Bark's non-speech tokens
# (e.g. "[laughs]") and multi-line input.
default_text = "Hello, my name is Suno. And, uh — and I like pizza. [laughs]\nBut I also have other interests such as playing tic tac toe."
|
11 |
+
|
12 |
+
def gen_tts(text, history_prompt, temp_semantic, temp_waveform):
    """Synthesize audio from *text* with Bark and return it for Gradio.

    Parameters
    ----------
    text : str
        Prompt text to synthesize.
    history_prompt : str | None
        Acoustic-prompt label from the dropdown (e.g. "Speech 3"),
        "Unconditional", or None for unconditional generation.
    temp_semantic : float
        Sampling temperature for the semantic-token stage.
    temp_waveform : float
        Sampling temperature for the waveform-token stage.

    Returns
    -------
    tuple
        ``(SAMPLE_RATE, audio_arr)`` — the (rate, ndarray) pair Gradio's
        audio output component expects.
    """
    # BUG FIX: Gradio can pass None when no dropdown value is set; the old
    # code then crashed on .lower(). Treat None like "Unconditional".
    if history_prompt is None or history_prompt == "Unconditional":
        history_prompt = None
    else:
        # Map UI labels like "Speech 3" to bark prompt keys like "speech_3".
        history_prompt = history_prompt.lower().replace(" ", "_")
    if DEBUG_MODE:
        # One second of silence so the UI round-trips without the models.
        audio_arr = np.zeros(SAMPLE_RATE)
    else:
        audio_arr = generate_audio(
            text,
            history_prompt=history_prompt,
            text_temp=temp_semantic,
            waveform_temp=temp_waveform,
        )
    return (SAMPLE_RATE, audio_arr)
|
22 |
+
|
23 |
+
# Build and launch the Gradio demo UI around gen_tts.
iface = gr.Interface(
    title="<div style='text-align:left'>🐶 Bark</div>",
    # BUG FIX: use an absolute URL for the Suno link — the bare "www.suno.ai"
    # rendered as a broken relative link in markdown.
    description="Bark is a universal text-to-audio model created by [Suno](https://www.suno.ai), with code publicly available [here](https://github.com/suno-ai/bark). Bark can generate highly realistic, multilingual speech as well as other audio - including music, background noise and simple sound effects. This demo should be used for research purposes only. Commercial use is strictly prohibited. The model output is not censored and the authors do not endorse the opinions in the generated content. Use at your own risk.",
    fn=gen_tts,
    inputs=[
        gr.Textbox(label="Input Text", lines=3, value=default_text),
        gr.Dropdown(
            ["Unconditional"] + [f"Speech {n}" for n in range(8)] + [f"Music {n}" for n in range(6)],
            # BUG FIX: the previous default, "None", is not one of the
            # choices above; default to the valid "Unconditional" entry.
            value="Unconditional",
            label="Acoustic Prompt",
            info="This choice primes the model on how to condition the generated audio.",
        ),
        gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="Temp 1", info="Gen. temperature of semantic tokens. (lower is more conservative, higher is more diverse)"),
        gr.Slider(minimum=0, maximum=1, step=0.01, value=0.7, label="Temp 2", info="Gen. temperature of waveform tokens. (lower is more conservative, higher is more diverse)"),
    ],
    outputs="audio",
)
iface.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
git+https://github.com/suno-ai/bark.git
|
2 |
+
https://download.pytorch.org/whl/nightly/pytorch_triton-2.1.0%2B46672772b4-cp38-cp38-linux_x86_64.whl
|
3 |
+
https://download.pytorch.org/whl/nightly/cu117/torch-2.1.0.dev20230413%2Bcu117-cp38-cp38-linux_x86_64.whl
|
4 |
+
https://download.pytorch.org/whl/nightly/cu117/torchvision-0.16.0.dev20230413%2Bcu117-cp38-cp38-linux_x86_64.whl
|
5 |
+
https://download.pytorch.org/whl/nightly/cu117/torchaudio-2.1.0.dev20230413%2Bcu117-cp38-cp38-linux_x86_64.whl
|