Update app.py
app.py
CHANGED
@@ -33,7 +33,7 @@ def initialize_model(config, ckpt):
 
     return sampler
 
-sampler = initialize_model('configs/text_to_audio/txt2audio_args.yaml', 'useful_ckpts/
+sampler = initialize_model('configs/text_to_audio/txt2audio_args.yaml', 'useful_ckpts/maa1_full.ckpt')
 vocoder = VocoderBigVGAN('vocoder/logs/bigvnat',device=device)
 clap_model = CLAPWrapper('useful_ckpts/CLAP/CLAP_weights_2022.pth','useful_ckpts/CLAP/config.yml',use_cuda=torch.cuda.is_available())
 
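This hunk only repoints initialize_model at the full MAA1 checkpoint; the sampler, vocoder, and CLAP re-ranker are all built once at module import, so the Space pays the load cost at startup rather than per request. A quick, hypothetical sanity check one might run before committing a new checkpoint path (not code from this repo; the 'state_dict' nesting is the usual PyTorch Lightning layout and is an assumption here):

```python
# Hypothetical sanity check for the new checkpoint path: load it with
# torch and confirm the weights are where initialize_model expects them.
import torch

ckpt = torch.load('useful_ckpts/maa1_full.ckpt', map_location='cpu')
state = ckpt.get('state_dict', ckpt)  # Lightning ckpts nest weights under 'state_dict'
print(f'{len(state)} tensors; first key: {next(iter(state))}')
```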
@@ -114,7 +114,7 @@ with gr.Blocks() as demo:
                 ddim_steps = gr.Slider(label="Steps", minimum=1,
                                        maximum=150, value=100, step=1)
                 scale = gr.Slider(
-                    label="Guidance Scale:(Large => more relevant to text but the quality may drop)", minimum=0.1, maximum=
+                    label="Guidance Scale:(Large => more relevant to text but the quality may drop)", minimum=0.1, maximum=8.0, value=3.0, step=0.1
                 )
                 seed = gr.Slider(
                     label="Seed:Change this value (any integer number) will lead to a different generation result.",
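This hunk completes the previously truncated slider: the classifier-free guidance scale now runs from 0.1 to 8.0 in 0.1 steps and defaults to 3.0, matching the scale values used in the examples below. A standalone sketch of the same wiring, with a stub in place of the Space's real sampler (generate and its body are placeholders, not code from this repo):

```python
# Standalone sketch of the guidance-scale slider wiring; the gr.Slider
# kwargs mirror the diff. generate() is a stub for the real sampler.
import gradio as gr

def generate(prompt, scale):
    # The real app runs DDIM sampling with classifier-free guidance,
    # roughly: eps = eps_uncond + scale * (eps_cond - eps_uncond).
    return f'would sample "{prompt}" at guidance scale {scale}'

with gr.Blocks() as demo:
    prompt = gr.Textbox(label='Prompt')
    scale = gr.Slider(label='Guidance Scale', minimum=0.1, maximum=8.0,
                      value=3.0, step=0.1)
    result = gr.Textbox(label='Result')
    gr.Button('Generate').click(generate, inputs=[prompt, scale], outputs=[result])

demo.launch()
```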
@@ -136,8 +136,8 @@ with gr.Blocks() as demo:
     with gr.Row():
         with gr.Column():
             gr.Examples(
-                examples = [['a dog barking and a bird chirping',100,3,
-                ['
+                examples = [['a dog barking and a bird chirping',100,3,3,55],['Pigeons peck, coo, and flap their wings before a man speaks',100,3,3,55],
+                ['music of violin and piano',100,3,2,88],['wind thunder and rain falling',100,3,3,55],['music made by drum kit',100,3,3,55]],
                 inputs = [prompt,ddim_steps, num_samples, scale, seed],
                 outputs = [outaudio]
             )
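Each row in examples supplies one value per component in inputs, in order, so ['music of violin and piano',100,3,2,88] means 100 DDIM steps, 3 samples, guidance scale 2, seed 88. A minimal runnable sketch of that mapping (component names echo the Space's; echo() and the slider ranges for samples and seed are placeholders, not code from this repo):

```python
# Minimal sketch of how gr.Examples rows map onto the inputs list:
# one value per component, in order. echo() stands in for the Space's
# real text-to-audio function.
import gradio as gr

def echo(prompt, ddim_steps, num_samples, scale, seed):
    return f'{prompt} | steps={ddim_steps} samples={num_samples} scale={scale} seed={seed}'

with gr.Blocks() as demo:
    prompt = gr.Textbox(label='Prompt')
    ddim_steps = gr.Slider(minimum=1, maximum=150, value=100, step=1, label='Steps')
    num_samples = gr.Slider(minimum=1, maximum=4, value=3, step=1, label='Samples')
    scale = gr.Slider(minimum=0.1, maximum=8.0, value=3.0, step=0.1, label='Guidance Scale')
    seed = gr.Slider(minimum=0, maximum=1000, value=55, step=1, label='Seed')
    out = gr.Textbox(label='Output')
    gr.Examples(
        examples=[['a dog barking and a bird chirping', 100, 3, 3, 55],
                  ['music of violin and piano', 100, 3, 2, 88]],
        inputs=[prompt, ddim_steps, num_samples, scale, seed],
        outputs=[out],
        fn=echo,
    )

demo.launch()
```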