chuks-cmu committed on
Commit
c7f2982
1 Parent(s): 4328a67

Create app.py

Files changed (1)
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ import gradio as gr
+ import torch
+ import torchaudio
+ from einops import rearrange
+ from stable_audio_tools import get_pretrained_model
+ from stable_audio_tools.inference.generation import generate_diffusion_cond
+
+ def gen_music(description):
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+
+     # Download the pretrained model and read its native sample rate and size
+     model, model_config = get_pretrained_model("stabilityai/stable-audio-open-1.0")
+     sample_rate = model_config["sample_rate"]
+     sample_size = model_config["sample_size"]
+
+     model = model.to(device)
+
+     # Set up text and timing conditioning (duration assumed: 30 s starting at 0 s)
+     conditioning = [{
+         "prompt": description,
+         "seconds_start": 0,
+         "seconds_total": 30,
+     }]
+
+     # Generate stereo audio
+     output = generate_diffusion_cond(
+         model,
+         conditioning=conditioning,
+         sample_size=sample_size,
+         device=device,
+     )
+
+     # Rearrange audio batch to a single sequence
+     output = rearrange(output, "b d n -> d (b n)")
+
+     # Peak normalize, clip, convert to int16, and save to file
+     output = output.to(torch.float32).div(torch.max(torch.abs(output))).clamp(-1, 1).mul(32767).to(torch.int16).cpu()
+     torchaudio.save("output.wav", output, sample_rate)
+
+     return "output.wav"
+
+
+ # Define the Gradio interface
+ description = gr.Textbox(label="Description", placeholder="128 BPM tech house drum loop")
+ output_path = gr.Audio(label="Generated Music", type="filepath")
+
+ gr.Interface(
+     fn=gen_music,
+     inputs=[description],
+     outputs=output_path,
+     title="StableAudio Music Generation Demo",
+ ).launch()