thecollabagepatch committed
Commit dd93602
Parent(s): d700110

getting spicy now

Files changed (1): app.py (+13 -4)
app.py CHANGED
@@ -19,8 +19,8 @@ def rms_normalize(y, target_rms=0.05):
 
 def preprocess_audio(waveform):
     waveform_np = waveform.cpu().squeeze().numpy()  # Move to CPU before converting to NumPy
-    processed_waveform_np = rms_normalize(peak_normalize(waveform_np))
-    return torch.from_numpy(processed_waveform_np).unsqueeze(0).to(device)
+    # processed_waveform_np = rms_normalize(peak_normalize(waveform_np))
+    return torch.from_numpy(waveform_np).unsqueeze(0).to(device)
 
 def create_slices(song, sr, slice_duration, bpm, num_slices=5):
     song_length = song.shape[-1] / sr
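
Note on this hunk: preprocess_audio now returns the raw waveform, skipping the normalization chain. For reference, a minimal sketch of what that chain plausibly does — rms_normalize's signature is taken from the hunk header above, while peak_normalize's body is an assumption, not code from this repo:

import numpy as np

def peak_normalize(y):
    # Assumed implementation: scale so the largest absolute sample hits 1.0.
    peak = np.max(np.abs(y))
    return y / peak if peak > 0 else y

def rms_normalize(y, target_rms=0.05):
    # Signature from the hunk header; the body is a sketch. Scales the signal
    # so its root-mean-square level matches target_rms.
    rms = np.sqrt(np.mean(y ** 2))
    return y * (target_rms / rms) if rms > 0 else y

With the chain commented out, continuation prompts keep the source clip's original loudness instead of being leveled to target_rms.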
@@ -113,7 +113,8 @@ def generate_music(seed, use_chords, chord_progression, prompt_duration, musicge
     slices = create_slices(song, sr, 35, bpm, num_slices=5)
 
     # Load the model
-    model_continue = MusicGen.get_pretrained(musicgen_model)
+    model_name = musicgen_model.split(" ")[0]
+    model_continue = MusicGen.get_pretrained(model_name)
 
     # Setting generation parameters
     model_continue.set_generation_params(
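
Note on this hunk: the dropdown values added below carry a human-readable size suffix (e.g. "(small)"), so the handler splits on the first space to recover the bare repo id before calling MusicGen.get_pretrained. A quick illustration, using a value from this commit:

musicgen_model = "thepatch/vanya_ai_dnb_0.1 (small)"  # value as received from the Gradio dropdown
model_name = musicgen_model.split(" ")[0]             # -> "thepatch/vanya_ai_dnb_0.1"

This parsing is safe because Hugging Face repo ids cannot contain spaces, so the first token is always the full id.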
@@ -205,7 +206,15 @@ with gr.Blocks() as iface:
     use_chords = gr.Checkbox(label="Control Chord Progression", value=False)
     chord_progression = gr.Textbox(label="Chord Progression (e.g., Am CM Dm E7 Am)", visible=True)
     prompt_duration = gr.Dropdown(label="Prompt Duration (seconds)", choices=list(range(1, 11)), value=7)
-    musicgen_model = gr.Textbox(label="MusicGen Model", value="thepatch/vanya_ai_dnb_0.1")
+    musicgen_models = [
+        "thepatch/vanya_ai_dnb_0.1 (small)",
+        "thepatch/budots_remix (small)",
+        "thepatch/PhonkV2 (small)",
+        "thepatch/bleeps-medium (medium)",
+        "thepatch/hoenn_lofi (large)"
+    ]
+
+    musicgen_model = gr.Dropdown(label="MusicGen Model", choices=musicgen_models, value=musicgen_models[0])
     num_iterations = gr.Slider(label="Number of Iterations", minimum=1, maximum=10, step=1, value=3)
     bpm = gr.Slider(label="BPM", minimum=60, maximum=200, step=1, value=140)
     generate_button = gr.Button("Generate Music")
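
Note on this hunk: the free-text Textbox becomes a Dropdown, so users pick from the five fine-tuned checkpoints instead of typing a repo id. A self-contained sketch of how a Dropdown value reaches a click handler — the stub handler and the .click() wiring here are hypothetical, since that wiring isn't part of this diff:

import gradio as gr

def generate_music(musicgen_model):
    # Hypothetical stub: the real handler also takes seed, chords, bpm, etc.
    return f"Would load: {musicgen_model.split(' ')[0]}"

with gr.Blocks() as demo:
    musicgen_model = gr.Dropdown(
        label="MusicGen Model",
        choices=["thepatch/vanya_ai_dnb_0.1 (small)", "thepatch/hoenn_lofi (large)"],
        value="thepatch/vanya_ai_dnb_0.1 (small)",
    )
    status = gr.Textbox(label="Status")
    generate_button = gr.Button("Generate Music")
    generate_button.click(generate_music, inputs=musicgen_model, outputs=status)

demo.launch()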
 