thecollabagepatch committed on
Commit
140426b
1 Parent(s): 2c5b1fe
Files changed (1) hide show
  1. app.py +15 -16
app.py CHANGED
@@ -118,12 +118,11 @@ def generate_midi(seed, use_chords, chord_progression, bpm):
118
  os.remove(midi_filename)
119
 
120
  sample_rate = 44100 # Assuming fixed sample rate from fluidsynth command
121
- return (wav_filename, sample_rate)
122
 
123
  @spaces.GPU(duration=120)
124
- def generate_music(midi_audio_info, prompt_duration, musicgen_model, num_iterations, bpm):
125
- wav_filename, sample_rate = midi_audio_info
126
- # Correctly use wav_filename to load the audio
127
  song, sr = torchaudio.load(wav_filename)
128
  song = song.to(device)
129
  # Use the user-provided BPM value for duration calculation
@@ -219,29 +218,29 @@ with gr.Blocks() as iface:
219
 
220
  with gr.Row():
221
  with gr.Column():
222
- seed = gr.Textbox(label="seed (leave blank for random)", value="")
223
- use_chords = gr.Checkbox(label="control chord progression", value=False)
224
- chord_progression = gr.Textbox(label="chord progression (e.g., Am CM Dm E7 Am)", visible=True)
225
- bpm = gr.Slider(label="BPM", minimum=60, maximum=200, step=1, value=110)
 
226
  generate_midi_button = gr.Button("Generate MIDI")
227
- midi_audio = gr.Audio(label="Generated MIDI Audio")
228
 
229
  with gr.Column():
230
- prompt_duration = gr.Dropdown(label="prompt duration (seconds)", choices=list(range(1, 11)), value=7)
231
- musicgen_models = [
 
232
  "thepatch/vanya_ai_dnb_0.1 (small)",
233
  "thepatch/budots_remix (small)",
234
  "thepatch/PhonkV2 (small)",
235
  "thepatch/bleeps-medium (medium)",
236
  "thepatch/hoenn_lofi (large)"
237
- ]
238
- musicgen_model = gr.Dropdown(label="musicGen model", choices=musicgen_models, value=musicgen_models[0])
239
- num_iterations = gr.Slider(label="number of iterations", minimum=1, maximum=3, step=1, value=3)
240
  generate_music_button = gr.Button("Generate Music")
241
  output_audio = gr.Audio(label="Generated Music")
242
 
243
-
244
- generate_midi_button.click(generate_midi, inputs=[seed, use_chords, chord_progression, bpm], outputs=[midi_audio])
245
  generate_music_button.click(generate_music, inputs=[midi_audio, prompt_duration, musicgen_model, num_iterations, bpm], outputs=output_audio)
246
 
247
  iface.launch()
 
118
  os.remove(midi_filename)
119
 
120
  sample_rate = 44100 # Assuming fixed sample rate from fluidsynth command
121
+ return wav_filename
122
 
123
  @spaces.GPU(duration=120)
124
+ def generate_music(wav_filename, prompt_duration, musicgen_model, num_iterations, bpm):
125
+ # Load the audio from the passed file path
 
126
  song, sr = torchaudio.load(wav_filename)
127
  song = song.to(device)
128
  # Use the user-provided BPM value for duration calculation
 
218
 
219
  with gr.Row():
220
  with gr.Column():
221
+ # Input components for MIDI generation
222
+ seed = gr.Textbox(label="Seed (leave blank for random)", value="")
223
+ use_chords = gr.Checkbox(label="Control Chord Progression", value=False)
224
+ chord_progression = gr.Textbox(label="Chord Progression (e.g., Am CM Dm E7 Am)", visible=False)
225
+ bpm = gr.Slider(label="BPM", minimum=60, maximum=200, step=1, value=120)
226
  generate_midi_button = gr.Button("Generate MIDI")
227
+ midi_audio = gr.Audio(label="Generated MIDI Audio", source="upload", type="filepath")
228
 
229
  with gr.Column():
230
+ # Input components for music generation based on MIDI
231
+ prompt_duration = gr.Dropdown(label="Prompt Duration (seconds)", choices=list(range(1, 11)), value=5)
232
+ musicgen_model = gr.Dropdown(label="MusicGen Model", choices=[
233
  "thepatch/vanya_ai_dnb_0.1 (small)",
234
  "thepatch/budots_remix (small)",
235
  "thepatch/PhonkV2 (small)",
236
  "thepatch/bleeps-medium (medium)",
237
  "thepatch/hoenn_lofi (large)"
238
+ ], value="thepatch/vanya_ai_dnb_0.1 (small)")
239
+ num_iterations = gr.Slider(label="Number of Iterations", minimum=1, maximum=10, step=1, value=3)
 
240
  generate_music_button = gr.Button("Generate Music")
241
  output_audio = gr.Audio(label="Generated Music")
242
 
243
+ generate_midi_button.click(generate_midi, inputs=[seed, use_chords, chord_progression, bpm], outputs=midi_audio)
 
244
  generate_music_button.click(generate_music, inputs=[midi_audio, prompt_duration, musicgen_model, num_iterations, bpm], outputs=output_audio)
245
 
246
  iface.launch()