thecollabagepatch committed
Commit 717ff8a
1 Parent(s): 0265de2

multiple robots trying here

Files changed (1): app.py (+8 -18)
app.py CHANGED
@@ -117,27 +117,19 @@ def generate_midi(seed, use_chords, chord_progression, bpm):
     # Clean up temporary MIDI file
     os.remove(midi_filename)
 
-    # Load the generated audio
-    song, sr = torchaudio.load(wav_filename)
-
-    # Clean up temporary MIDI file
-    os.remove(midi_filename)
-    os.remove(wav_filename)
-
-    return song.numpy(), sr
+    return wav_filename
 
 @spaces.GPU(duration=120)
-def generate_music(midi_data, prompt_duration, musicgen_model, num_iterations, bpm):
-    audio_data, sample_rate = midi_data
-
-    # Convert the audio data to a PyTorch tensor
-    song = torch.from_numpy(audio_data).to(device)
+def generate_music(midi_audio_path, prompt_duration, musicgen_model, num_iterations, bpm):
+    # Load the generated audio
+    song, sr = torchaudio.load(midi_audio_path)
+    song = song.to(device)
 
     # Use the user-provided BPM value for duration calculation
     duration = calculate_duration(bpm)
 
     # Create slices from the song using the user-provided BPM value
-    slices = create_slices(song, sample_rate, 35, bpm, num_slices=5)
+    slices = create_slices(song, sr, 35, bpm, num_slices=5)
 
     # Load the model
     model_name = musicgen_model.split(" ")[0]
@@ -160,10 +152,10 @@ def generate_music(midi_data, prompt_duration, musicgen_model, num_iterations, bpm):
 
         print(f"Running iteration {i + 1} using slice {slice_idx}...")
 
-        prompt_waveform = slices[slice_idx][..., :int(prompt_duration * sample_rate)]
+        prompt_waveform = slices[slice_idx][..., :int(prompt_duration * sr)]
         prompt_waveform = preprocess_audio(prompt_waveform)
 
-        output = model_continue.generate_continuation(prompt_waveform, prompt_sample_rate=sample_rate, progress=True)
+        output = model_continue.generate_continuation(prompt_waveform, prompt_sample_rate=sr, progress=True)
         output = output.cpu()  # Move the output tensor back to CPU
 
         # Make sure the output tensor has at most 2 dimensions
@@ -190,8 +182,6 @@ def generate_music(midi_data, prompt_duration, musicgen_model, num_iterations, bpm):
 
     return combined_audio_filename
 
-
-
 # Define the expandable sections
 musiclang_blurb = """
 ## musiclang
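
In effect, the commit changes the handoff between the two @spaces.GPU stages: generate_midi now returns the path of the WAV file it rendered instead of a (numpy array, sample rate) tuple, and generate_music loads that file itself with torchaudio and moves it to the device. A minimal sketch of the new call chain, with hypothetical argument values (only the two signatures come from this diff; the surrounding Gradio wiring is not shown here):

# Sketch of the new two-stage flow; all argument values below are
# hypothetical placeholders, only the signatures appear in this commit.

# Stage 1: render the MIDI to a WAV on disk and hand back just the path.
wav_path = generate_midi(
    seed=42,
    use_chords=True,
    chord_progression="C Am F G",
    bpm=120,
)

# Stage 2: the WAV is loaded inside the GPU-decorated function itself,
# so only a short string crosses between the two @spaces.GPU calls,
# not a large decoded waveform.
combined_wav = generate_music(
    wav_path,
    prompt_duration=6,
    musicgen_model="facebook/musicgen-small (small)",  # hypothetical label; the code keeps split(" ")[0]
    num_iterations=3,
    bpm=120,
)

Passing a filename rather than decoded samples keeps the payload exchanged between the two GPU-decorated steps small and trivially serializable, which appears to be the point of the refactor.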