# https://huggingface.co/spaces/asigalov61/Melody2Song-Seq2Seq-Music-Transformer
import os
import time as reqtime
import datetime
from pytz import timezone
import torch
import spaces
import gradio as gr
from x_transformer_1_23_2 import *
import random
import tqdm
from midi_to_colab_audio import midi_to_colab_audio
import TMIDIX
import matplotlib.pyplot as plt
in_space = os.getenv("SYSTEM") == "spaces"
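# Token vocabulary, as implied by the decoding loop in GenerateSong() below
# (the semantics of ids 384-513 are not shown in this file and are assumed
# to be control/terminator tokens):
#   0-127   : delta start-time, in steps of 32 (likely milliseconds, given
#             the TMIDIX "ms SONG" converter used for rendering)
#   128-255 : note duration, in the same 32-unit steps
#   256-383 : MIDI pitch (token - 256)
#   514     : padding (PAD_IDX)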
# =================================================================================================
@spaces.GPU
def GenerateSong(input_melody_seed_number):

    print('=' * 70)
    print('Req start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    start_time = reqtime.time()

    print('Loading model...')

    SEQ_LEN = 2560
    PAD_IDX = 514
    DEVICE = 'cuda'

    # Instantiate the model
    model = TransformerWrapper(
        num_tokens = PAD_IDX+1,
        max_seq_len = SEQ_LEN,
        attn_layers = Decoder(dim = 1024, depth = 24, heads = 16, attn_flash = True)
    )

    model = AutoregressiveWrapper(model, ignore_index = PAD_IDX)

    model.to(DEVICE)

    print('=' * 70)
    print('Loading model checkpoint...')

    model.load_state_dict(
        torch.load('Melody2Song_Seq2Seq_Music_Transformer_Trained_Model_28482_steps_0.719_loss_0.7865_acc.pth',
                   map_location=DEVICE))

    print('=' * 70)

    model.eval()

    # bfloat16 autocast works on both CPU and CUDA
    dtype = torch.bfloat16

    ctx = torch.amp.autocast(device_type=DEVICE, dtype=dtype)

    print('Done!')
    print('=' * 70)

    print('Input melody seed number:', input_melody_seed_number)
    print('-' * 70)
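    # NOTE: The generation code below conditions on `melody_chords`, `chords`,
    # `times`, `durs`, `input_num_tokens` and `input_conditioning_type`, which
    # are expected to be prepared from the seed melody selected by
    # `input_melody_seed_number` before this point. A minimal sketch of that
    # preparation step, with hypothetical names (`seed_melodies_data` and its
    # layout are assumptions, not part of this file):
    #
    #   seed = seed_melodies_data[input_melody_seed_number]
    #   melody_chords = seed['melody_chords']
    #   chords, times, durs = seed['chords'], seed['times'], seed['durs']
    #   input_num_tokens = len(chords)
    #   input_conditioning_type = 'Chords-Times-Durations'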
    #==================================================================

    print('=' * 70)
    print('Sample output events', melody_chords[:5])
    print('=' * 70)
    print('Generating...')

    # Generation settings
    max_chords_limit = 8
    temperature = 0.9
    num_memory_tokens = 4096

    output = []
    idx = 0

    # Feed the conditioning tokens one chord at a time, letting the model
    # fill in notes after each one
    for c in chords[:input_num_tokens]:

        output.append(c)

        if input_conditioning_type == 'Chords-Times' or input_conditioning_type == 'Chords-Times-Durations':
            output.append(times[idx])

        if input_conditioning_type == 'Chords-Times-Durations':
            output.append(durs[idx])

        x = torch.tensor([output], dtype=torch.long, device=DEVICE)

        o = 0
        ncount = 0

        # Sample one token per forward pass until a control token (>= 384)
        # appears or max_chords_limit pitch tokens have been generated
        while o < 384 and ncount < max_chords_limit:
            with ctx:
                out = model.generate(x[:, -num_memory_tokens:],  # truncate context to the last num_memory_tokens tokens
                                     1,
                                     temperature=temperature,
                                     return_prime=False,
                                     verbose=False)

            o = out.tolist()[0][0]

            if 256 <= o < 384:  # pitch token
                ncount += 1

            if o < 384:
                x = torch.cat((x, out), 1)

        # Collect only the newly generated tokens beyond the current prime
        outy = x.tolist()[0][len(output):]

        output.extend(outy)

        idx += 1

        if idx == len(chords[:input_num_tokens])-1:
            break

    print('=' * 70)
    print('Done!')
    print('=' * 70)
    #===============================================================================

    print('Rendering results...')
    print('=' * 70)
    print('Sample INTs', output[:12])
    print('=' * 70)

    out1 = output

    if len(out1) != 0:

        song = out1
        song_f = []

        time = 0
        dur = 0
        vel = 90
        pitch = 0
        channel = 0

        patches = [0] * 16

        # Decode the token stream into TMIDIX note events:
        # ['note', start_time, duration, channel, pitch, velocity, patch]
        for ss in song:

            if 0 <= ss < 128:       # delta start-time token
                time += ss * 32

            if 128 <= ss < 256:     # duration token
                dur = (ss-128) * 32

            if 256 <= ss < 384:     # pitch token: emit a note event
                pitch = (ss-256)
                vel = max(40, pitch)

                song_f.append(['note', time, dur, channel, pitch, vel, 0])

    fn1 = "Chords-Progressions-Transformer-Composition"

    detailed_stats = TMIDIX.Tegridy_ms_SONG_to_MIDI_Converter(song_f,
                                                              output_signature = 'Chords Progressions Transformer',
                                                              output_file_name = fn1,
                                                              track_name='Project Los Angeles',
                                                              list_of_MIDI_patches=patches
                                                              )

    new_fn = fn1+'.mid'

    # Render the MIDI file to audio through the soundfont loaded in __main__
    audio = midi_to_colab_audio(new_fn,
                                soundfont_path=soundfont,
                                sample_rate=16000,
                                volume_scale=10,
                                output_for_gradio=True
                                )

    print('Done!')
    print('=' * 70)

    #========================================================

    output_midi_title = str(fn1)
    output_midi_summary = str(song_f[:3])
    output_midi = str(new_fn)
    output_audio = (16000, audio)

    output_plot = TMIDIX.plot_ms_SONG(song_f, plot_title=output_midi, return_plt=True)

    print('Output MIDI file name:', output_midi)
    print('Output MIDI title:', output_midi_title)
    print('Output MIDI summary:', output_midi_summary)
    print('=' * 70)

    #========================================================

    print('-' * 70)
    print('Req end time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('-' * 70)
    print('Req execution time:', (reqtime.time() - start_time), 'sec')

    return output_midi_title, output_midi_summary, output_midi, output_audio, output_plot
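# Example of invoking the generator directly (outside Gradio), assuming a CUDA
# device and that the checkpoint, soundfont and seed-melody data are present:
#
#   title, summary, midi_path, (sr, audio), plot = GenerateSong(0)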
# =================================================================================================
if __name__ == "__main__":

    PDT = timezone('US/Pacific')

    print('=' * 70)
    print('App start time: {:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now(PDT)))
    print('=' * 70)

    soundfont = "SGM-v2.01-YamahaGrand-Guit-Bass-v2.7.sf2"

    app = gr.Blocks()

    with app:
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Melody2Song Seq2Seq Music Transformer</h1>")
        gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>Generate unique songs from melodies with a seq2seq music transformer</h1>")
        gr.Markdown(
            "![Visitors](https://api.visitorbadge.io/api/visitors?path=asigalov61.Melody2Song-Seq2Seq-Music-Transformer&style=flat)\n\n")

        input_melody_seed_number = gr.Slider(0, 200000, value=0, step=1, label="Select seed melody number")

        run_btn = gr.Button("Generate", variant="primary")

        gr.Markdown("## Generation results")

        output_midi_title = gr.Textbox(label="Output MIDI title")
        output_midi_summary = gr.Textbox(label="Output MIDI summary")
        output_audio = gr.Audio(label="Output MIDI audio", format="wav", elem_id="midi_audio")
        output_plot = gr.Plot(label="Output MIDI score plot")
        output_midi = gr.File(label="Output MIDI file", file_types=[".mid"])

        run_event = run_btn.click(GenerateSong, [input_melody_seed_number],
                                  [output_midi_title, output_midi_summary, output_midi, output_audio, output_plot])

    app.queue().launch()
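# To run locally: `python app.py`, then open the printed Gradio URL.
# queue() serializes incoming requests (one generation at a time by default);
# on Hugging Face Spaces the app is launched automatically.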