Documenting pt. 2
Browse files- audio_methods.py +23 -11
audio_methods.py
CHANGED
@@ -2,6 +2,7 @@
|
|
2 |
import tensorflow as tf
|
3 |
import numpy as np
|
4 |
import pandas as pd
|
|
|
5 |
|
6 |
# Audio
|
7 |
import pretty_midi
|
@@ -57,7 +58,7 @@ def midi_to_notes(midi_file: str) -> pd.DataFrame:
|
|
57 |
return notes_df / _SCALING_FACTORS # Scale
|
58 |
|
59 |
|
60 |
-
def display_audio(pm: pretty_midi.PrettyMIDI, seconds=-1):
|
61 |
"""
|
62 |
Display a song in PrettyMIDI format as a display.Audio object.
|
63 |
This method specially comes in useful in a jupyter notebook.
|
@@ -83,21 +84,21 @@ def display_audio(pm: pretty_midi.PrettyMIDI, seconds=-1):
|
|
83 |
|
84 |
# Define function to convert song map to wav
|
85 |
|
86 |
-
def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100):
|
87 |
"""
|
88 |
-
Convert "song map" to midi file (reverse process with respect to
|
|
|
89 |
|
90 |
Parameters:
|
91 |
-
song_map (pd.DataFrame):
|
92 |
-
|
|
|
|
|
93 |
|
94 |
Returns:
|
95 |
-
|
96 |
-
composed of pitch, duration and step.
|
97 |
"""
|
98 |
|
99 |
-
#
|
100 |
-
|
101 |
contracted_map = tf.squeeze(song_map)
|
102 |
song_map_T = contracted_map.numpy().T
|
103 |
notes = pd.DataFrame(song_map_T, columns=["pitch", "step", "duration"]).mul(_SCALING_FACTORS, axis=1)
|
@@ -122,10 +123,21 @@ def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100):
|
|
122 |
prev_start = start
|
123 |
|
124 |
pm.instruments.append(instrument)
|
125 |
-
|
|
|
126 |
return pm
|
127 |
|
128 |
-
def generate_and_display(out_file, model, z_sample=None, velocity=100, seconds=120):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
129 |
song_map = model.generate(z_sample)
|
130 |
wav = map_to_wav(song_map, out_file, velocity)
|
131 |
|
|
|
2 |
import tensorflow as tf
|
3 |
import numpy as np
|
4 |
import pandas as pd
|
5 |
+
from model import VAE
|
6 |
|
7 |
# Audio
|
8 |
import pretty_midi
|
|
|
58 |
return notes_df / _SCALING_FACTORS # Scale
|
59 |
|
60 |
|
61 |
+
def display_audio(pm: pretty_midi.PrettyMIDI, seconds=-1) -> display.Audio:
|
62 |
"""
|
63 |
Display a song in PrettyMIDI format as a display.Audio object.
|
64 |
This method specially comes in useful in a jupyter notebook.
|
|
|
84 |
|
85 |
# Define function to convert song map to wav
|
86 |
|
87 |
+
def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pretty_midi.PrettyMIDI:
|
88 |
"""
|
89 |
+
Convert "song map" to midi file (reverse process with respect to
|
90 |
+
midi_to_notes) and (optionally) save it, generating a PrettyMidi object in the process.
|
91 |
|
92 |
Parameters:
|
93 |
+
song_map (pd.DataFrame): 3xN matrix where each column is a note, composed of
|
94 |
+
pitch, duration and step.
|
95 |
+
out_file (str): Path or file to write .mid file to. If null, no saving is done.
|
96 |
+
velocity: Note loudness, i.e. the hardness a piano key is struck with.
|
97 |
|
98 |
Returns:
|
99 |
+
pretty_midi.PrettyMIDI: PrettyMIDI object containing the song's representation.
|
|
|
100 |
"""
|
101 |
|
|
|
|
|
102 |
contracted_map = tf.squeeze(song_map)
|
103 |
song_map_T = contracted_map.numpy().T
|
104 |
notes = pd.DataFrame(song_map_T, columns=["pitch", "step", "duration"]).mul(_SCALING_FACTORS, axis=1)
|
|
|
123 |
prev_start = start
|
124 |
|
125 |
pm.instruments.append(instrument)
|
126 |
+
if (out_file):
|
127 |
+
pm.write(out_file)
|
128 |
return pm
|
129 |
|
130 |
+
def generate_and_display(out_file: str, model: VAE, z_sample: tf.Tensor=None, velocity: int=100, seconds: int=120) -> display.Audio:
|
131 |
+
"""
|
132 |
+
Generate a song, (optionally) save it and display it as a display.Audio object.
|
133 |
+
|
134 |
+
Parameters:
|
135 |
+
|
136 |
+
|
137 |
+
Returns:
|
138 |
+
|
139 |
+
"""
|
140 |
+
|
141 |
song_map = model.generate(z_sample)
|
142 |
wav = map_to_wav(song_map, out_file, velocity)
|
143 |
|