Documenting pt. 3
audio_methods.py (+16 -8)
@@ -80,9 +80,7 @@ def display_audio(pm: pretty_midi.PrettyMIDI, seconds=-1) -> display.Audio:
     waveform_short = waveform[:seconds*_SAMPLING_RATE]
 
     return display.Audio(waveform_short, rate=_SAMPLING_RATE)
-
-
-# Define function to convert song map to wav
+
 
 def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pretty_midi.PrettyMIDI:
     """
@@ -92,7 +90,7 @@ def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pretty_midi.PrettyMIDI:
     Parameters:
         song_map (pd.DataFrame): 3xN matrix where each column is a note, composed of
             pitch, duration and step.
-        out_file (str): Path or file to write .mid file to. If
+        out_file (str): Path or file to write .mid file to. If None, no saving is done.
         velocity: Note loudness, i.e. the hardness with which a piano key is struck.
 
     Returns:
@@ -127,15 +125,25 @@ def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pretty_midi.PrettyMIDI:
     pm.write(out_file)
     return pm
 
-def generate_and_display(
+def generate_and_display(model: VAE,
+                         out_file: str=None,
+                         z_sample: tf.Tensor=None,
+                         velocity: int=100,
+                         seconds: int=120) -> display.Audio:
     """
-    Generate a song, (optionally) save it and display it
+    Generate a song, (optionally) save it and display it.
 
     Parameters:
-
+        model (VAE): Instance of VAE to generate the song with.
+        out_file (str): Path or file to write .mid file to. If None, no saving is done.
+        z_sample (tf.Tensor): Song encoding used to generate a song. If None, an
+            unconditioned piece is generated.
+        velocity: Note loudness, i.e. the hardness with which a piano key is struck.
+        seconds (int): Number of seconds of the song to display. When
+            set to -1, the full length is taken.
 
     Returns:
-
+        display.Audio: Song as an object allowing for display.
     """
 
     song_map = model.generate(z_sample)
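For context, a minimal sketch of calling map_to_wav directly. This is not part of the commit: the row order of the song map (pitch, step, duration) and the example values are assumptions read off the docstring, not verified against the rest of the repository.

    import pandas as pd
    from audio_methods import map_to_wav

    # Hypothetical 3x3 song map: one column per note. Row semantics
    # (pitch / step / duration) are assumed from the docstring.
    song_map = pd.DataFrame(
        [[60, 64, 67],      # pitch: MIDI note numbers (a C major triad)
         [0.0, 0.5, 0.5],   # step: seconds since the previous note started
         [0.5, 0.5, 1.0]],  # duration: seconds each note is held
        index=["pitch", "step", "duration"])

    # Writes triad.mid and returns the pretty_midi.PrettyMIDI object.
    pm = map_to_wav(song_map, "triad.mid", velocity=100)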
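And a sketch of the newly documented generate_and_display signature in use. Here load_trained_vae is a hypothetical helper standing in for however the project restores its VAE, and the latent shape is a placeholder, not taken from the diff.

    import tensorflow as tf
    from audio_methods import generate_and_display

    model = load_trained_vae()             # hypothetical helper, not in the diff
    z_sample = tf.random.normal([1, 128])  # placeholder latent shape

    # Generate a song from z_sample, write it to out.mid, and get back an
    # IPython display.Audio object covering the first 30 seconds.
    audio = generate_and_display(model,
                                 out_file="out.mid",
                                 z_sample=z_sample,
                                 velocity=100,
                                 seconds=30)

Per the new docstring, passing z_sample=None yields an unconditioned piece, and out_file=None skips writing the .mid file.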