TomRB22 commited on
Commit
e878ec7
1 Parent(s): edac276

Documenting pt. 3

Browse files
Files changed (1) hide show
  1. audio_methods.py +16 -8
audio_methods.py CHANGED
@@ -80,9 +80,7 @@ def display_audio(pm: pretty_midi.PrettyMIDI, seconds=-1) -> display.Audio:
80
  waveform_short = waveform[:seconds*_SAMPLING_RATE]
81
 
82
  return display.Audio(waveform_short, rate=_SAMPLING_RATE)
83
-
84
-
85
- # Define function to convert song map to wav
86
 
87
  def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pretty_midi.PrettyMIDI:
88
  """
@@ -92,7 +90,7 @@ def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pret
92
  Parameters:
93
  song_map (pd.DataFrame): 3xN matrix where each column is a note, composed of
94
  pitch, duration and step.
95
- out_file (str): Path or file to write .mid file to. If null, no saving is done.
96
  velocity: Note loudness, i. e. the hardness a piano key is struck with.
97
 
98
  Returns:
@@ -127,15 +125,25 @@ def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pret
127
  pm.write(out_file)
128
  return pm
129
 
130
- def generate_and_display(out_file:str, model: VAE, out_file: str=null, z_sample: tf.Tensor=None, velocity: int=100, seconds: int=120) -> display.Audio:
 
 
 
 
131
  """
132
- Generate a song, (optionally) save it and display it [CONTINUE]
133
 
134
  Parameters:
135
-
 
 
 
 
 
 
136
 
137
  Returns:
138
-
139
  """
140
 
141
  song_map = model.generate(z_sample)
 
80
  waveform_short = waveform[:seconds*_SAMPLING_RATE]
81
 
82
  return display.Audio(waveform_short, rate=_SAMPLING_RATE)
83
+
 
 
84
 
85
  def map_to_wav(song_map: pd.DataFrame, out_file: str, velocity: int=100) -> pretty_midi.PrettyMIDI:
86
  """
 
90
  Parameters:
91
  song_map (pd.DataFrame): 3xN matrix where each column is a note, composed of
92
  pitch, duration and step.
93
+ out_file (str): Path or file to write .mid file to. If None, no saving is done.
94
  velocity: Note loudness, i. e. the hardness a piano key is struck with.
95
 
96
  Returns:
 
125
  pm.write(out_file)
126
  return pm
127
 
128
+ def generate_and_display(model: VAE,
129
+ out_file: str=None,
130
+ z_sample: tf.Tensor=None,
131
+ velocity: int=100,
132
+ seconds: int=120) -> display.Audio:
133
  """
134
+ Generate a song, (optionally) save it and display it.
135
 
136
  Parameters:
137
+ model (VAE): Instance of VAE to generate the song with.
138
+ out_file (str): Path or file to write .mid file to. If None, no saving is done.
139
+ z_sample (tf.Tensor): Song encoding used to generate a song. If None,
140
+ generate an unconditioned piece.
141
+ velocity: Note loudness, i. e. the hardness a piano key is struck with.
142
+ seconds (int): Number of seconds of the song to display. When
143
+ set to -1, the full length is taken.
144
 
145
  Returns:
146
+ display.Audio: Song as an object allowing for display.
147
  """
148
 
149
  song_map = model.generate(z_sample)