TheStinger committed
Commit 7822118 • 1 Parent(s): 0b67cea

Update app.py

Files changed (1)
  app.py  +49 -66
app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import requests
 import random
 import os
-import zipfile # built in module for unzipping files (thank god)
+import zipfile
 import librosa
 import time
 from infer_rvc_python import BaseLoader
@@ -11,12 +11,10 @@ from tts_voice import tts_order_voice
 import edge_tts
 import tempfile
 import anyio
-from audio_separator.separator import Separator
 
 
 language_dict = tts_order_voice
 
-# ilaria tts implementation :rofl:
 async def text_to_speech_edge(text, language_code):
     voice = language_dict[language_code]
     communicate = edge_tts.Communicate(text, voice)
@@ -27,7 +25,6 @@ async def text_to_speech_edge(text, language_code):
 
     return tmp_path
 
-# fucking dogshit toggle
 try:
     import spaces
     spaces_status = True
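
Note: the two hunks above only touch the head and tail of `text_to_speech_edge`; its body is unchanged by this commit. As a rough, hedged sketch of what such an edge-tts helper boils down to (the temp-file handling below is an assumption for illustration, not code from this diff):

```python
# Hedged sketch of an edge-tts helper in the spirit of text_to_speech_edge;
# the tempfile handling is an assumption, not taken from this commit.
import tempfile

import edge_tts


async def text_to_speech_sketch(text: str, voice: str) -> str:
    communicate = edge_tts.Communicate(text, voice)
    # Reserve a temporary .mp3 path, let edge-tts write the speech there,
    # and return the path so a gr.Audio(type="filepath") component can play it.
    with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as tmp:
        tmp_path = tmp.name
    await communicate.save(tmp_path)
    return tmp_path
```

Gradio accepts `async def` handlers directly, which is why the UI further down can wire `text_to_speech_edge` straight into a click event without an explicit event loop.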
@@ -65,51 +62,50 @@ UVR_5_MODELS = [
 os.makedirs(TEMP_DIR, exist_ok=True)
 
 def unzip_file(file):
-    filename = os.path.basename(file).split(".")[0] # converts "model.zip" to "model" so we can do things
+    filename = os.path.basename(file).split(".")[0]
     with zipfile.ZipFile(file, 'r') as zip_ref:
-        zip_ref.extractall(os.path.join(TEMP_DIR, filename)) # might not be very ram efficient...
+        zip_ref.extractall(os.path.join(TEMP_DIR, filename))
     return True
 
 
-def progress_bar(total, current): # best progress bar ever trust me sunglasses emoji 😎
+def progress_bar(total, current):
     return "[" + "=" * int(current / total * 20) + ">" + " " * (20 - int(current / total * 20)) + "] " + str(int(current / total * 100)) + "%"
 
 def download_from_url(url, filename=None):
     if "/blob/" in url:
-        url = url.replace("/blob/", "/resolve/") # made it delik proof 😎
+        url = url.replace("/blob/", "/resolve/")
     if "huggingface" not in url:
         return ["The URL must be from huggingface", "Failed", "Failed"]
     if filename is None:
         filename = os.path.join(TEMP_DIR, MODEL_PREFIX + str(random.randint(1, 1000)) + ".zip")
     response = requests.get(url)
-    total = int(response.headers.get('content-length', 0)) # bytes to download (length of the file)
+    total = int(response.headers.get('content-length', 0))
    if total > 500000000:
 
         return ["The file is too large. You can only download files up to 500 MB in size.", "Failed", "Failed"]
     current = 0
     with open(filename, "wb") as f:
-        for data in response.iter_content(chunk_size=4096): # download in chunks of 4096 bytes (4kb - helps with memory usage and speed)
+        for data in response.iter_content(chunk_size=4096):
             f.write(data)
             current += len(data)
-            print(progress_bar(total, current), end="\r") # \r is a carriage return, it moves the cursor to the start of the line so its like tqdm sunglasses emoji 😎
+            print(progress_bar(total, current), end="\r")
 
-    # unzip because the model is in a zip file lel
 
     try:
         unzip_file(filename)
     except Exception as e:
-        return ["Failed to unzip the file", "Failed", "Failed"] # return early if it fails and like tell the user but its dogshit hahahahahahaha 😎 According to all known laws aviation, there is no way a bee should be able to fly.
-    unzipped_dir = os.path.join(TEMP_DIR, os.path.basename(filename).split(".")[0]) # just do what we did in unzip_file because we need the directory
+        return ["Failed to unzip the file", "Failed", "Failed"]
+    unzipped_dir = os.path.join(TEMP_DIR, os.path.basename(filename).split(".")[0])
     pth_files = []
     index_files = []
-    for root, dirs, files in os.walk(unzipped_dir): # could be done more efficiently because nobody stores models in subdirectories but like who cares (it's a futureproofing thing lel)
+    for root, dirs, files in os.walk(unzipped_dir):
         for file in files:
             if file.endswith(".pth"):
                 pth_files.append(os.path.join(root, file))
             elif file.endswith(".index"):
                 index_files.append(os.path.join(root, file))
 
-    print(pth_files, index_files) # debug print because im fucking stupid and i need to see what is going on
+    print(pth_files, index_files)
     global pth_file
     global index_file
     pth_file = pth_files[0]
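
Note on the download path kept by this hunk: `requests.get(url)` is called without `stream=True`, so the whole response is buffered in memory before `iter_content` runs; the 500 MB check and the progress bar therefore only act after the download has effectively finished. Separately, `os.path.basename(file).split(".")[0]` truncates at the first dot, so `my.model.zip` unpacks under `my`. A hedged sketch of a streaming variant, with illustrative names that are not part of this commit:

```python
# Hedged sketch: stream the download so the size limit and progress output
# apply while bytes arrive instead of after the body is fully buffered.
import os

import requests


def streaming_download_sketch(url: str, filename: str, limit: int = 500_000_000) -> bool:
    with requests.get(url, stream=True, timeout=30) as response:
        response.raise_for_status()
        total = int(response.headers.get("content-length", 0))
        if total > limit:
            return False  # refuse before writing anything to disk
        current = 0
        with open(filename, "wb") as f:
            for chunk in response.iter_content(chunk_size=4096):
                f.write(chunk)
                current += len(chunk)
                if total:
                    print(f"{current / total:.0%}", end="\r")
                if current > limit:  # content-length can be missing or wrong
                    break
        if current > limit:
            os.remove(filename)  # drop the partial file
            return False
    return True
```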
@@ -162,7 +158,7 @@ def calculate_remaining_time(epochs, seconds_per_epoch):
     else:
         return f"{int(hours)} hours and {int(minutes)} minutes"
 
-def inf_handler(audio, model_name): # its a shame that zerogpu just WONT cooperate with us
+def inf_handler(audio, model_name):
     model_found = False
     for model_info in UVR_5_MODELS:
         if model_info["model_name"] == model_name:
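
For context, `calculate_remaining_time` (whose tail appears at the top of this hunk) is simply epochs × seconds-per-epoch split into hours and minutes. A minimal hedged reconstruction, with the branches not visible in the diff assumed:

```python
# Hedged reconstruction of a calculate_remaining_time-style helper; only the
# final return line is visible in the diff, the other branches are assumed.
def estimate_remaining_time(epochs: int, seconds_per_epoch: float) -> str:
    total_seconds = epochs * seconds_per_epoch
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    if hours == 0 and minutes == 0:
        return f"{int(seconds)} seconds"
    if hours == 0:
        return f"{int(minutes)} minutes and {int(seconds)} seconds"
    return f"{int(hours)} hours and {int(minutes)} minutes"
```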
@@ -241,22 +237,31 @@ def upload_model(index_file, pth_file):
 with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose"), title="Ilaria RVC 💖") as demo:
     gr.Markdown("## Ilaria RVC 💖")
     with gr.Tab("Inference"):
-        sound_gui = gr.Audio(value=None,type="filepath",autoplay=False,visible=True,)
-        pth_file_ui = gr.Textbox(label="Model pth file",value=pth_file,visible=False,interactive=False,)
-        index_file_ui = gr.Textbox(label="Index pth file",value=index_file,visible=False,interactive=False,)
-
+        sound_gui = gr.Audio(value=None, type="filepath", autoplay=False, visible=True)
+        pth_file_ui = gr.Textbox(label="Model pth file", value=pth_file, visible=False, interactive=False)
+        index_file_ui = gr.Textbox(label="Index pth file", value=index_file, visible=False, interactive=False)
+
+        with gr.Accordion("Ilaria TTS", open=False):
+            text_tts = gr.Textbox(label="Text", placeholder="Hello!", lines=3, interactive=True)
+            dropdown_tts = gr.Dropdown(label="Language and Model", choices=list(language_dict.keys()), interactive=True, value=list(language_dict.keys())[0])
+
+            button_tts = gr.Button("Speak", variant="primary")
+
+            # Remove output_tts and use only sound_gui as the output
+            button_tts.click(text_to_speech_edge, inputs=[text_tts, dropdown_tts], outputs=sound_gui)
+
         with gr.Accordion("Settings", open=False):
-            pitch_algo_conf = gr.Dropdown(PITCH_ALGO_OPT,value=PITCH_ALGO_OPT[4],label="Pitch algorithm",visible=True,interactive=True,)
-            pitch_lvl_conf = gr.Slider(label="Pitch level (lower -> 'male' while higher -> 'female')",minimum=-24,maximum=24,step=1,value=0,visible=True,interactive=True,)
-            index_inf_conf = gr.Slider(minimum=0,maximum=1,label="Index influence -> How much accent is applied",value=0.75,)
-            respiration_filter_conf = gr.Slider(minimum=0,maximum=7,label="Respiration median filtering",value=3,step=1,interactive=True,)
-            envelope_ratio_conf = gr.Slider(minimum=0,maximum=1,label="Envelope ratio",value=0.25,interactive=True,)
-            consonant_protec_conf = gr.Slider(minimum=0,maximum=0.5,label="Consonant breath protection",value=0.5,interactive=True,)
-
-        button_conf = gr.Button("Convert",variant="primary",)
-        output_conf = gr.Audio(type="filepath",label="Output",)
-
-        button_conf.click(lambda :None, None, output_conf)
+            pitch_algo_conf = gr.Dropdown(PITCH_ALGO_OPT, value=PITCH_ALGO_OPT[4], label="Pitch algorithm", visible=True, interactive=True)
+            pitch_lvl_conf = gr.Slider(label="Pitch level (lower -> 'male' while higher -> 'female')", minimum=-24, maximum=24, step=1, value=0, visible=True, interactive=True)
+            index_inf_conf = gr.Slider(minimum=0, maximum=1, label="Index influence -> How much accent is applied", value=0.75)
+            respiration_filter_conf = gr.Slider(minimum=0, maximum=7, label="Respiration median filtering", value=3, step=1, interactive=True)
+            envelope_ratio_conf = gr.Slider(minimum=0, maximum=1, label="Envelope ratio", value=0.25, interactive=True)
+            consonant_protec_conf = gr.Slider(minimum=0, maximum=0.5, label="Consonant breath protection", value=0.5, interactive=True)
+
+        button_conf = gr.Button("Convert", variant="primary")
+        output_conf = gr.Audio(type="filepath", label="Output")
+
+        button_conf.click(lambda: None, None, output_conf)
         button_conf.click(
             run,
             inputs=[
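
Note: attaching two listeners to the same button, `button_conf.click(lambda: None, None, output_conf)` followed by `button_conf.click(run, ...)`, is a common Gradio idiom: the first call resets the output component, the second fills it with the new result, and both fire on the same click. A stripped-down hedged sketch of the pattern with illustrative component names that are not part of this commit:

```python
# Hedged, minimal illustration of the clear-then-run click pattern used above;
# the components and the slow_job stand-in are not part of this commit.
import time

import gradio as gr


def slow_job(text: str) -> str:
    time.sleep(2)  # stand-in for the real conversion
    return text.upper()


with gr.Blocks() as sketch:
    inp = gr.Textbox(label="Input")
    btn = gr.Button("Run")
    out = gr.Textbox(label="Output")
    btn.click(lambda: None, None, out)                # listener 1: reset the output
    btn.click(slow_job, inputs=[inp], outputs=[out])  # listener 2: compute the result

if __name__ == "__main__":
    sketch.launch()
```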
@@ -270,17 +275,6 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
             ],
             outputs=[output_conf],
         )
-
-    with gr.Tab("Ilaria TTS"):
-        text_tts = gr.Textbox(label="Text", placeholder="Hello!", lines=3, interactive=True,)
-        dropdown_tts = gr.Dropdown(label="Language and Model",choices=list(language_dict.keys()),interactive=True, value=list(language_dict.keys())[0])
-
-        button_tts = gr.Button("Speak", variant="primary",)
-
-        output_tts = gr.Audio(type="filepath", label="Output",)
-
-        button_tts.click(text_to_speech_edge, inputs=[text_tts, dropdown_tts], outputs=[output_tts])
-
 
     with gr.Tab("Model Loader (Download and Upload)"):
         with gr.Accordion("Model Downloader", open=False):
@@ -301,20 +295,6 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
 
             upload_button.click(upload_model, [index_file_upload, pth_file_upload], upload_status)
 
-
-    with gr.Tab("Vocal Separator (UVR)"):
-        gr.Markdown("Separate vocals and instruments from an audio file using UVR models. - This is only on CPU due to ZeroGPU being ZeroGPU :(")
-        uvr5_audio_file = gr.Audio(label="Audio File",type="filepath")
-
-        with gr.Row():
-            uvr5_model = gr.Dropdown(label="Model", choices=[model["model_name"] for model in UVR_5_MODELS])
-            uvr5_button = gr.Button("Separate Vocals", variant="primary",)
-
-        uvr5_output_voc = gr.Audio(type="filepath", label="Output 1",) # UVR models sometimes output it in a weird way where it's like the positions swap randomly, so let's just call them Outputs lol
-        uvr5_output_inst = gr.Audio(type="filepath", label="Output 2",)
-
-        uvr5_button.click(inference, [uvr5_audio_file, uvr5_model], [uvr5_output_voc, uvr5_output_inst])
-
     with gr.Tab("Extra"):
         with gr.Accordion("Training Time Calculator", open=False):
             with gr.Column():
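
The removed tab above depended on the `audio-separator` package, whose `Separator` import is also dropped earlier in this diff. For anyone restoring it, a hedged sketch of that library's basic flow, based on its public API rather than on this commit (the checkpoint name is an assumption):

```python
# Hedged sketch of the audio-separator flow the removed UVR tab relied on;
# the checkpoint name and output handling are illustrative assumptions.
from audio_separator.separator import Separator


def separate_sketch(audio_path: str) -> list[str]:
    separator = Separator()  # runs on CPU when no GPU is available
    separator.load_model(model_filename="2_HP-UVR.pth")  # assumed checkpoint name
    # separate() writes the stems to disk and returns their file paths; which
    # file is vocals and which is instrumental depends on the chosen model.
    return separator.separate(audio_path)
```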
@@ -328,15 +308,18 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
                     inputs=[epochs_input, seconds_input],
                     outputs=[remaining_time_output]
                 )
-
-        with gr.Accordion("Model Fusion", open=False):
-            gr.Markdown(value="Fusion of two models to create a new model - coming soon! 😎")
-
-        with gr.Accordion("Model Quantization", open=False):
-            gr.Markdown(value="Quantization of a model to reduce its size - coming soon! 😎")
 
-        with gr.Accordion("Training Helper", open=False):
-            gr.Markdown(value="Help for training models - coming soon! 😎")
+        with gr.Accordion('Training Helper', open=False):
+            with gr.Column():
+                audio_input = gr.Audio(type="filepath", label="Upload your audio file")
+                gr.Text("Please note that these results are approximate and intended to provide a general idea for beginners.", label='Notice:')
+                training_info_output = gr.Markdown(label="Training Information:")
+                get_info_button = gr.Button("Get Training Info")
+                get_info_button.click(
+                    fn=on_button_click,
+                    inputs=[audio_input],
+                    outputs=[training_info_output]
+                )
 
     with gr.Tab("Credits"):
         gr.Markdown(
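
The new Training Helper wires `on_button_click` (defined elsewhere in app.py and not shown in this diff) to an uploaded audio file. Purely as a hedged illustration of the kind of inspection such a helper could do, assuming it only needs the clip's duration via librosa:

```python
# Hypothetical sketch only: on_button_click is outside this diff, so this just
# shows a duration check of the uploaded clip; the wording is illustrative.
import librosa


def training_hint_sketch(audio_path: str) -> str:
    y, sr = librosa.load(audio_path, sr=None)  # keep the native sample rate
    minutes = librosa.get_duration(y=y, sr=sr) / 60.0
    return (f"Detected about {minutes:.1f} minutes of audio at {sr} Hz; "
            "the real helper turns figures like these into rough training suggestions for beginners.")
```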
@@ -351,4 +334,4 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue="pink", secondary_hue="rose")
             """
         )
 
-demo.queue(api_open=False).launch(show_api=False) # idk ilaria if you want or dont want to
+demo.queue(api_open=False).launch(show_api=False)
 