Commit: modify app

Files changed:
- app.py +2 -2
- inference.py +4 -4
app.py  CHANGED

@@ -234,8 +234,8 @@ with gr.Blocks() as demo:
         with gr.Row():
             with gr.Column():
                 ito_output_audio = gr.Audio(label="ITO Output Audio")
-                ito_param_output = gr.Textbox(label="ITO Predicted Parameters", lines=15)
                 ito_step_slider = gr.Slider(minimum=1, maximum=100, step=1, label="ITO Step", interactive=True)
+                ito_param_output = gr.Textbox(label="ITO Predicted Parameters", lines=15)
             with gr.Column():
                 ito_loss_plot = gr.LinePlot(
                     x="step",
@@ -249,7 +249,7 @@ with gr.Blocks() as demo:
         ito_log = gr.Textbox(label="ITO Log", lines=10)

     all_results = gr.State([])
-
+
     ito_button.click(
         perform_ito,
         inputs=[input_audio, reference_audio, ito_reference_audio, num_steps, optimizer, learning_rate, af_weights],
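Taken together, the app.py hunks reorder the ITO output column so the step slider sits above the predicted-parameters textbox; the second hunk only touches a blank line before the ito_button.click wiring. Below is a minimal sketch of the resulting left column, assuming only the components and labels visible in the hunks and a plain gr.Blocks layout around them; the LinePlot's full configuration and the event wiring are not shown in the diff and are omitted here.

    import gradio as gr

    # Minimal sketch of the reordered column after this commit. Only the
    # components visible in the hunk are included; the surrounding demo,
    # the LinePlot's remaining arguments, and the button wiring are assumed.
    with gr.Blocks() as demo:
        with gr.Row():
            with gr.Column():
                ito_output_audio = gr.Audio(label="ITO Output Audio")
                # The step slider now comes first ...
                ito_step_slider = gr.Slider(
                    minimum=1, maximum=100, step=1, label="ITO Step", interactive=True
                )
                # ... and the predicted-parameters textbox moves below it.
                ito_param_output = gr.Textbox(label="ITO Predicted Parameters", lines=15)
            with gr.Column():
                # Loss plot column; only the x-axis binding appears in the diff.
                ito_loss_plot = gr.LinePlot(x="step")

Launching this sketch (demo.launch()) renders the slider directly above the parameters textbox, which is the visible effect of the swap.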
inference.py  CHANGED

@@ -245,11 +245,11 @@ class MasteringStyleTransfer:
             for param_name, param_value in fx_params.items():
                 if isinstance(param_value, torch.Tensor):
                     param_value = param_value.item()
-                output.append(f" {param_name}: {param_value:.
+                output.append(f" {param_name}: {param_value:.2f}")
         elif isinstance(fx_params, torch.Tensor):
-            output.append(f" {fx_params.item():.
+            output.append(f" {fx_params.item():.2f}")
         else:
-            output.append(f" {fx_params:.
+            output.append(f" {fx_params:.2f}")

         return "\n".join(output)

@@ -278,7 +278,7 @@ class MasteringStyleTransfer:

         output = [f" Top {top_n} parameter differences (initial / ITO / normalized diff):"]
         for fx_name, param_name, initial_value, ito_value, normalized_diff in top_diffs:
-            output.append(f" {fx_name.upper()} - {param_name}: {initial_value:.
+            output.append(f" {fx_name.upper()} - {param_name}: {initial_value:.2f} / {ito_value:.2f} / {normalized_diff:.2f}")

         return "\n".join(output)
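The inference.py hunks touch two formatting helpers on MasteringStyleTransfer; the pre-change format specifiers are cut off in this view, so only the post-change precision (.2f) is visible. Below is a standalone sketch of how those two code paths read after the commit. The enclosing isinstance(fx_params, dict) branch and the function names are assumptions for illustration, not the class's actual method signatures.

    import torch

    # Hedged sketch of the per-parameter formatting path after this commit.
    # The real code is a method on MasteringStyleTransfer; the dict branch
    # and the names format_fx_params / format_top_diffs are hypothetical.
    def format_fx_params(fx_params):
        output = []
        if isinstance(fx_params, dict):
            for param_name, param_value in fx_params.items():
                if isinstance(param_value, torch.Tensor):
                    param_value = param_value.item()
                # Every value is now printed with two decimal places.
                output.append(f" {param_name}: {param_value:.2f}")
        elif isinstance(fx_params, torch.Tensor):
            output.append(f" {fx_params.item():.2f}")
        else:
            output.append(f" {fx_params:.2f}")
        return "\n".join(output)

    # The same two-decimal formatting applied to the top parameter differences.
    def format_top_diffs(top_diffs, top_n):
        output = [f" Top {top_n} parameter differences (initial / ITO / normalized diff):"]
        for fx_name, param_name, initial_value, ito_value, normalized_diff in top_diffs:
            output.append(
                f" {fx_name.upper()} - {param_name}: "
                f"{initial_value:.2f} / {ito_value:.2f} / {normalized_diff:.2f}"
            )
        return "\n".join(output)

    # Example: format_fx_params({"threshold": torch.tensor(-23.456)})
    # returns " threshold: -23.46".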