Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -97,7 +97,7 @@ def get_result(text_prompts, style_indx, musicAI_indx, duration):
|
|
97 |
spec_image, music_output = get_music(interrogate_prompt + ", " + style_list_EN[style_indx], musicAI_indx, duration)
|
98 |
|
99 |
video_merged = merge_video(music_output, image_output)
|
100 |
-
return {spec_result:spec_image, video_result:video_merged, status_text:'Success', share_button:gr.update(visible=True), community_icon:gr.update(visible=True), loading_icon:gr.update(visible=True)}
|
101 |
|
102 |
def get_music(prompt, musicAI_indx, duration):
|
103 |
if musicAI_indx == 0:
|
@@ -502,7 +502,9 @@ with block:
|
|
502 |
share_button.click(None, [], [], _js=share_js)
|
503 |
|
504 |
video_result = gr.Video(type=None, label='Final Merged video', elem_id="output-video")
|
505 |
-
|
|
|
|
|
506 |
|
507 |
trigger_component = gr.Textbox(vaule="", visible=False) # This component is used for triggering inference function.
|
508 |
translated_language = gr.Textbox(vaule="", visible=False)
|
@@ -515,7 +517,7 @@ with block:
|
|
515 |
musicAI.change(fn=change_music_generator, inputs=[musicAI], outputs=[duration_input])
|
516 |
text.submit(translate_language, inputs=[text], outputs=[language_tips_text, status_text, trigger_component, translated_language])
|
517 |
btn.click(translate_language, inputs=[text], outputs=[language_tips_text, status_text, trigger_component, translated_language])
|
518 |
-
trigger_component.change(fn=get_result, inputs=[translated_language, styles, musicAI, duration_input], outputs=[spec_result, video_result, status_text, share_button, community_icon, loading_icon])
|
519 |
|
520 |
|
521 |
gr.Markdown(
|
@@ -533,4 +535,4 @@ with block:
|
|
533 |
''')
|
534 |
|
535 |
|
536 |
-
block.queue().launch()
|
|
|
97 |
spec_image, music_output = get_music(interrogate_prompt + ", " + style_list_EN[style_indx], musicAI_indx, duration)
|
98 |
|
99 |
video_merged = merge_video(music_output, image_output)
|
100 |
+
return {spec_result:spec_image, imgfile_result:image_output, musicfile_result:music_output, video_result:video_merged, status_text:'Success', share_button:gr.update(visible=True), community_icon:gr.update(visible=True), loading_icon:gr.update(visible=True)}
|
101 |
|
102 |
def get_music(prompt, musicAI_indx, duration):
|
103 |
if musicAI_indx == 0:
|
|
|
502 |
share_button.click(None, [], [], _js=share_js)
|
503 |
|
504 |
video_result = gr.Video(type=None, label='Final Merged video', elem_id="output-video")
|
505 |
+
imgfile_result = gr.Image(label="Art Cover", elem_id="output-img")
|
506 |
+
musicfile_result = gr.Audio(type='filepath', label="Generated Music Track", elem_id="output-music")
|
507 |
+
spec_result = gr.Image(label="Spectrogram Image")
|
508 |
|
509 |
trigger_component = gr.Textbox(vaule="", visible=False) # This component is used for triggering inference function.
|
510 |
translated_language = gr.Textbox(vaule="", visible=False)
|
|
|
517 |
musicAI.change(fn=change_music_generator, inputs=[musicAI], outputs=[duration_input])
|
518 |
text.submit(translate_language, inputs=[text], outputs=[language_tips_text, status_text, trigger_component, translated_language])
|
519 |
btn.click(translate_language, inputs=[text], outputs=[language_tips_text, status_text, trigger_component, translated_language])
|
520 |
+
trigger_component.change(fn=get_result, inputs=[translated_language, styles, musicAI, duration_input], outputs=[spec_result, imgfile_result, musicfile_result, video_result, status_text, share_button, community_icon, loading_icon])
|
521 |
|
522 |
|
523 |
gr.Markdown(
|
|
|
535 |
''')
|
536 |
|
537 |
|
538 |
+
block.queue().launch()
|