import gradio as gr


def create_demo(get_video_lora):
    """Build the Gradio UI for LoRA text-to-video generation.

    `get_video_lora` is the inference callable; it receives the values of
    `inputs` (prompt, seed) and returns the generated video shown in `result`.
    """
    block = gr.Blocks(css='style.css').queue()
    with block:
        with gr.Group():
            with gr.Box():
                with gr.Row(elem_id='prompt-container').style(
                        equal_height=True):
                    prompt = gr.Text(
                        label='Prompt',
                        show_label=False,
                        max_lines=1,
                        placeholder='Enter your prompt',
                        elem_id='prompt-text-input').style(container=False)
                    run_button = gr.Button('Generate video').style(
                        full_width=False)
            result = gr.Video(label='Result',
                              show_label=False,
                              elem_id='gallery')
            with gr.Accordion('Advanced options', open=False):
                seed = gr.Slider(
                    label='Seed',
                    minimum=-1,
                    maximum=1000000,
                    step=1,
                    value=-1,
                    info='If set to -1, a different seed will be used each time.'
                )
                # num_frames = gr.Slider(
                #     label='Number of frames',
                #     minimum=16,
                #     maximum=MAX_NUM_FRAMES,
                #     step=1,
                #     value=16,
                #     info=
                #     'Note that the content of the video also changes when you change the number of frames.'
                # )
                # num_inference_steps = gr.Slider(label='Number of inference steps',
                #                                 minimum=10,
                #                                 maximum=50,
                #                                 step=1,
                #                                 value=25)

        inputs = [
            prompt,
            seed,
            # num_frames,
            # num_inference_steps,
        ]

        gr.Examples(examples=[[
            "A monkey is playing a piano, frozenmovie style", 431
        ]],
                    inputs=inputs,
                    outputs=result,
                    fn=get_video_lora,
                    cache_examples=True)

        # Run inference when the prompt is submitted or the button is clicked.
        prompt.submit(fn=get_video_lora, inputs=inputs, outputs=result)
        run_button.click(fn=get_video_lora, inputs=inputs, outputs=result)

    return block
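
# Usage sketch (an assumption, not part of the original file): `create_demo`
# is a factory, so the surrounding app is expected to pass in its inference
# function and launch the returned Blocks. The import path and the
# `get_video_lora(prompt, seed)` signature below are inferred from the
# `inputs` list above and are hypothetical, not confirmed by this file.
#
#     from app import create_demo          # hypothetical import path
#
#     def get_video_lora(prompt: str, seed: int):
#         ...  # run the LoRA text-to-video pipeline, return the video path
#
#     demo = create_demo(get_video_lora)
#     demo.launch()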