fffiloni committed
Commit 071420e
Parent: 98b60eb

Update gradio_app.py

Files changed (1):
  gradio_app.py +119 -23
gradio_app.py CHANGED
@@ -13,17 +13,21 @@ from huggingface_hub import snapshot_download
 weights_dir = './allegro_weights'
 os.makedirs(weights_dir, exist_ok=True)
 
-snapshot_download(
-    repo_id='rhymes-ai/Allegro',
-    allow_patterns=[
-        'scheduler/**',
-        'text_encoder/**',
-        'tokenizer/**',
-        'transformer/**',
-        'vae/**',
-    ],
-    local_dir=weights_dir,
-)
+is_shared_ui = True if "fffiloni/allegro-t2v" in os.environ['SPACE_ID'] else False
+is_gpu_associated = torch.cuda.is_available()
+
+if not is_shared_ui:
+    snapshot_download(
+        repo_id='rhymes-ai/Allegro',
+        allow_patterns=[
+            'scheduler/**',
+            'text_encoder/**',
+            'tokenizer/**',
+            'transformer/**',
+            'vae/**',
+        ],
+        local_dir=weights_dir,
+    )
 
 
 def single_inference(user_prompt, save_path, guidance_scale, num_sampling_steps, seed, enable_cpu_offload):
@@ -97,21 +101,113 @@ def run_inference(user_prompt, guidance_scale, num_sampling_steps, seed, enable_
     result_path = single_inference(user_prompt, save_path, guidance_scale, num_sampling_steps, seed, enable_cpu_offload)
     return result_path
 
+css="""
+#upl-dataset-group {background-color: none!important;}
+div#warning-ready {
+    background-color: #ecfdf5;
+    padding: 0 16px 16px;
+    margin: 20px 0;
+}
+div#warning-ready > .gr-prose > h2, div#warning-ready > .gr-prose > p {
+    color: #057857!important;
+}
+div#warning-duplicate {
+    background-color: #ebf5ff;
+    padding: 0 16px 16px;
+    margin: 20px 0;
+}
+div#warning-duplicate > .gr-prose > h2, div#warning-duplicate > .gr-prose > p {
+    color: #0f4592!important;
+}
+div#warning-duplicate strong {
+    color: #0f4592;
+}
+p.actions {
+    display: flex;
+    align-items: center;
+    margin: 20px 0;
+}
+div#warning-duplicate .actions a {
+    display: inline-block;
+    margin-right: 10px;
+}
+div#warning-setgpu {
+    background-color: #fff4eb;
+    padding: 0 16px 16px;
+    margin: 20px 0;
+}
+div#warning-setgpu > .gr-prose > h2, div#warning-setgpu > .gr-prose > p {
+    color: #92220f!important;
+}
+div#warning-setgpu a, div#warning-setgpu b {
+    color: #91230f;
+}
+div#warning-setgpu p.actions > a {
+    display: inline-block;
+    background: #1f1f23;
+    border-radius: 40px;
+    padding: 6px 24px;
+    color: antiquewhite;
+    text-decoration: none;
+    font-weight: 600;
+    font-size: 1.2em;
+}
+"""
 
 # Create Gradio interface
-iface = gr.Interface(
+with gr.Blocks(css=css) as demo:
+    with gr.Column():
+        gr.Markdown("# Allegro Video Generation")
+        gr.Markdown("Generate a video based on a text prompt using the Allegro pipeline.")
+        with gr.Row():
+            with gr.Column():
+                user_prompt = gr.Textbox(label="User Prompt")
+                with gr.Row():
+                    guidance_scale = gr.Slider(minimum=0, maximum=20, step=0.1, label="Guidance Scale", value=7.5)
+                    num_sampling_steps = gr.Slider(minimum=10, maximum=100, step=1, label="Number of Sampling Steps", value=20)
+                with gr.Row():
+                    seed = gr.Slider(minimum=0, maximum=10000, step=1, label="Random Seed", value=42)
+                    enable_cpu_offload = gr.Checkbox(label="Enable CPU Offload", value=False, scale=1)
+                if is_shared_ui:
+                    top_description = gr.HTML(f'''
+                    <div class="gr-prose">
+                        <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;" fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
+                        Attention: this Space needs to be duplicated to work</h2>
+                        <p class="main-message">
+                            To make it work, <strong>duplicate the Space</strong> and run it on your own profile using a <strong>private</strong> GPU.<br />
+                            You'll be able to offload the model to CPU for a lower GPU memory cost (about 9.3G, compared to 27.5G when CPU offload is not enabled), but inference time will increase significantly.
+                        </p>
+                        <p class="actions">
+                            <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}?duplicate=true">
+                                <img src="https://huggingface.co/datasets/huggingface/badges/resolve/main/duplicate-this-space-lg-dark.svg" alt="Duplicate this Space" />
+                            </a>
+                        </p>
+                    </div>
+                    ''', elem_id="warning-duplicate")
+                else:
+                    if is_gpu_associated:
+                        submit_btn = gr.Button("Generate Video", visible=False)
+                    else:
+                        top_description = gr.HTML(f'''
+                        <div class="gr-prose">
+                            <h2><svg xmlns="http://www.w3.org/2000/svg" width="18px" height="18px" style="margin-right: 0px;display: inline-block;" fill="none"><path fill="#fff" d="M7 13.2a6.3 6.3 0 0 0 4.4-10.7A6.3 6.3 0 0 0 .6 6.9 6.3 6.3 0 0 0 7 13.2Z"/><path fill="#fff" fill-rule="evenodd" d="M7 0a6.9 6.9 0 0 1 4.8 11.8A6.9 6.9 0 0 1 0 7 6.9 6.9 0 0 1 7 0Zm0 0v.7V0ZM0 7h.6H0Zm7 6.8v-.6.6ZM13.7 7h-.6.6ZM9.1 1.7c-.7-.3-1.4-.4-2.2-.4a5.6 5.6 0 0 0-4 1.6 5.6 5.6 0 0 0-1.6 4 5.6 5.6 0 0 0 1.6 4 5.6 5.6 0 0 0 4 1.7 5.6 5.6 0 0 0 4-1.7 5.6 5.6 0 0 0 1.7-4 5.6 5.6 0 0 0-1.7-4c-.5-.5-1.1-.9-1.8-1.2Z" clip-rule="evenodd"/><path fill="#000" fill-rule="evenodd" d="M7 2.9a.8.8 0 1 1 0 1.5A.8.8 0 0 1 7 3ZM5.8 5.7c0-.4.3-.6.6-.6h.7c.3 0 .6.2.6.6v3.7h.5a.6.6 0 0 1 0 1.3H6a.6.6 0 0 1 0-1.3h.4v-3a.6.6 0 0 1-.6-.7Z" clip-rule="evenodd"/></svg>
+                            You have successfully duplicated the Allegro Video Generation Space 🎉</h2>
+                            <p>There's only one step left before you can generate a video: <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings" style="text-decoration: underline" target="_blank">assign a GPU to it (via the Settings tab)</a>.
+                            You will be billed by the minute from when you activate the GPU until it is turned off.</p>
+                            <p class="actions">
+                                <a href="https://huggingface.co/spaces/{os.environ['SPACE_ID']}/settings">🔥 &nbsp; Set recommended GPU</a>
+                            </p>
+                        </div>
+                        ''', elem_id="warning-setgpu")
+
+            with gr.Column():
+                video_output = gr.Video(label="Generated Video")
+
+    submit_btn.click(
         fn=run_inference,
-        inputs=[
-            gr.Textbox(label="User Prompt"),
-            gr.Slider(minimum=0, maximum=20, step=0.1, label="Guidance Scale", value=7.5),
-            gr.Slider(minimum=10, maximum=200, step=1, label="Number of Sampling Steps", value=100),
-            gr.Slider(minimum=0, maximum=10000, step=1, label="Random Seed", value=42),
-            gr.Checkbox(label="Enable CPU Offload", value=False),
-        ],
-        outputs=gr.Video(label="Generated Video"),
-        title="Allegro Video Generation",
-        description="Generate a video based on a text prompt using the Allegro pipeline."
+        inputs=[user_prompt, guidance_scale, num_sampling_steps, seed, enable_cpu_offload],
+        outputs=video_output
     )
 
 # Launch the interface
-iface.launch()
+demo.launch(show_error=True, show_api=False)
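
A note for readers skimming the diff: the weights download is now gated off the shared Space, and `snapshot_download` with `allow_patterns` pulls only the matching paths of the `rhymes-ai/Allegro` repo instead of the full snapshot. A minimal standalone sketch of the same call (the pattern subset here is chosen for illustration, not taken from the commit):

```python
from huggingface_hub import snapshot_download

# Fetch only the subfolders the pipeline needs; the glob patterns are
# matched against file paths inside the rhymes-ai/Allegro repo.
snapshot_download(
    repo_id="rhymes-ai/Allegro",
    allow_patterns=["vae/**", "text_encoder/**"],  # illustrative subset
    local_dir="./allegro_weights",
)
```

`local_dir` materializes the files under the given directory rather than leaving them only in the hub cache, which is what the app's `weights_dir` layout expects.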
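
One caveat in the new gating flags: `os.environ['SPACE_ID']` raises a `KeyError` when the script runs outside a Hugging Face Space (for example, locally). A hedged variant, assuming the same environment variable, that degrades gracefully:

```python
import os

import torch

# Treat a missing SPACE_ID (e.g. a local run) as "not the shared Space";
# os.environ.get avoids the KeyError that a bare os.environ[...] raises.
is_shared_ui = "fffiloni/allegro-t2v" in os.environ.get("SPACE_ID", "")
is_gpu_associated = torch.cuda.is_available()
```

The commit's `True if ... else False` wrapper is also redundant, since the `in` test already yields a boolean.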
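
A second caveat concerns the event wiring: `submit_btn` is assigned only in the GPU-associated branch, so `submit_btn.click(...)` raises a `NameError` on the shared Space and on a CPU-only duplicate, and `visible=False` keeps the button hidden even when a GPU is attached. One possible arrangement (a sketch, not the commit's code) that defines the button unconditionally and toggles visibility instead:

```python
import gradio as gr

# Stand-ins for the commit's flags and handler, so the sketch runs on its own.
is_shared_ui = False
is_gpu_associated = True

def run_inference(prompt):
    return None  # placeholder for the real pipeline call

with gr.Blocks() as demo:
    user_prompt = gr.Textbox(label="User Prompt")
    video_output = gr.Video(label="Generated Video")
    # Create the button in every configuration so .click() always has a
    # target; only show it when inference can actually run.
    can_run = (not is_shared_ui) and is_gpu_associated
    submit_btn = gr.Button("Generate Video", visible=can_run)
    submit_btn.click(fn=run_inference, inputs=[user_prompt], outputs=video_output)
```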