JoPmt committed
Commit
cf99997
1 Parent(s): 323d2e9

Update app.py

Files changed (1)
  1. app.py +14 -133
app.py CHANGED
@@ -1,10 +1,4 @@
- import os, json, re, sys, subprocess, gc, tqdm
- import math
- import time
- import numpy
- import random
- import spaces
- import threading
+ import os, json, re, sys, subprocess, gc, tqdm, math, time, random, threading, spaces, numpy, torch
  import gradio as gr
  from PIL import Image, ImageOps
  from moviepy import VideoFileClip
@@ -16,13 +10,10 @@ import insightface
  from insightface.app import FaceAnalysis
  from facexlib.parsing import init_parsing_model
  from facexlib.utils.face_restoration_helper import FaceRestoreHelper
-
- import torch
  from diffusers import CogVideoXDPMScheduler
  from diffusers.utils import load_image
  from diffusers.image_processor import VaeImageProcessor
  from diffusers.training_utils import free_memory
-
  from util.utils import *
  from util.rife_model import load_rife_model, rife_inference_with_latents
  from models.utils import process_face_embeddings
@@ -94,7 +85,6 @@ face_clip_model.to(device, dtype=dtype)
  face_helper.face_det.to(device)
  face_helper.face_parse.to(device)
  transformer.to(device, dtype=dtype)
- ##free_memory()

  pipe = ConsisIDPipeline.from_pretrained(model_path, transformer=transformer, scheduler=scheduler, torch_dtype=dtype)
  # If you're using with lora, add this code
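The hunk above ends on a comment pointing at optional LoRA code that is not shown in this diff. As a hedged sketch only (the checkpoint path, weight file, adapter name, and lora_scale below are hypothetical placeholders, and it is assumed that ConsisIDPipeline exposes diffusers' standard LoRA-loading mixin), wiring a LoRA into the pipeline would look roughly like:

```python
# Hypothetical sketch, not part of this commit: attach a LoRA adapter to the pipeline.
# Assumes ConsisIDPipeline inherits diffusers' standard LoRA loader mixin.
pipe.load_lora_weights(
    "path/to/lora_checkpoint",                       # placeholder path
    weight_name="pytorch_lora_weights.safetensors",  # placeholder weight file
    adapter_name="consisid_lora",                    # placeholder adapter name
)
pipe.fuse_lora(lora_scale=1.0)  # optional: merge the adapter into the base weights
```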
@@ -118,74 +108,34 @@ os.makedirs("./gradio_tmp", exist_ok=True)
  upscale_model = load_sd_upscale("model_real_esran/RealESRGAN_x4.pth", device)
  frame_interpolation_model = load_rife_model("model_rife")

-
  def convert_to_gif(video_path):
      clip = VideoFileClip(video_path)
      gif_path = video_path.replace(".mp4", ".gif")
      clip.write_gif(gif_path, fps=8)
      return gif_path

-
- def delete_old_files():
-     while True:
-         now = datetime.now()
-         cutoff = now - timedelta(minutes=10)
-         directories = ["./output", "./gradio_tmp"]
-
-         for directory in directories:
-             for filename in os.listdir(directory):
-                 file_path = os.path.join(directory, filename)
-                 if os.path.isfile(file_path):
-                     file_mtime = datetime.fromtimestamp(os.path.getmtime(file_path))
-                     if file_mtime < cutoff:
-                         os.remove(file_path)
-         time.sleep(600)
-
  @spaces.GPU(duration=180)
  def plex(prompt,image_input,seed_value,scale_status,rife_status,progress=gr.Progress(track_tqdm=True)):
      seed = seed_value
      if seed == -1:
          seed = random.randint(0, 2**8 - 1)
-
      id_image = np.array(ImageOps.exif_transpose(Image.fromarray(image_input)).convert("RGB"))
      id_image = resize_numpy_image_long(id_image, 1024)
-     id_cond, id_vit_hidden, align_crop_face_image, face_kps = process_face_embeddings(face_helper, face_clip_model, handler_ante,
-         eva_transform_mean, eva_transform_std,
-         face_main_model, device, dtype, id_image,
-         original_id_image=id_image, is_align_face=True,
-         cal_uncond=False)
+     id_cond, id_vit_hidden, align_crop_face_image, face_kps = process_face_embeddings(face_helper, face_clip_model, handler_ante, eva_transform_mean, eva_transform_std, face_main_model, device, dtype, id_image, original_id_image=id_image, is_align_face=True, cal_uncond=False)
      if is_kps:
          kps_cond = face_kps
      else:
          kps_cond = None
-
      tensor = align_crop_face_image.cpu().detach()
      tensor = tensor.squeeze()
      tensor = tensor.permute(1, 2, 0)
      tensor = tensor.numpy() * 255
      tensor = tensor.astype(np.uint8)
      image = ImageOps.exif_transpose(Image.fromarray(tensor))
-
      prompt = prompt.strip('"')
-
      generator = torch.Generator(device).manual_seed(seed) if seed else None
-
-     video_pt = pipe(
-         prompt=prompt,
-         image=image,
-         num_videos_per_prompt=1,
-         num_inference_steps=10,
-         num_frames=49,
-         use_dynamic_cfg=False,
-         guidance_scale=7.0,
-         generator=generator,
-         id_vit_hidden=id_vit_hidden,
-         id_cond=id_cond,
-         kps_cond=kps_cond,
-         output_type="pt",
-     ).frames
-
-     latents = video_pt
+     video_pt = pipe(prompt=prompt,image=image,num_videos_per_prompt=1,num_inference_steps=10,num_frames=49,use_dynamic_cfg=False,guidance_scale=7.0,generator=generator,id_vit_hidden=id_vit_hidden,id_cond=id_cond,kps_cond=kps_cond,output_type="pt",)
+     latents = video_pt.frames
      ##free_memory()
      if scale_status:
          latents = upscale_batch_and_concatenate(upscale_model, latents, device)
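The hunk is cut off right after the super-resolution branch. For orientation only, here is a minimal sketch of the frame-interpolation branch that the rife_status flag implies, built from the helpers imported at the top of the file; the exact signature of rife_inference_with_latents is an assumption, not something this diff shows:

```python
# Sketch only: presumed counterpart of the scale_status branch above.
# Assumes rife_inference_with_latents(model, latents) returns the interpolated
# frame tensor (8 fps -> 16 fps, matching the checkbox label in the UI below).
if rife_status:
    latents = rife_inference_with_latents(frame_interpolation_model, latents)
```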
@@ -207,12 +157,9 @@ def plex(prompt,image_input,seed_value,scale_status,rife_status,progress=gr.Prog
      gif_path = convert_to_gif(video_path)
      gif_update = gr.update(visible=True, value=gif_path)
      seed_update = gr.update(visible=True, value=seed)
-
+     gc.collect()
      return video_path, video_update, gif_update, seed_update

- ##return video_pt, seed
- ##threading.Thread(target=delete_old_files, daemon=True).start()
-
  examples_images = [
      ["asserts/example_images/1.png", "A woman adorned with a delicate flower crown, is standing amidst a field of gently swaying wildflowers. Her eyes sparkle with a serene gaze, and a faint smile graces her lips, suggesting a moment of peaceful contentment. The shot is framed from the waist up, highlighting the gentle breeze lightly tousling her hair. The background reveals an expansive meadow under a bright blue sky, capturing the tranquility of a sunny afternoon."],
      ["asserts/example_images/2.png", "The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel."],
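This hunk swaps the trailing blank line for a gc.collect() call while free_memory() stays commented out. For reference, a common explicit cleanup pattern in torch-based Spaces looks like the sketch below; it is illustrative only and not code from this commit:

```python
# Reference sketch only, not code from this commit.
import gc
import torch

def release_memory():
    """Drop unreachable Python objects and return cached GPU blocks to the driver."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()
```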
@@ -246,12 +193,6 @@ with gr.Blocks() as demo:
              with gr.Accordion("IPT2V: Face Input", open=True):
                  image_input = gr.Image(label="Input Image (should contain clear face)")
                  prompt = gr.Textbox(label="Prompt (Less than 200 Words)", placeholder="Enter your prompt here", lines=5)
-             with gr.Accordion("Examples", open=False):
-                 examples_component_images = gr.Examples(
-                     examples_images,
-                     inputs=[image_input, prompt],
-                     cache_examples=False,
-                 )

              with gr.Group():
                  with gr.Column():
@@ -261,7 +202,7 @@
                          )
                      with gr.Row():
                          enable_scale = gr.Checkbox(label="Super-Resolution (720 × 480 -> 2880 × 1920)", value=False)
-                         enable_rife = gr.Checkbox(label="Frame Interpolation (8fps -> 16fps)", value=False)
+                         enable_rife = gr.Checkbox(label="Frame Interpolation (8fps -> 16fps)", value=True)
                      gr.Markdown(
                          "✨In this demo, we use [RIFE](https://github.com/hzwer/ECCV2022-RIFE) for frame interpolation and [Real-ESRGAN](https://github.com/xinntao/Real-ESRGAN) for upscaling(Super-Resolution)."
                      )
@@ -269,77 +210,17 @@ with gr.Blocks() as demo:
              generate_button = gr.Button("🎬 Generate Video")

          with gr.Column():
-             video_output = gr.Video(label="ConsisID Generate Video", width=720, height=480)
+             video_output = gr.Video(label="ConsisID Generate Video",)
              with gr.Row():
                  download_video_button = gr.File(label="📥 Download Video", visible=False)
                  download_gif_button = gr.File(label="📥 Download GIF", visible=False)
                  seed_text = gr.Number(label="Seed Used for Video Generation", visible=False)
-
-     gr.Markdown("""
-     <table border="0" style="width: 100%; text-align: left; margin-top: 20px;">
-         <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
-             🎥 Video Gallery
-         </div>
-         <tr>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video features a woman in exquisite hybrid armor adorned with iridescent gemstones, standing amidst gently falling cherry blossoms. Her piercing yet serene gaze hints at quiet determination, as a breeze catches a loose strand of her hair. She stands in a tranquil courtyard framed by moss-covered stone walls and wooden arches, with blossoms casting soft shadows on the ground. The petals swirl around her, adding a dreamlike quality, while the blurred backdrop emphasizes her poised figure. The scene conveys elegance, strength, and tranquil readiness, capturing a moment of peace before an upcoming challenge.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/97fa0710-4f14-4a6d-b6f7-f3a2e9f7486e" width="100%" controls autoplay loop></video>
-             </td>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video features a baby wearing a bright superhero cape, standing confidently with arms raised in a powerful pose. The baby has a determined look on their face, with eyes wide and lips pursed in concentration, as if ready to take on a challenge. The setting appears playful, with colorful toys scattered around and a soft rug underfoot, while sunlight streams through a nearby window, highlighting the fluttering cape and adding to the impression of heroism. The overall atmosphere is lighthearted and fun, with the baby's expressions capturing a mix of innocence and an adorable attempt at bravery, as if truly ready to save the day.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/90b547a3-247c-4bb0-abae-ba53483b7b6e" width="100%" controls autoplay loop></video>
-             </td>
-         </tr>
-         <tr>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video features a man standing next to an airplane, engaged in a conversation on his cell phone. he is wearing sunglasses and a black top, and he appears to be talking seriously. The airplane has a green stripe running along its side, and there is a large engine visible behind his. The man seems to be standing near the entrance of the airplane, possibly preparing to board or just having disembarked. The setting suggests that he might be at an airport or a private airfield. The overall atmosphere of the video is professional and focused, with the man's attire and the presence of the airplane indicating a business or travel context.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/55680c58-de86-48b4-8d86-e9906a3185c3" width="100%" controls autoplay loop></video>
-             </td>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video features a woman with blonde hair standing on a beach near the water's edge. She is wearing a black swimsuit and appears to be enjoying her time by the sea. The sky above is clear with some clouds, and the ocean waves gently lap against the shore. The woman seems to be holding something white in her hand, possibly a piece of driftwood or a small object found on the beach. The overall atmosphere of the video is serene and relaxing, capturing the beauty of nature and the simple pleasure of being by the ocean.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/8d06e702-f80e-4cb2-abc2-b0f519ec3f11" width="100%" controls autoplay loop></video>
-             </td>
-         </tr>
-         <tr>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video features a man sitting in a red armchair, enjoying a cup of coffee or tea. he is dressed in a light-colored outfit and has long dark-haired hair. The setting appears to be indoors, with large windows providing a view of a misty or foggy coastal landscape outside. The room has a modern design with geometric structures visible in the background. There is a small round table next to the armchair, also holding a cup. The overall atmosphere suggests a calm and serene moment, possibly during a cold or rainy day by the sea.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/ab9c655e-84c2-47ed-85d9-039a7f64adfe" width="100%" controls autoplay loop></video>
-             </td>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video shows a young boy sitting at a table, eating a piece of food. He appears to be enjoying his meal, as he takes a bite and chews it. The boy is wearing a blue shirt and has short hair. The background is dark, with some light coming from the left side of the frame. There is a straw visible on the right side of the frame, suggesting that there may be a drink next to the boy's plate. The overall atmosphere of the video seems casual and relaxed, with the focus on the boy's enjoyment of his food.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/8014b02e-e1c4-4df7-b7f3-cebfb01fa373" width="100%" controls autoplay loop></video>
-             </td>
-         </tr>
-         <tr>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/e4bc3169-d3d4-46e2-a667-8b456ead9465" width="100%" controls autoplay loop></video>
-             </td>
-             <td style="width: 25%; vertical-align: top; font-size: 0.9em;">
-                 <p>The video features a young man standing outdoors in a snowy park. he is wearing a colorful winter jacket with a floral pattern and a white knit hat. The background shows a snowy landscape with trees, benches, and a metal fence. The ground is covered in snow, and there is a light snowfall in the air. The man appears to be enjoying the winter weather, as he smiles and gives a thumbs-up gesture towards the camera. The overall atmosphere of the video is cheerful and festive, capturing the beauty of a snowy day in a park.</p>
-             </td>
-             <td style="width: 25%; vertical-align: top;">
-                 <video src="https://github.com/user-attachments/assets/e4e3e519-95d4-44e0-afa7-9a833f99e090" width="100%" controls autoplay loop></video>
-             </td>
-         </tr>
-     </table>
-     """)
-
-
+             with gr.Accordion("Examples", open=False):
+                 examples_component_images = gr.Examples(
+                     examples_images,
+                     inputs=[image_input, prompt],
+                     cache_examples=False,
+                 )
      generate_button.click(
          fn=plex,
          inputs=[prompt, image_input, seed_param, enable_scale, enable_rife],
@@ -347,4 +228,4 @@
      )

  demo.queue(max_size=15)
- demo.launch(debug=True)
+ demo.launch(debug=True,inline=False,show_api=False,share=False)
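The outputs argument of generate_button.click sits between the last two hunks and is not shown. As a hypothetical illustration only, a wiring consistent with the four values plex returns and the four output widgets defined above would look like this; the actual list in app.py may differ:

```python
# Hypothetical wiring sketch; the real outputs list is not part of this diff.
generate_button.click(
    fn=plex,
    inputs=[prompt, image_input, seed_param, enable_scale, enable_rife],
    outputs=[video_output, download_video_button, download_gif_button, seed_text],
)
```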
 