vinesmsuic committed
Commit: dc014e0
1 Parent(s): 927e78e
update

Files changed:
- app.py (+18 -15)
- gradio_demo.py (+18 -15)
app.py
CHANGED
@@ -223,6 +223,15 @@ def perform_anyv2v(
     return output_path
 
 
+
+def get_first_frame_as_pil(video_path):
+    with VideoFileClip(video_path) as clip:
+        # Extract the first frame (at t=0) as an array
+        first_frame_array = clip.get_frame(0)
+        # Convert the numpy array to a PIL Image
+        first_frame_image = Image.fromarray(first_frame_array)
+        return first_frame_image
+
 def btn_preprocess_video_fn(video_path, width, height, start_time, end_time, center_crop, x_offset, y_offset, longest_to_width):
     def check_video(video_path):
         with VideoFileClip(video_path) as clip:
@@ -231,14 +240,6 @@ def btn_preprocess_video_fn(video_path, width, height, start_time, end_time, cen
         else:
             return False
 
-    def get_first_frame_as_pil(video_path):
-        with VideoFileClip(video_path) as clip:
-            # Extract the first frame (at t=0) as an array
-            first_frame_array = clip.get_frame(0)
-            # Convert the numpy array to a PIL Image
-            first_frame_image = Image.fromarray(first_frame_array)
-            return first_frame_image
-
     if check_video(video_path) == False:
         processed_video_path = crop_and_resize_video(input_video_path=video_path,
                                                      output_folder=TEMP_DIR,
@@ -251,11 +252,10 @@ def btn_preprocess_video_fn(video_path, width, height, start_time, end_time, cen
                                                      x_offset=x_offset,
                                                      y_offset=y_offset,
                                                      longest_to_width=longest_to_width)
-
-        return processed_video_path
+
+        return processed_video_path
     else:
-
-        return video_path, frame
+        return video_path
 
 def btn_image_edit_fn(video_path, instruct_prompt, ie_force_512, ie_seed, ie_neg_prompt):
     """
@@ -268,7 +268,7 @@ def btn_image_edit_fn(video_path, instruct_prompt, ie_force_512, ie_seed, ie_neg
         ie_seed = int.from_bytes(os.urandom(2), "big")
     print(f"Using seed: {ie_seed}")
 
-    edited_image_path = perform_edit(video_path=video_path,
+    edited_image_path = Image_Editor.perform_edit(video_path=video_path,
                                      prompt=instruct_prompt,
                                      force_512=ie_force_512,
                                      seed=ie_seed,
@@ -293,7 +293,7 @@ def btn_infer_fn(video_path,
         seed = int.from_bytes(os.urandom(2), "big")
     print(f"Using seed: {seed}")
 
-    result_video_path = perform_anyv2v(video_path=video_path,
+    result_video_path = AnyV2V_Editor.perform_anyv2v(video_path=video_path,
                                        video_prompt=video_prompt,
                                        video_negative_prompt=video_negative_prompt,
                                        edited_first_frame_path=edited_first_frame_path,
@@ -391,7 +391,7 @@ with gr.Blocks() as demo:
     btn_pv.click(
         btn_preprocess_video_fn,
         inputs=[video_raw, pv_width, pv_height, pv_start_time, pv_end_time, pv_center_crop, pv_x_offset, pv_y_offset, pv_longest_to_width],
-        outputs=
+        outputs=video_input
     )
 
     btn_image_edit.click(
@@ -416,6 +416,9 @@ with gr.Blocks() as demo:
                    av_seed],
         outputs=video_output
     )
+
+    video_input.change(fn=get_first_frame_as_pil, inputs=video_input, outputs=src_first_frame)
+
     #=====================================
 
     # Minimizing usage of GPU Resources
gradio_demo.py
CHANGED
@@ -223,6 +223,15 @@ def perform_anyv2v(
     return output_path
 
 
+
+def get_first_frame_as_pil(video_path):
+    with VideoFileClip(video_path) as clip:
+        # Extract the first frame (at t=0) as an array
+        first_frame_array = clip.get_frame(0)
+        # Convert the numpy array to a PIL Image
+        first_frame_image = Image.fromarray(first_frame_array)
+        return first_frame_image
+
 def btn_preprocess_video_fn(video_path, width, height, start_time, end_time, center_crop, x_offset, y_offset, longest_to_width):
     def check_video(video_path):
         with VideoFileClip(video_path) as clip:
@@ -231,14 +240,6 @@ def btn_preprocess_video_fn(video_path, width, height, start_time, end_time, cen
         else:
             return False
 
-    def get_first_frame_as_pil(video_path):
-        with VideoFileClip(video_path) as clip:
-            # Extract the first frame (at t=0) as an array
-            first_frame_array = clip.get_frame(0)
-            # Convert the numpy array to a PIL Image
-            first_frame_image = Image.fromarray(first_frame_array)
-            return first_frame_image
-
     if check_video(video_path) == False:
         processed_video_path = crop_and_resize_video(input_video_path=video_path,
                                                      output_folder=TEMP_DIR,
@@ -251,11 +252,10 @@ def btn_preprocess_video_fn(video_path, width, height, start_time, end_time, cen
                                                      x_offset=x_offset,
                                                      y_offset=y_offset,
                                                      longest_to_width=longest_to_width)
-
-        return processed_video_path
+
+        return processed_video_path
     else:
-
-        return video_path, frame
+        return video_path
 
 def btn_image_edit_fn(video_path, instruct_prompt, ie_force_512, ie_seed, ie_neg_prompt):
     """
@@ -268,7 +268,7 @@ def btn_image_edit_fn(video_path, instruct_prompt, ie_force_512, ie_seed, ie_neg
         ie_seed = int.from_bytes(os.urandom(2), "big")
     print(f"Using seed: {ie_seed}")
 
-    edited_image_path = perform_edit(video_path=video_path,
+    edited_image_path = Image_Editor.perform_edit(video_path=video_path,
                                      prompt=instruct_prompt,
                                      force_512=ie_force_512,
                                      seed=ie_seed,
@@ -293,7 +293,7 @@ def btn_infer_fn(video_path,
         seed = int.from_bytes(os.urandom(2), "big")
     print(f"Using seed: {seed}")
 
-    result_video_path = perform_anyv2v(video_path=video_path,
+    result_video_path = AnyV2V_Editor.perform_anyv2v(video_path=video_path,
                                        video_prompt=video_prompt,
                                        video_negative_prompt=video_negative_prompt,
                                        edited_first_frame_path=edited_first_frame_path,
@@ -391,7 +391,7 @@ with gr.Blocks() as demo:
     btn_pv.click(
         btn_preprocess_video_fn,
         inputs=[video_raw, pv_width, pv_height, pv_start_time, pv_end_time, pv_center_crop, pv_x_offset, pv_y_offset, pv_longest_to_width],
-        outputs=
+        outputs=video_input
    )
 
     btn_image_edit.click(
@@ -416,6 +416,9 @@ with gr.Blocks() as demo:
                    av_seed],
         outputs=video_output
     )
+
+    video_input.change(fn=get_first_frame_as_pil, inputs=video_input, outputs=src_first_frame)
+
     #=====================================
 
     # Minimizing usage of GPU Resources
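Note on the change (same in both files): get_first_frame_as_pil is promoted from a nested helper inside btn_preprocess_video_fn to module scope, the button callbacks now call the Image_Editor and AnyV2V_Editor wrappers, and a video_input.change listener keeps the source first-frame preview in sync with the preprocessed clip. Moving the helper to module scope is what lets the event listener reference it directly when the Blocks layout is built. The sketch below isolates that first-frame preview wiring only, assuming gradio, moviepy and Pillow are installed; the component names video_input and src_first_frame come from the diff, while the surrounding layout and the None guard are simplified stand-ins rather than the actual demo code.

import gradio as gr
from moviepy.editor import VideoFileClip
from PIL import Image


def get_first_frame_as_pil(video_path):
    # Guard added for this sketch: the .change event also fires when the
    # video component is cleared, in which case video_path is None.
    if video_path is None:
        return None
    with VideoFileClip(video_path) as clip:
        # Grab the frame at t=0 as a numpy array and convert it to a PIL Image.
        return Image.fromarray(clip.get_frame(0))


with gr.Blocks() as demo:
    video_input = gr.Video(label="Preprocessed video")
    src_first_frame = gr.Image(label="Source first frame", type="pil")
    # Whenever the video component changes (e.g. after preprocessing writes a
    # new clip into it), refresh the first-frame preview automatically.
    video_input.change(fn=get_first_frame_as_pil,
                       inputs=video_input,
                       outputs=src_first_frame)

if __name__ == "__main__":
    demo.launch()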