fffiloni committed on
Commit 2943808
1 Parent(s): 728053a

Update app.py

Files changed (1)
  1. app.py +109 -7
app.py CHANGED
 
@@ -1,4 +1,9 @@
  from PIL import Image
+ import os
+ import cv2
+ import numpy as np
+ from moviepy.editor import *
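+ # new dependencies: OpenCV for frame I/O, numpy for array conversion,
+ # moviepy for resizing and reassembling the video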
  import gradio as gr
  from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
  import torch
 
@@ -20,7 +25,51 @@ pipe.enable_xformers_memory_efficient_attention()
  pipe.enable_model_cpu_offload()
  pipe.enable_attention_slicing()

- def infer(
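+ # Helper: split the uploaded video into JPEG frames on disk and return their
+ # paths together with the frame rate reported by OpenCV.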
+ def get_frames(video_in):
+     frames = []
+     # resize the video
+     clip = VideoFileClip(video_in)
+
+     # check fps
+     if clip.fps > 30:
+         print("video rate is over 30, resetting to 30")
+         clip_resized = clip.resize(height=512)
+         clip_resized.write_videofile("video_resized.mp4", fps=30)
+     else:
+         print("video rate is OK")
+         clip_resized = clip.resize(height=512)
+         clip_resized.write_videofile("video_resized.mp4", fps=clip.fps)
+
+     print("video resized to 512 height")
+
+     # Opens the video file with CV2
+     cap = cv2.VideoCapture("video_resized.mp4")
+
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     print("video fps: " + str(fps))
+     i = 0
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+         cv2.imwrite('kang' + str(i) + '.jpg', frame)
+         frames.append('kang' + str(i) + '.jpg')
+         i += 1
+
+     cap.release()
+     cv2.destroyAllWindows()
+     print("broke the video into frames")
+
+     return frames, fps
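+
+ # Helper: reassemble the processed frames into an MP4 at the original frame rate.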
+ def create_video(frames, fps):
+     print("building video result")
+     clip = ImageSequenceClip(frames, fps=fps)
+     clip.write_videofile("_result.mp4", fps=fps)
+
+     return "_result.mp4"
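+
+ # The original single-image infer() continues below, renamed to process_brightness().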
+ def process_brightness(
      prompt,
      negative_prompt,
      conditioning_image,
 
@@ -57,12 +106,63 @@ def infer(

      return output_image

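+ # New entry point: break the video into frames, run the brightness pipeline on
+ # each frame, then rebuild a video from the results. Each frame serves as the
+ # conditioning image, so the old conditioning_image argument is dropped.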
+ def infer(video_in, prompt,
+           negative_prompt,
+           num_inference_steps=30,
+           size=768,
+           guidance_scale=7.0,
+           seed=1234
+           ):
+
+     # 1. break video into frames and get FPS
+     break_vid = get_frames(video_in)
+     frames_list = break_vid[0]
+     fps = break_vid[1]
+     #n_frame = int(trim_value*fps)
+     n_frame = len(frames_list)
+
+     # leftover clamp from a trim feature; a no-op while n_frame covers all frames
+     if n_frame >= len(frames_list):
+         print("video is shorter than the cut value")
+         n_frame = len(frames_list)
+
+     # 2. prepare the list of result frames
+     result_frames = []
+     print("set stop frames to: " + str(n_frame))
+
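+     # 3. run the pipeline frame by frame, saving each result to disk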
+     for i, image in enumerate(frames_list[0:int(n_frame)]):
+
+         image = Image.open(image).convert("RGB")
+         image = np.array(image)
+         # pass the current frame as the conditioning image and forward the
+         # user-selected settings instead of hardcoding them
+         output_frame = process_brightness(prompt,
+                                           negative_prompt,
+                                           image,
+                                           num_inference_steps=num_inference_steps,
+                                           size=size,
+                                           guidance_scale=guidance_scale,
+                                           seed=seed
+                                           )
+         print(output_frame)
+
+         image = Image.open(output_frame)
+         #image = Image.fromarray(output_frame[0])
+         image.save("_frame_" + str(i) + ".jpeg")
+         result_frames.append("_frame_" + str(i) + ".jpeg")
+         print("frame " + str(i) + "/" + str(n_frame) + ": done;")
+
+     final_vid = create_video(result_frames, fps)
+
+     return final_vid
+
  with gr.Blocks() as demo:
      gr.Markdown(
          """
-         # ControlNet on Brightness
+         # ControlNet on Brightness • Video

-         This is a demo on ControlNet based on brightness.
+         This is a demo of ControlNet conditioned on brightness, applied frame by frame to a video.
          """)

      with gr.Row():
 
@@ -73,8 +173,10 @@ with gr.Blocks() as demo:
          negative_prompt = gr.Textbox(
              label="Negative Prompt",
          )
-         conditioning_image = gr.Image(
-             label="Conditioning Image",
+         video_in = gr.Video(
+             label="Conditioning Video",
+             source="upload",
+             type="filepath"
          )
          with gr.Accordion('Advanced options', open=False):
              with gr.Row():
 
@@ -116,7 +218,7 @@ with gr.Blocks() as demo:
      submit_btn.click(
          fn=infer,
          inputs=[
-             prompt, negative_prompt, conditioning_image, num_inference_steps, size, guidance_scale, seed
+             video_in, prompt, negative_prompt, num_inference_steps, size, guidance_scale, seed
          ],
          outputs=output
      )
 
@@ -134,7 +236,7 @@ with gr.Blocks() as demo:
          ],
          outputs=output,
          fn=infer,
-         cache_examples=True,
+         cache_examples=False,
      )
      gr.Markdown(
          """