spookyuser committed
Commit bc1258a
1 Parent(s): 13a5199
Files changed (2)
  1. animate.py +22 -17
  2. app.py +10 -23
animate.py CHANGED
@@ -164,24 +164,25 @@ def resize(width, img):
     return img


-def resize_img(img1, img2):
-    img_target_size = Image.open(img1)
+def resize_img(cv2_images[0], cv2_images[1]):
+    img_target_size = Image.open(cv2_images[0])
     img_to_resize = resize_and_crop(
-        img2,
+        cv2_images[1],
         (
             img_target_size.size[0],
             img_target_size.size[1],
-        ),  # set width and height to match img1
+        ),  # set width and height to match cv2_images[0]
         crop_origin="middle",
     )
-    img_to_resize.save("resized_img2.png")
+    img_to_resize.save("resized_cv2_images[1].png")


-def predict(frame1, frame2, times_to_interpolate, model_name):
-    model = models[model_name]
-
-    img1 = cv2.imread(frame1)
-    img2 = cv2.imread(frame2)
+def get_video_frames(images, times_to_interpolate=6, model_name_index=0):
+    frame1 = images[0]
+    frame2 = images[1]
+
+    model = models[model_name_index]
+    cv2_images = [cv2.imread(frame1), cv2.imread(frame2)]

     frame1 = resize(256, frame1)
     frame2 = resize(256, frame2)
@@ -190,26 +191,30 @@ def predict(frame1, frame2, times_to_interpolate, model_name):
     frame2.save("test2.png")

     resize_img("test1.png", "test2.png")
-    input_frames = ["test1.png", "resized_img2.png"]
+    input_frames = ["test1.png", "resized_cv2_images[1].png"]

     frames = list(
         util.interpolate_recursively_from_files(
             input_frames, times_to_interpolate, model
         )
     )
+    return frames, cv2_images

-    mediapy.write_video("out.mp4", frames, fps=5)

-    print(f"TYPES....{type(img1)},{type(img2)} SHAPES{img1.shape} Img {img1}")
-    clip1, clip3 = contourfinder(img1, img2)  # has a third text option
+def create_mp4_with_audio(frames, cv2_images, duration, audio, output_path):
+    temp_vid_path = "TEMP.mp4"
+    mediapy.write_video(temp_vid_path, frames, fps=5)
+    print(f"TYPES....{type(cv2_images[0])},{type(cv2_images[1])} SHAPES{cv2_images[0].shape} Img {cv2_images[0]}")
+    clip1, clip3 = contourfinder(cv2_images[0], cv2_images[1])  # has a third text option

     # Use open CV and moviepy code
     # So we move from open CV video 1 to out.mp4 to open CV video2
     clip1 = clip1
-    clip2 = vfc("out.mp4").resize(8).set_start(clip1.duration - 0.5).crossfadein(2)
+    clip2 = vfc(temp_vid_path).resize(8).set_start(clip1.duration - 0.5).crossfadein(2)
     clip3 = clip3.set_start((clip1.duration - 0.5) + (clip2.duration)).crossfadein(2)

     new_clip = CompositeVideoClip([clip1, clip2, clip3])
-    new_clip.write_videofile("out.mp4")
-
-    return "out.mp4"
+    new_clip.set_audio(audio)
+    new_clip.set_duration(duration)
+    new_clip.write_videofile(output_path, audio_codec="aac")
+    return output_path
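
Editor's note on the new animate.py code, with a hedged sketch below. Two caveats stand out. First, the committed header `def resize_img(cv2_images[0], cv2_images[1]):` is a SyntaxError: subscripted expressions cannot serve as parameter names, so plain names are required even if the saved filename keeps the new `resized_cv2_images[1].png` spelling that get_video_frames expects. Second, moviepy clip methods such as set_audio and set_duration return modified copies rather than mutating the clip in place, so calling them as bare statements before write_videofile leaves the written file without the audio or duration change. A minimal corrective sketch; the parameter names img1/img2 and the finish_video helper are illustrative, not part of the commit, and it assumes the PIL, moviepy, and resize_and_crop imports already present in animate.py:

# Sketch only, not the committed code.
def resize_img(img1, img2):  # plain names; cv2_images[0] cannot be a parameter
    img_target_size = Image.open(img1)
    img_to_resize = resize_and_crop(
        img2,
        (img_target_size.size[0], img_target_size.size[1]),  # match img1's size
        crop_origin="middle",
    )
    # keep the filename get_video_frames lists in input_frames
    img_to_resize.save("resized_cv2_images[1].png")


def finish_video(clips, audio, duration, output_path):  # hypothetical helper
    new_clip = CompositeVideoClip(clips)
    # set_audio/set_duration return new clips; reassign (or chain) the result
    new_clip = new_clip.set_audio(audio).set_duration(duration)
    new_clip.write_videofile(output_path, audio_codec="aac")
    return output_path

The same reassignment fix applies verbatim to the tail of create_mp4_with_audio above.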
app.py CHANGED
@@ -4,8 +4,9 @@ import subprocess
 import uuid
 from pathlib import Path

-import gradio as gr
-from moviepy.editor import AudioFileClip, ImageClip
+from moviepy.editor import AudioFileClip
+
+from animate import create_mp4_with_audio, get_video_frames

 output_dir = Path("temp/").absolute()
 output_dir.mkdir(exist_ok=True, parents=True)
@@ -14,8 +15,6 @@ os.chdir(
     output_dir
 )  # change working directory to output_dir because the hf spaces model has no option to specify output directory ¯\_(ツ)_/¯

-# TODO: Add an if statement that checks if a gpu is available, if one is then do weird stable diffusion stuff, if one isn't, then just use the regular hugging face api
-

 class SpotifyApi:
     spotify_directory = Path("spotify")
@@ -81,38 +80,26 @@ def process_inputs(
     return video


-def add_static_image_to_audio(image, audio_input) -> str:
-    """Create and save a video file to `output_path` after
-    combining a static image that is located in `image_path`
-    with an audio file in `audio_path`"""
+def animate_images(image_paths: list[str], audio_input: str) -> str:
     # Generate a random folder name and change directories to there
     foldername = str(uuid.uuid4())[:8]
     vid_output_dir = Path(output_dir / foldername)
     vid_output_dir.mkdir(exist_ok=True, parents=True)
     audio_clip = AudioFileClip(audio_input.path)
-    # Make the audio clip start at the specified time and set the duration to the specified duration
     audio_clip = audio_clip.subclip(
         audio_input.start_time, audio_input.start_time + audio_input.run_for
     )
-    image_clip = ImageClip(image)
-    video_clip = image_clip.set_audio(audio_clip)
-    video_clip.duration = (
-        audio_clip.duration
-    )  # The duration here is the cut duration from above
-    video_clip.fps = 1
+    video_frames, cv2_images = get_video_frames(image_paths)
     path = Path(vid_output_dir / "output.mp4").as_posix()
-    video_clip.write_videofile(path, audio_codec="aac")
-    return path
-
-def add_openv_animation_to_audio(image_paths: list[str], audio_path: str) -> str:
-
-
+    return create_mp4_with_audio(
+        video_frames, cv2_images, audio_clip.duration, audio_clip, path
+    )


 def get_stable_diffusion_image(prompt) -> str:
     stable_diffusion = gr.Blocks.load(name="spaces/stabilityai/stable-diffusion")
     gallery_dir = stable_diffusion(prompt, fn_index=2)
-    return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)][0]
+    return [os.path.join(gallery_dir, img) for img in os.listdir(gallery_dir)][:2]


 iface = gr.Interface(
@@ -128,4 +115,4 @@ iface = gr.Interface(
 )


-iface.launch()
+iface.launch()
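
Editor's note on the rewired app.py, with a hedged usage sketch below. get_stable_diffusion_image now returns a two-element list of image paths, matching what get_video_frames consumes, but two details look worth double-checking: the new animate_images annotates audio_input as str while its body reads .path, .start_time, and .run_for from it, and the removed `import gradio as gr` is still needed by gr.Blocks.load and gr.Interface later in the file unless gradio is imported elsewhere. The sketch's AudioInput dataclass, prompt string, and variable names are illustrative stand-ins, not part of the commit:

# Sketch only: the expected call chain after this commit.
from dataclasses import dataclass

@dataclass
class AudioInput:  # hypothetical stand-in with the fields animate_images reads
    path: str
    start_time: float
    run_for: float

image_paths = get_stable_diffusion_image("two suns over a desert")  # -> [p0, p1]
video_path = animate_images(image_paths, AudioInput("song.mp3", 10.0, 15.0))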