fffiloni committed
Commit 39091be
0 Parent(s)

Duplicate from fffiloni/video2openpose

Files changed (4)
  1. .gitattributes +34 -0
  2. README.md +13 -0
  3. app.py +135 -0
  4. requirements.txt +5 -0
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,13 @@
+ ---
+ title: Video To Canny Edge
+ emoji: 🏢
+ colorFrom: yellow
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.23.0
+ app_file: app.py
+ pinned: false
+ duplicated_from: fffiloni/video2openpose
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,135 @@
+ import gradio as gr
+ from controlnet_aux import OpenposeDetector
+ import os
+ import cv2
+ import numpy as np
+ from PIL import Image
+ from moviepy.editor import *
+
+ # Load the OpenPose detector shipped with the ControlNet auxiliary models
+ openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')
+
+ def get_frames(video_in):
+     frames = []
+     # resize the video
+     clip = VideoFileClip(video_in)
+
+     # check fps; cap the output at 30 fps
+     if clip.fps > 30:
+         print("video rate is over 30, resetting to 30")
+         clip_resized = clip.resize(height=512)
+         clip_resized.write_videofile("video_resized.mp4", fps=30)
+     else:
+         print("video rate is OK")
+         clip_resized = clip.resize(height=512)
+         clip_resized.write_videofile("video_resized.mp4", fps=clip.fps)
+
+     print("video resized to 512 height")
+
+     # Open the resized video with OpenCV and dump every frame to disk
+     cap = cv2.VideoCapture("video_resized.mp4")
+
+     fps = cap.get(cv2.CAP_PROP_FPS)
+     print("video fps: " + str(fps))
+     i = 0
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if ret == False:
+             break
+         cv2.imwrite('kang' + str(i) + '.jpg', frame)
+         frames.append('kang' + str(i) + '.jpg')
+         i += 1
+
+     cap.release()
+     cv2.destroyAllWindows()
+     print("broke the video into frames")
+
+     return frames, fps
+
+ def get_canny_filter(i):
+     # Despite the name, this runs the OpenPose detector on a single frame
+     image = Image.open(i)
+
+     image = np.array(image)
+
+     image = openpose(image)
+     image = Image.fromarray(image)
+     image.save("canny_frame_" + str(i) + ".jpeg")
+     return "canny_frame_" + str(i) + ".jpeg"
+
+ def create_video(frames, fps, type):
+     print("building video result")
+     clip = ImageSequenceClip(frames, fps=fps)
+     clip.write_videofile(type + "_result.mp4", fps=fps)
+
+     return type + "_result.mp4"
+
+ def convertG2V(imported_gif):
+     # Convert an uploaded GIF to an mp4 so it can go through the same pipeline
+     clip = VideoFileClip(imported_gif.name)
+     clip.write_videofile("my_gif_video.mp4")
+     return "my_gif_video.mp4"
+
+ def infer(video_in):
+
+     # 1. break video into frames and get FPS
+     break_vid = get_frames(video_in)
+     frames_list = break_vid[0]
+     fps = break_vid[1]
+     #n_frame = int(trim_value*fps)
+     n_frame = len(frames_list)
+
+     # leftover guard from a removed trim control; keeps n_frame at the full length
+     if n_frame >= len(frames_list):
+         print("video is shorter than the cut value")
+         n_frame = len(frames_list)
+
+     # 2. prepare frames result arrays
+     result_frames = []
+     print("set stop frames to: " + str(n_frame))
+
+     for i in frames_list[0:int(n_frame)]:
+         canny_frame = get_canny_filter(i)
+         result_frames.append(canny_frame)
+         print("frame " + i + "/" + str(n_frame) + ": done;")
+
+     final_vid = create_video(result_frames, fps, "canny")
+
+     files = [final_vid]
+
+     return final_vid, files
+
+ title = """
+ <div style="text-align: center; max-width: 500px; margin: 0 auto;">
+   <div
+     style="
+       display: inline-flex;
+       align-items: center;
+       gap: 0.8rem;
+       font-size: 1.75rem;
+       margin-bottom: 10px;
+     "
+   >
+     <h1 style="font-weight: 600; margin-bottom: 7px;">
+       Video to Canny Edge
+     </h1>
+   </div>
+ </div>
+ """
+
+ with gr.Blocks() as demo:
+     with gr.Column():
+         gr.HTML(title)
+         with gr.Row():
+             with gr.Column():
+                 video_input = gr.Video(source="upload", type="filepath")
+                 gif_input = gr.File(label="import a GIF instead", file_types=['.gif'])
+                 gif_input.change(fn=convertG2V, inputs=gif_input, outputs=video_input)
+                 submit_btn = gr.Button("Submit")
+
+             with gr.Column():
+                 video_output = gr.Video()
+                 file_output = gr.Files()
+
+         submit_btn.click(fn=infer, inputs=[video_input], outputs=[video_output, file_output])
+
+ demo.launch()
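
The core of the pipeline above is the per-frame OpenPose pass inside get_canny_filter(). A minimal standalone sketch of that step is shown below, assuming the same controlnet_aux, Pillow, and NumPy dependencies; the filenames "frame.jpg" and "pose_frame.png" are placeholders, not files created by the Space.

    import numpy as np
    from PIL import Image
    from controlnet_aux import OpenposeDetector

    # Same detector the app loads at startup
    openpose = OpenposeDetector.from_pretrained('lllyasviel/ControlNet')

    # One extracted video frame, read as an RGB array
    frame = np.array(Image.open("frame.jpg"))

    # Run pose detection; app.py assumes a NumPy array is returned,
    # but newer controlnet_aux releases may return a PIL image directly
    pose = openpose(frame)
    if not isinstance(pose, Image.Image):
        pose = Image.fromarray(pose)
    pose.save("pose_frame.png")

In the app, this step is repeated for every frame written by get_frames(), and the resulting images are reassembled into a video with moviepy's ImageSequenceClip at the original frame rate.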
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ diffusers==0.14.0
+ opencv-python
+ ffmpeg-python
+ moviepy
+ controlnet_aux