# Hugging Face Space: Omnibus/Deblur-Media (page header "Spaces: Running" was
# scrape residue, preserved here as a comment so the module stays importable).
import gradio as gr | |
import yt_dlp | |
import os | |
#import json | |
import numpy as np | |
import cv2 | |
import uuid | |
import moviepy | |
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip | |
import moviepy.video.io.ImageSequenceClip | |
from moviepy.editor import * | |
from PIL import Image | |
# Per-session workspace id: namespaces all scratch directories/files so
# concurrent sessions don't clobber each other's frames and outputs.
uid = uuid.uuid4()

# Scratch directories: downloads/encodes, extracted frames, rembg output.
# exist_ok=True replaces the original racy "if not exists: makedirs" checks.
for _dir in (f'{uid}', f'{uid}-frames', f'{uid}-rembg'):
    os.makedirs(_dir, exist_ok=True)

# Remote Real-ESRGAN space that performs the actual upscaling work.
esr = gr.Interface.load("spaces/Omnibus/Real-ESRGAN-mod")

# Public URL of this space; frame files are served from its /file= endpoint
# so the remote ESRGAN space can fetch them by URL.
main_url = "https://omnibus-Deblur-Media.hf.space"

# Client-side hook: copies the page's query-string params into url_params.
load_js = """
function(text_input, url_params) {
console.log(text_input, url_params);
const params = new URLSearchParams(window.location.search);
url_params = Object.fromEntries(params);
return [text_input, url_params]
}
"""
def load_video(vid):
    """Probe a video file and report its frame count, FPS and duration.

    Returns (frame_count, fps, "H:M:S" duration string, vid) so the UI can
    fill the frame-count/FPS boxes and default the trim end time.
    """
    capture = cv2.VideoCapture(str(vid))
    try:
        fps = capture.get(cv2.CAP_PROP_FPS)
        frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    finally:
        # Always release the handle, even if a property read raises.
        capture.release()
    # Duration in whole seconds; guard against a zero-FPS probe failure,
    # which would previously raise ZeroDivisionError.
    vid_t = round(frame_count / fps) if fps else 0
    # BUG FIX: the original used 360 (not 3600) seconds per hour and mixed
    # the factors up, yielding a wrong H:M:S for any clip over an hour.
    hours, rem = divmod(vid_t, 3600)
    minutes, seconds = divmod(rem, 60)
    vid_len = f'{hours}:{minutes}:{seconds}'
    return frame_count, fps, vid_len, vid
def im_2_vid(images, fps):
    """Stitch a list of frame image paths into an mp4 in the rembg dir.

    Returns the output path twice (for a video component and a file
    component).  The original also cv2.imread the first frame to compute an
    unused (width, height) tuple — dropped as dead code.
    """
    out_path = f'{uid}-rembg/bg_removed-{uid}.mp4'
    movie_clip = moviepy.video.io.ImageSequenceClip.ImageSequenceClip(images, fps)
    movie_clip.write_videofile(out_path)
    return (out_path, out_path)
def predict(text, url_params):
    """App-load callback: echo the text input and pull ``url`` out of the
    query-string parameters captured by ``load_js``.

    Returns ``[text, url-or-None]``.  The original also constructed several
    never-used gradio objects (mod, valid, out) — removed as dead code.
    """
    return ["" + text + "", url_params.get('url')]
def dl(inp, img):
    """Fetch the working video.

    Downloads ``inp`` with yt-dlp when only a URL is given, or adopts the
    already-uploaded ``img`` video when only that is given.

    Returns (out, out, out, out, fps): ``out`` is the local mp4 path (or
    None), repeated for the several components wired to it; ``fps`` is the
    probed frame rate or the string "Error" when nothing was loaded.
    """
    import subprocess  # local import: only the download branch needs it
    fps = "Error"
    out = None
    if img is None and inp != "":
        try:
            # Build a filesystem-safe name from the URL.
            inp_out = inp.replace("https://", "")
            inp_out = inp_out.replace("/", "_").replace(".", "_").replace("=", "_").replace("?", "_")
            out = f"{uid}/{inp_out}.mp4"
            # SECURITY FIX: run yt-dlp with an argument list (shell=False)
            # so a crafted URL cannot inject shell commands; the original
            # interpolated the URL into an os.system() string.
            subprocess.run(
                ["yt-dlp", inp, "--trim-filenames", "160", "-o", out,
                 "-S", "res,mp4", "--recode", "mp4"],
                check=False,
            )
            capture = cv2.VideoCapture(out)
            fps = capture.get(cv2.CAP_PROP_FPS)
            capture.release()
        except Exception as e:
            # Best-effort: report the failure and fall through with out=None.
            print(e)
            out = None
    elif img is not None and inp == "":
        capture = cv2.VideoCapture(img)
        fps = capture.get(cv2.CAP_PROP_FPS)
        capture.release()
        out = f"{img}"
    return out, out, out, out, fps
def trim_vid(vid, start_time, end_time):
    """Cut [start_time, end_time] (both "H:M:S" strings) out of ``vid``
    into ``{uid}-clip.mp4``.

    Returns (clip path, frame count of the clip).
    """
    def _to_seconds(ts):
        # "H:M:S" -> total seconds.  BUG FIX: the original multiplied the
        # hours field by 360 instead of 3600.
        h, m, s = ts.split(":", 2)
        return float(h) * 3600 + int(m) * 60 + float(s)

    start = _to_seconds(start_time)
    end = _to_seconds(end_time)
    ffmpeg_extract_subclip(vid, start, end, targetname=f"{uid}-clip.mp4")
    out = f"{uid}-clip.mp4"
    capture = cv2.VideoCapture(out)
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    capture.release()
    return out, frame_count
def video_clip(program, fps, qual, model_name, denoise_strength, face_enhance, outscale):
    """Generator: split ``{uid}-clip.mp4`` into frames, optionally upscale
    each frame via the remote Real-ESRGAN space, and re-assemble them (with
    the source audio) into ``{uid}/enhanced_vid.mp4``.

    Yields (video, file, status-message) tuples for live UI updates; the
    final yield carries the finished video path.  ``program`` is unused here
    (signature shared with gif_clip).
    """
    _ = None
    fps = float(fps)
    new_video_in = f"{uid}-clip.mp4"
    capture = cv2.VideoCapture(new_video_in)
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    fbox = []
    out_box = []
    # NOTE(review): iterates frame_count-1 times, so the final frame is
    # never extracted — kept as-is (possibly avoids an EOF read failure),
    # but worth confirming.
    for i in range(frame_count - 1):
        capture.set(cv2.CAP_PROP_POS_FRAMES, i)
        # BUG FIX: read() takes no frame-index argument (the original passed
        # i, which is interpreted as an output-buffer arg).
        ret, frame_f = capture.read()
        cv2.imwrite(f'{uid}-frames/{i+1}.png', frame_f)
        fbox.append(f'{uid}-frames/{i+1}.png')
        yield _, _, f'Working on {i+1} of {frame_count}'
    capture.release()  # BUG FIX: the original leaked the capture handle
    yield _, _, "Frames Complete"
    if qual:
        yield _, _, "Improving Quality"
        for i, img in enumerate(fbox):
            # The remote space fetches each frame back through this space's
            # public /file= endpoint.
            out_url = f'{main_url}/file={os.path.abspath(img)}'
            yield _, _, f'Improving frame {i+1} of {frame_count}'
            out = esr(out_url, model_name, float(denoise_strength), face_enhance, int(outscale))
            out_box.append(f'{out}')
            print(f'{main_url}/file={out}')
        print(f'out_box::{out_box}')
        clip = ImageSequenceClip(out_box, fps=fps)
    else:
        clip = ImageSequenceClip(fbox, fps=fps)
    yield _, _, "Writing Video"
    # Carry the source clip's audio track over to the rebuilt video.
    videoclip = VideoFileClip(new_video_in)
    clip.audio = videoclip.audio
    clip.write_videofile(f"{uid}/enhanced_vid.mp4")
    yield _, _, "Saving Video"
    out = f"{uid}/enhanced_vid.mp4"
    yield out, out, "Video Complete"
def gif_clip(program, fps, qual, model_name, denoise_strength, face_enhance, outscale):
    """Generator: like video_clip, but renders ``{uid}/clip_gif.gif`` (at
    2x speed) using the selected GIF engine ``program``.

    Yields (gif, file, status-message) tuples for live UI updates.
    """
    _ = None
    fps = float(fps)
    new_video_in = f"{uid}-clip.mp4"
    capture = cv2.VideoCapture(new_video_in)
    frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    fbox = []
    out_box = []
    # NOTE(review): last frame skipped (frame_count-1), same as video_clip.
    for i in range(frame_count - 1):
        capture.set(cv2.CAP_PROP_POS_FRAMES, i)
        # BUG FIX: read() takes no frame-index argument.
        ret, frame_f = capture.read()
        cv2.imwrite(f'{uid}-frames/{i+1}.png', frame_f)
        fbox.append(f'{uid}-frames/{i+1}.png')
        yield _, _, f'Working on {i+1} of {frame_count}'
    capture.release()  # BUG FIX: the original leaked the capture handle
    yield _, _, "Frames Complete"
    if qual:
        yield _, _, "Improving Quality"
        for i, img in enumerate(fbox):
            out_url = f'{main_url}/file={os.path.abspath(img)}'
            yield _, _, f'Improving frame {i+1} of {frame_count}'
            out = esr(out_url, model_name, float(denoise_strength), face_enhance, int(outscale))
            out_box.append(f'{out}')
            print(f'{main_url}/file={out}')
        clip = ImageSequenceClip(out_box, fps=fps)
    else:
        clip = ImageSequenceClip(fbox, fps=fps)
    yield _, _, "Writing GIF"
    # Double the playback speed, then encode with the chosen engine.
    clip.speedx(2).to_gif(f"{uid}/clip_gif.gif", program=program)
    out = f"{uid}/clip_gif.gif"
    yield out, out, "GIF Complete"
def improve_img(inp, model_name, denoise_strength, face_enhance, outscale):
    """Generator: save the uploaded image to disk, hand its public URL to
    the remote Real-ESRGAN space, and yield the enhanced result.

    Yields (image, status) pairs so the UI shows progress.
    """
    placeholder = None
    yield placeholder, "Reading image"
    # NOTE(review): the array is channel-swapped (BGR<->RGB) before saving —
    # presumably to counter a swap downstream; confirm against the remote space.
    tmp_path = f'{uid}-frames/tmp_im.png'
    cv2.imwrite(tmp_path, cv2.cvtColor(inp, cv2.COLOR_BGR2RGB))
    frame_url = f'{main_url}/file={os.path.abspath(tmp_path)}'
    yield placeholder, "Improving image"
    enhanced = esr(frame_url, model_name, float(denoise_strength), face_enhance, int(outscale))
    yield enhanced, "Complete"
def update_speed(inp, clip_speed, fps):
    """Re-time ``inp`` by ``clip_speed`` and write the result next to the
    source as ``*-split.mp4``.

    Returns the new path three times (video player, hidden state box, files
    component).
    """
    # Derive the output name; a file that is already a "-split.mp4" keeps
    # the same name so repeated speed changes don't stack suffixes.
    if "-split.mp4" in inp:
        target = f'{inp.split("-split.mp4",1)[0]}-split.mp4'
    else:
        target = f'{inp.split(".mp4",1)[0]}-split.mp4'
    adjusted_fps = float(fps) * float(clip_speed)
    source_clip = VideoFileClip(inp)
    retimed = moviepy.video.fx.all.speedx(source_clip, factor=clip_speed)
    retimed.write_videofile(f'{target}', fps=adjusted_fps)
    out = f'{target}'
    return out, out, out
def echo_fn(inp):
    """Identity passthrough used for simple component-to-component wiring."""
    return inp
def check_load(inp_url, outp_vid, hid_box, start_f, end_f):
    """Route to either the trim path (URL input) or the uploaded video.

    Returns (trimmed clip path or None, video for the player, trim frame
    count or "").
    """
    # Defaults: BUG FIX — the original raised UnboundLocalError when
    # neither branch matched (e.g. both a URL and an upload present).
    out_trim, in_vid, trim_count = None, None, ""
    if outp_vid is None and inp_url != "":
        # BUG FIX: trim_vid returns (path, frame_count) — the original
        # unpacked three values here and always raised ValueError.
        out_trim, trim_count = trim_vid(hid_box, start_f, end_f)
        in_vid = out_trim
    elif outp_vid is not None and inp_url == "":
        out_trim = None
        in_vid = outp_vid
        trim_count = ""
    return out_trim, in_vid, trim_count
def run_samp(program, fps, qual, model_name, denoise_strength, face_enhance, outscale, frame_targ):
    """Generator: extract frame ``frame_targ`` from ``{uid}-clip.mp4`` and
    run it through the remote upscaler as a one-frame preview of the
    current quality settings.

    Yields (image, gif-status, video-status).  Only model_name,
    denoise_strength, face_enhance, outscale and frame_targ are used; the
    other parameters keep the signature shared with the full-clip runners.
    """
    _ = None
    new_video_in = f"{uid}-clip.mp4"
    capture = cv2.VideoCapture(new_video_in)
    i = int(frame_targ)
    capture.set(cv2.CAP_PROP_POS_FRAMES, i)
    # BUG FIX: read() takes no frame-index argument.
    ret, frame_f = capture.read()
    capture.release()  # BUG FIX: the original leaked the capture handle
    cv2.imwrite(f'{uid}-frames/{i}.png', frame_f)
    out_url = f'{main_url}/file={os.path.abspath(f"{uid}-frames/{i}.png")}'
    yield _, "Creating Sample Frame", "Creating Sample Frame"
    out = esr(out_url, model_name, float(denoise_strength), face_enhance, int(outscale))
    yield out, "Sample Frame Complete", "Sample Frame Complete"
def new_media(inp):
    """Toggle panel visibility for the selected media type.

    Returns gr.update() visibility flags for (im_box, vid_box, gif_row,
    vid_row); returns None (no update) for any unrecognized value, exactly
    as the original if-chain did.
    """
    # Visibility per media type: (im_box, vid_box, gif_row, vid_row).
    visibility = {
        "Image": (True, False, False, False),
        "Video": (False, True, False, True),
        "GIF": (False, True, True, False),
    }
    if inp in visibility:
        im, vid, gif, vrow = visibility[inp]
        return (
            gr.update(visible=im),
            gr.update(visible=vid),
            gr.update(visible=gif),
            gr.update(visible=vrow),
        )
# Custom styling for the Blocks UI.
# BUG FIX: the original wrote "background:#xxxxxx;!important;" — the
# semicolon before !important terminates the declaration, so the stray
# "!important;" is an invalid declaration that browsers drop and the
# priority never applied.  Corrected to "property: value !important;".
css = '''
#component-0{
background:repeating-radial-gradient(white, cornflowerblue);
}
.padded.svelte-90oupt{
background:cornflowerblue;
}
.dark .gr-box{
background:#344d74 !important;
opacity:1 !important;
}
.p-2 {
background:#344d74 !important;
opacity:1 !important;
}
.gap-4 {
background:#6681ab !important;
opacity:1 !important;
}
.dark .gr-padded{
background:#21314a !important;
opacity:1 !important;
}
'''
# ---------------------------------------------------------------------------
# UI layout and event wiring.  NOTE(review): the source paste had all
# indentation flattened; the nesting below is reconstructed from the
# component order and standard gradio Blocks conventions — confirm against
# the live space.
# ---------------------------------------------------------------------------
with gr.Blocks(css=css) as app:
    with gr.Row():
        gr.Column()  # left spacer column
        with gr.Column():
            with gr.Group():
                # Media selector; toggles which panel below is visible (new_media).
                choose_media = gr.Radio(label="Media Type",choices=["Image","Video","GIF"],value="Image")
                with gr.Accordion("Quality Options", open=False):
                    with gr.Row():
                        # Settings forwarded verbatim to the remote Real-ESRGAN space.
                        model_name = gr.Dropdown(label="Real-ESRGAN Model",
                            choices=["RealESRGAN_x4plus", "RealESRNet_x4plus", "RealESRGAN_x4plus_anime_6B",
                                     "RealESRGAN_x2plus", "realesr-general-x4v3"],
                            value="realesr-general-x4v3", show_label=True)
                        face_enhance = gr.Checkbox(label="Face Enhancement using GFPGAN",
                            value=False, show_label=True)
                        denoise_strength = gr.Slider(label="Denoise Strength",
                            minimum=0, maximum=1, step=0.1, value=0.5)
                        outscale = gr.Slider(label="Image Upscaling Factor",
                            minimum=1, maximum=10, step=1, value=1, show_label=True)
                # --- Image panel (default visible) ---
                with gr.Box(visible=True) as im_box:
                    with gr.Group():
                        inp_img_url=gr.Textbox(label="Image URL")
                        load_im_btn=gr.Button("Load URL")
                    inp_img=gr.Image(label = "Input Image")
                    with gr.Row():
                        proc_im_btn=gr.Button("Improve")
                        im_stat=gr.Textbox(label="Status")
                    outp_img=gr.Image(label="Enhanced Image")
                # --- Video panel ---
                with gr.Box(visible=False) as vid_box:
                    with gr.Group():
                        inp_url = gr.Textbox(label="Video URL")
                        go_btn = gr.Button("Run")
                    outp_vid=gr.Video(format="mp4")
                    with gr.Row():
                        frame_count=gr.Textbox(label="Frame Count",interactive = False)
                        fps=gr.Textbox(label="FPS",interactive = False)
                    outp_file=gr.Files()
                    clip_speed = gr.Slider(label="Speed", minimum=0.01, maximum=2, value=1, step=0.01)
                    speed_btn = gr.Button("Update Speed")
                    with gr.Row():
                        start_f = gr.Textbox(label = "Start", value = "0:00:00", placeholder = "0:00:23",interactive = True)
                        end_f = gr.Textbox(label = "End", value = "0:00:05", placeholder = "0:00:54",interactive = True)
                        trim_count = gr.Textbox(label="Trimmed Frames")
                    trim_btn=gr.Button("Trim")
                    out_trim=gr.Video(format="mp4")
                    # Hidden state: current working file path and its FPS.
                    hid_box = gr.Textbox(visible=False)
                    hid_fps = gr.Textbox(visible=False)
                    with gr.Row():
                        qual=gr.Checkbox(label="Improve Quality",value=False)
                    with gr.Accordion("Sample Frame",open=False):
                        with gr.Row():
                            frame_targ=gr.Number(label="Frame",value=1)
                            samp_btn=gr.Button("Get Sample")
                        samp_im = gr.Image(label="Sample Frame")
                # --- GIF controls (shown only for the GIF media type) ---
                with gr.Column(visible=False) as gif_row:
                    with gr.Row():
                        choose_prog=gr.Radio(label="GIF Engine",choices=['imageio','ImageMagick','ffmpeg'], value = "imageio")
                    with gr.Row():
                        gif_btn = gr.Button("Make GIF")
                        gif_stat=gr.Textbox(label="Status")
                # --- Video render controls (shown only for the Video media type) ---
                with gr.Row(visible=False) as vid_row:
                    vid_btn = gr.Button("Make Video")
                    vid_stat=gr.Textbox(label="Status")
                # Shared output area for rendered video/GIF and file download.
                with gr.Row():
                    with gr.Column():
                        gif_show = gr.Video()
                    with gr.Column():
                        gif_file = gr.Files()
        gr.Column()  # right spacer column
    # Hidden components used by the query-string handshake in load_js.
    with gr.Row(visible=False):
        text_input=gr.Textbox()
        text_output=gr.Textbox()
        url_params=gr.JSON()
    def load_im_fn(inp):
        # Pass the typed URL straight through to the Image component.
        return inp
    # --- Event wiring ---
    load_im_btn.click(load_im_fn,inp_img_url,inp_img)
    proc_im_btn.click(improve_img,[inp_img,model_name,denoise_strength,face_enhance,outscale],[outp_img,im_stat])
    choose_media.change(new_media,choose_media,[im_box,vid_box,gif_row,vid_row])
    samp_btn.click(run_samp,[choose_prog,fps,qual,model_name,denoise_strength,face_enhance,outscale,frame_targ],[samp_im,gif_stat,vid_stat])
    speed_btn.click(update_speed,[hid_box,clip_speed,hid_fps],[outp_vid,hid_box,outp_file])
    gif_btn.click(gif_clip,[choose_prog,fps,qual,model_name,denoise_strength,face_enhance,outscale],[gif_show,gif_file,gif_stat])
    vid_btn.click(video_clip,[choose_prog,fps,qual,model_name,denoise_strength,face_enhance,outscale],[gif_show,gif_file,vid_stat])
    trim_btn.click(trim_vid,[hid_box,start_f,end_f],[out_trim,trim_count])
    # Loading/changing a video probes it, then immediately cuts the default trim clip.
    outp_vid.change(load_video,outp_vid,[frame_count,fps,end_f,hid_box]).then(trim_vid,[hid_box,start_f,end_f],[out_trim,trim_count])
    go_btn.click(dl,[inp_url,outp_vid],[outp_vid,outp_file,out_trim,hid_box,hid_fps])
    # On page load, pull ?url= from the query string via the _js hook (gradio 3.x API).
    app.load(fn=predict, inputs=[text_input,url_params], outputs=[text_output,text_input],_js=load_js)
app.launch(show_api=False)