Spaces:
Runtime error
Runtime error
Ahsen Khaliq
committed on
Commit
•
4c188b8
1
Parent(s):
4c71d5b
Update app.py
Browse files
app.py
CHANGED
@@ -4,11 +4,11 @@ import torch
|
|
4 |
import gradio as gr
|
5 |
os.system("pip install gradio==2.5.3")
|
6 |
|
7 |
-
os.system("pip install facexlib")
|
8 |
|
9 |
-
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
|
10 |
#os.system("pip install autocrop")
|
11 |
-
|
12 |
#from autocrop import Cropper
|
13 |
import torch
|
14 |
torch.backends.cudnn.benchmark = True
|
@@ -34,19 +34,19 @@ os.makedirs('style_images', exist_ok=True)
|
|
34 |
os.makedirs('style_images_aligned', exist_ok=True)
|
35 |
os.makedirs('models', exist_ok=True)
|
36 |
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
|
41 |
#cropper = Cropper(face_percent=80)
|
42 |
|
43 |
-
face_helper = FaceRestoreHelper(
|
44 |
-
upscale_factor=0,
|
45 |
-
face_size=512,
|
46 |
-
crop_ratio=(1, 1),
|
47 |
-
det_model='retinaface_resnet50',
|
48 |
-
save_ext='png',
|
49 |
-
device='cpu')
|
50 |
|
51 |
device = 'cpu'
|
52 |
|
@@ -97,8 +97,8 @@ generatorjinx.load_state_dict(ckptjinx["g"], strict=False)
|
|
97 |
|
98 |
|
99 |
def inference(img, model):
|
100 |
-
face_helper.clean_all()
|
101 |
-
|
102 |
#cropped_array = cropper.crop(img[:,:,::-1])
|
103 |
|
104 |
#if cropped_array.any():
|
@@ -106,12 +106,12 @@ def inference(img, model):
|
|
106 |
#else:
|
107 |
#aligned_face = Image.fromarray(img[:,:,::-1])
|
108 |
|
109 |
-
face_helper.read_image(img)
|
110 |
-
face_helper.get_face_landmarks_5(only_center_face=False, eye_dist_threshold=10)
|
111 |
-
face_helper.align_warp_face(save_cropped_path="/home/user/app/")
|
112 |
-
pilimg = Image.open("/home/user/app/_02.png")
|
113 |
|
114 |
-
my_w = e4e_projection(
|
115 |
if model == 'JoJo':
|
116 |
with torch.no_grad():
|
117 |
my_sample = generatorjojo(my_w, input_is_latent=True)
|
|
|
4 |
import gradio as gr
|
5 |
os.system("pip install gradio==2.5.3")
|
6 |
|
7 |
+
#os.system("pip install facexlib")
|
8 |
|
9 |
+
#from facexlib.utils.face_restoration_helper import FaceRestoreHelper
|
10 |
#os.system("pip install autocrop")
|
11 |
+
os.system("pip install dlib")
|
12 |
#from autocrop import Cropper
|
13 |
import torch
|
14 |
torch.backends.cudnn.benchmark = True
|
|
|
34 |
os.makedirs('style_images_aligned', exist_ok=True)
|
35 |
os.makedirs('models', exist_ok=True)
|
36 |
|
37 |
+
os.system("wget http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2")
|
38 |
+
os.system("bzip2 -dk shape_predictor_68_face_landmarks.dat.bz2")
|
39 |
+
os.system("mv shape_predictor_68_face_landmarks.dat models/dlibshape_predictor_68_face_landmarks.dat")
|
40 |
|
41 |
#cropper = Cropper(face_percent=80)
|
42 |
|
43 |
+
#face_helper = FaceRestoreHelper(
|
44 |
+
#upscale_factor=0,
|
45 |
+
#face_size=512,
|
46 |
+
#crop_ratio=(1, 1),
|
47 |
+
#det_model='retinaface_resnet50',
|
48 |
+
#save_ext='png',
|
49 |
+
#device='cpu')
|
50 |
|
51 |
device = 'cpu'
|
52 |
|
|
|
97 |
|
98 |
|
99 |
def inference(img, model):
|
100 |
+
#face_helper.clean_all()
|
101 |
+
aligned_face = align_face(img)
|
102 |
#cropped_array = cropper.crop(img[:,:,::-1])
|
103 |
|
104 |
#if cropped_array.any():
|
|
|
106 |
#else:
|
107 |
#aligned_face = Image.fromarray(img[:,:,::-1])
|
108 |
|
109 |
+
#face_helper.read_image(img)
|
110 |
+
#face_helper.get_face_landmarks_5(only_center_face=False, eye_dist_threshold=10)
|
111 |
+
#face_helper.align_warp_face(save_cropped_path="/home/user/app/")
|
112 |
+
#pilimg = Image.open("/home/user/app/_02.png")
|
113 |
|
114 |
+
my_w = e4e_projection(aligned_face, "test.pt", device).unsqueeze(0)
|
115 |
if model == 'JoJo':
|
116 |
with torch.no_grad():
|
117 |
my_sample = generatorjojo(my_w, input_is_latent=True)
|