zhiweili committed on
Commit 5068971
1 Parent(s): 8be2702

change base model

Files changed (3)
  1. app_diffedit.py +4 -0
  2. hair_color.py +59 -0
  3. hair_gray.py +60 -0
app_diffedit.py CHANGED
@@ -13,6 +13,10 @@ from diffusers import (
 DEFAULT_SRC_PROMPT = "a woman"
 DEFAULT_EDIT_PROMPT = "a woman, with red lips, 8k, high quality"
 
+BASE_MODEL = "stabilityai/stable-diffusion-xl-base-1.0"
+# BASE_MODEL = "stabilityai/sdxl-turbo"
+# BASE_MODEL = "stabilityai/stable-diffusion-2-1"
+
 basepipeline = StableDiffusionDiffEditPipeline.from_pretrained(
     "stabilityai/stable-diffusion-2-1",
     torch_dtype=torch.float16,
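Note that the hunk above only introduces the BASE_MODEL constant; the from_pretrained call it shows still uses the hard-coded model id. A minimal sketch of how the constant could be consumed (an assumption about later usage, not part of this diff):

# Hypothetical follow-up, not in this hunk: pass the new constant to the loader.
# Caveat (assumption): StableDiffusionDiffEditPipeline is built around SD 2.x
# checkpoints, so the SDXL / sdxl-turbo ids may need a different pipeline class.
import torch
from diffusers import StableDiffusionDiffEditPipeline

BASE_MODEL = "stabilityai/stable-diffusion-2-1"  # shown with the SD 2.1 id so it loads with this pipeline class

basepipeline = StableDiffusionDiffEditPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.float16,
)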
hair_color.py ADDED
@@ -0,0 +1,59 @@
+import cv2
+import numpy as np
+import mediapipe as mp
+
+from PIL import Image
+from segment_utils import (
+    segmenter
+)
+
+original_image = Image.open("origin.jpeg")
+
+# Segment the image and build a binary hair mask (category 1 is hair).
+image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(original_image))
+segmentation_result = segmenter.segment(image)
+category_mask = segmentation_result.category_mask
+category_mask_np = category_mask.numpy_view()
+hair_mask = category_mask_np == 1
+mask_image = Image.fromarray((hair_mask * 255).astype(np.uint8))
+
+img = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
+hair_mask_image = cv2.cvtColor(np.array(mask_image), cv2.COLOR_GRAY2BGR)
+
+
+def rgb2scale(bgr):
+    # Per-channel scale factors; earlier normalization attempts kept for reference.
+    lo = min(bgr)
+    hi = max(bgr)
+    avg = (lo + hi) / 2
+    # return [val / hi for val in bgr]
+    # return [(val - lo) / (hi - lo) for val in bgr]
+    # return [val / avg for val in bgr]
+    return [val / 127.5 for val in bgr]
+
+
+bgr = [100, 192, 192]
+# scale = [0.3, 0.1, 1]
+scale = rgb2scale(bgr)
+print(scale)
+
+# Tint the hair by multiplying the hair pixels by the per-channel scale.
+# A float array is needed so the fractional scale values are not truncated.
+a = np.where(hair_mask > 0)
+ones = np.ones_like(img, dtype=np.float32)
+ones[a] = scale
+
+r = np.clip(img * ones, 0, 255).astype(np.uint8)
+cv2.imwrite("hair_color.jpeg", r)
+
+
+# Alternative: flat-color the hair, then blend it back with the original.
+green_hair = np.copy(img)
+# Boolean indexing and assignment based on mask (BGR order, so [255, 0, 0] is blue).
+green_hair[(hair_mask_image == 255).all(-1)] = [255, 0, 0]
+
+green_hair_w = cv2.addWeighted(green_hair, 0.3, img, 0.7, 0, green_hair)
+
+cv2.imwrite("green_hair.jpeg", green_hair_w)
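Both new scripts import segmenter from segment_utils, which is not part of this commit. A minimal sketch of what that module is assumed to provide, a MediaPipe ImageSegmenter whose category mask labels hair as class 1 (model path hypothetical):

# segment_utils.py (assumed, not part of this commit)
from mediapipe.tasks import python
from mediapipe.tasks.python import vision

# Hypothetical model file; the multiclass selfie segmenter labels hair as category 1,
# which matches `category_mask_np == 1` in these scripts.
base_options = python.BaseOptions(model_asset_path="selfie_multiclass_256x256.tflite")
options = vision.ImageSegmenterOptions(
    base_options=base_options,
    output_category_mask=True,
)
segmenter = vision.ImageSegmenter.create_from_options(options)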
hair_gray.py ADDED
@@ -0,0 +1,60 @@
+import cv2
+import numpy as np
+import mediapipe as mp
+
+from PIL import Image
+from segment_utils import (
+    segmenter
+)
+
+
+def convert_to_grayscale(pil_image: Image.Image):
+    gray_image = pil_image.convert('L')
+    # return Image.merge('RGB', (gray_image, gray_image, gray_image))
+    return gray_image
+
+
+original_image = Image.open("/Users/zhiweili/Documents/20240830-175314.jpeg")
+gray_image = convert_to_grayscale(original_image)
+# gray_image.save("gray_image.jpeg")
+
+# Segment the image and build a binary hair mask (category 1 is hair).
+image = mp.Image(image_format=mp.ImageFormat.SRGB, data=np.asarray(original_image))
+segmentation_result = segmenter.segment(image)
+category_mask = segmentation_result.category_mask
+category_mask_np = category_mask.numpy_view()
+hair_mask = category_mask_np == 1
+mask_image = Image.fromarray((hair_mask * 255).astype(np.uint8))
+
+# PIL path: paste the grayscale image over the original, restricted to the hair mask.
+# Note this mutates original_image in place, so the OpenCV path below works on the
+# already-composited image.
+original_image.paste(gray_image, (0, 0), mask_image)
+
+original_image.save("tmp.jpeg")
+
+# mask_image.show()
+
+# OpenCV path: extract the hair area into its own image.
+img = cv2.cvtColor(np.array(original_image), cv2.COLOR_RGB2BGR)
+hair_image = np.copy(img)
+hair_image[~hair_mask] = 0
+
+# Gray the hair image and expand it back to three channels.
+gray_hair_image = cv2.cvtColor(hair_image, cv2.COLOR_BGR2GRAY)
+# gray_hair_image_3d = np.repeat(gray_hair_image[:, :, np.newaxis], 3, axis=2)
+gray_hair_image_3d = cv2.merge([gray_hair_image] * 3)
+# Paste the gray hair pixels back into the original image by mask.
+img[hair_mask] = gray_hair_image_3d[hair_mask]
+
+cv2.imwrite("gray_hair.jpeg", img)
+
+
+# PIL variant: paste the OpenCV-grayed hair back, again restricted to the hair mask.
+hairPiLImage = Image.fromarray(gray_hair_image)
+original_image.paste(hairPiLImage, (0, 0), mask_image)
+original_image.save("finalImage.jpeg")
+
+
+# hair_mask_image = cv2.cvtColor(np.array(mask_image), cv2.COLOR_RGB2BGR)
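For reference, the PIL paste-with-mask step above can also be expressed as a single composite call; a minimal sketch using the same variable names, assuming original_image has not already been pasted over:

# Sketch (not part of the commit): take gray pixels where the mask is white,
# original pixels elsewhere. Modes must match, so convert the grayscale to RGB.
gray_rgb = gray_image.convert("RGB")
composited = Image.composite(gray_rgb, original_image, mask_image)
composited.save("gray_hair_pil.jpeg")  # hypothetical output name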