edwin200311 committed
Commit 64c746e • 1 Parent(s): d67972f
thkss
app.py
CHANGED
@@ -7,8 +7,6 @@ from PIL import Image
 import tensorflow as tf
 from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

-
-
 feature_extractor = SegformerFeatureExtractor.from_pretrained(
     "mattmdjaga/segformer_b2_clothes"
 )
@@ -16,7 +14,6 @@ model = TFSegformerForSemanticSegmentation.from_pretrained(
     "mattmdjaga/segformer_b2_clothes"
 )

-
 def ade_palette():
     """ADE20K palette that maps each class to RGB values."""
     return [
@@ -40,7 +37,6 @@ def ade_palette():
         [67, 57, 91],
     ]

-
 labels_list = []

 with open(r'labels.txt', 'r') as fp:
@@ -49,7 +45,6 @@ with open(r'labels.txt', 'r') as fp:

 colormap = np.asarray(ade_palette())

-
 def label_to_color_image(label):
     if label.ndim != 2:
         raise ValueError("Expect 2-D input label")
@@ -58,7 +53,6 @@ def label_to_color_image(label):
         raise ValueError("label value too large.")
     return colormap[label]

-
 def draw_plot(pred_img, seg):
     fig = plt.figure(figsize=(20, 15))

@@ -80,7 +74,6 @@ def draw_plot(pred_img, seg):
     ax.tick_params(width=0.0, labelsize=25)
     return fig

-
 def sepia(input_img):
     input_img = Image.fromarray(input_img)

@@ -107,11 +100,11 @@ def sepia(input_img):
     fig = draw_plot(pred_img, seg)
     return fig

-
 demo = gr.Interface(fn=sepia,
                     inputs=gr.Image(shape=(400, 600)),
                     outputs=['plot'],
-                    examples=["person-1.jpg,person-2.jpg,person-3.jpg,person-4.jpg,person-5.jpg"],
+                    examples=["person-1.jpg", "person-2.jpg", "person-3.jpg", "person-4.jpg", "person-5.jpg"],
                     allow_flagging='never')

+
 demo.launch()
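Aside from the deleted blank lines, the substantive change is the examples argument of gr.Interface: the old version passed one comma-joined string, which Gradio treats as a single example whose image path is that entire string (a file that does not exist), while the new version passes five separate paths, one per example row. Below is a minimal sketch of the corrected pattern, with a hypothetical placeholder in place of the Space's sepia() segmentation function; it assumes the person-*.jpg files sit next to the script, as they appear to in this repository.

import gradio as gr

def show_image(img):
    # Hypothetical stand-in for the Space's sepia() segmentation function.
    return img

demo = gr.Interface(fn=show_image,
                    inputs=gr.Image(),
                    outputs=gr.Image(),
                    # One list entry per example image; a single
                    # "a.jpg,b.jpg" string would be read as one example
                    # pointing at a file that does not exist.
                    examples=["person-1.jpg", "person-2.jpg", "person-3.jpg"])

demo.launch()

With a single image input, each entry in examples fills that one input for one example row; an interface with multiple inputs would instead take a list of values per entry.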