Spaces:
Runtime error
Runtime error
Added hierarchical manipulation
Browse files
- backend/disentangle_concepts.py +17 -16
- pages/1_Disentanglement.py +9 -3
backend/disentangle_concepts.py
CHANGED
@@ -51,7 +51,7 @@ def get_separation_space(type_bin, annotations, df, samples=100, method='LR', C=
|
|
51 |
return clf.coef_ / np.linalg.norm(clf.coef_), imp_features, imp_nodes, np.round(clf.score(x_val, y_val),2)
|
52 |
|
53 |
|
54 |
-
def regenerate_images(model, z, decision_boundary, min_epsilon=-3, max_epsilon=3, count=5, latent_space='Z'):
|
55 |
"""
|
56 |
The regenerate_images function takes a model, z, and decision_boundary as input. It then
|
57 |
constructs an inverse rotation/translation matrix and passes it to the generator. The generator
|
@@ -69,13 +69,12 @@ def regenerate_images(model, z, decision_boundary, min_epsilon=-3, max_epsilon=3
|
|
69 |
"""
|
70 |
device = torch.device('cpu')
|
71 |
G = model.to(device) # type: ignore
|
72 |
-
|
73 |
if False:
|
74 |
decision_boundary = z - (np.dot(z, decision_boundary.T) / np.dot(decision_boundary, decision_boundary.T)) * decision_boundary
|
75 |
# Labels.
|
76 |
label = torch.zeros([1, G.c_dim], device=device)
|
77 |
-
|
78 |
-
|
79 |
z = torch.from_numpy(z.copy()).to(device)
|
80 |
decision_boundary = torch.from_numpy(decision_boundary.copy()).to(device)
|
81 |
|
@@ -84,25 +83,26 @@ def regenerate_images(model, z, decision_boundary, min_epsilon=-3, max_epsilon=3
|
|
84 |
# Generate images.
|
85 |
for _, lambda_ in enumerate(lambdas):
|
86 |
z_0 = z + lambda_ * decision_boundary
|
87 |
-
# Construct an inverse rotation/translation matrix and pass to the generator. The
|
88 |
-
# generator expects this matrix as an inverse to avoid potentially failing numerical
|
89 |
-
# operations in the network.
|
90 |
-
#if hasattr(G.synthesis, 'input'):
|
91 |
-
#m = make_transform(translate, rotate)
|
92 |
-
#m = np.linalg.inv(m)
|
93 |
-
#G.synthesis.input.transform.copy_(torch.from_numpy(m))
|
94 |
if latent_space == 'Z':
|
95 |
-
|
|
|
|
|
|
|
|
|
96 |
|
|
|
|
|
|
|
|
|
97 |
else:
|
98 |
-
|
99 |
-
|
100 |
-
|
101 |
img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
|
102 |
images.append(PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB'))
|
103 |
-
|
104 |
return images, lambdas
|
105 |
|
|
|
106 |
def generate_joint_effect(model, z, decision_boundaries, min_epsilon=-3, max_epsilon=3, count=5, latent_space='Z'):
|
107 |
decision_boundary_joint = np.sum(decision_boundaries, axis=0)
|
108 |
print(decision_boundary_joint.shape)
|
@@ -172,6 +172,7 @@ def get_concepts_vectors(concepts, annotations, df, samples=100, method='LR', C=
|
|
172 |
return vectors, nodes_in_common, performances
|
173 |
|
174 |
|
|
|
175 |
def get_verification_score(concept, decision_boundary, model, annotations, samples=100, latent_space='Z'):
|
176 |
import open_clip
|
177 |
import os
|
|
|
51 |
return clf.coef_ / np.linalg.norm(clf.coef_), imp_features, imp_nodes, np.round(clf.score(x_val, y_val),2)
|
52 |
|
53 |
|
54 |
+
def regenerate_images(model, z, decision_boundary, min_epsilon=-3, max_epsilon=3, count=5, latent_space='Z', layers=None):
    """
    Regenerate images from a latent code shifted along a separation boundary.

    Moves the latent `z` along `decision_boundary` by `count` evenly spaced
    multipliers (lambdas) in [min_epsilon, max_epsilon] and synthesizes one
    image per multiplier with the generator `model`.

    Parameters
    ----------
    model :
        Generator network exposing `mapping`, `synthesis` and `c_dim`
        (StyleGAN-style G); moved to CPU before use.
    z : np.ndarray
        Latent vector. Interpreted as a Z-space code when
        `latent_space == 'Z'`, otherwise as a single W-space style vector.
    decision_boundary : np.ndarray
        Normal of the separating hyperplane to walk along.
    min_epsilon, max_epsilon : float
        Range of shift multipliers.
    count : int
        Number of images (and lambdas) to generate.
    latent_space : str
        'Z' maps codes through `G.mapping`; anything else broadcasts `z`
        over the generator's 14 style layers.
    layers : sequence of int or None
        When given, only these style layers take the shifted code; all other
        layers keep the original code (hierarchical manipulation). None means
        shift every layer.

    Returns
    -------
    (list[PIL.Image.Image], np.ndarray)
        The generated RGB images and the lambda values used.
    """
    device = torch.device('cpu')
    G = model.to(device)  # type: ignore

    # Unconditional generation: all-zero class-label vector.
    label = torch.zeros([1, G.c_dim], device=device)

    z = torch.from_numpy(z.copy()).to(device)
    decision_boundary = torch.from_numpy(decision_boundary.copy()).to(device)

    lambdas = np.linspace(min_epsilon, max_epsilon, count)
    images = []
    # Generate one image per shift multiplier.
    for lambda_ in lambdas:
        z_0 = z + lambda_ * decision_boundary
        if latent_space == 'Z':
            W_0 = G.mapping(z_0, label, truncation_psi=1)
            W = G.mapping(z, label, truncation_psi=1)
        else:
            # z is already a W vector: broadcast it over the 14 style layers.
            W_0 = z_0.expand((14, -1)).unsqueeze(0)
            W = z.expand((14, -1)).unsqueeze(0)

        # `is not None` (not truthiness): layer 0 alone must still count as a
        # selection, and the UI passes None when nothing is selected.
        if layers is not None:
            # Hierarchical manipulation: replace only the chosen style layers.
            # BUG FIX: torch tensors have no `.copy()` (that is numpy API) and
            # plain `W_f[layers]` would index the batch dimension of the
            # (1, 14, C)-shaped style tensor; clone and index dim 1 instead.
            W_f = W.clone()
            W_f[:, layers] = W_0[:, layers]
            img = G.synthesis(W_f, noise_mode='const')
        else:
            img = G.synthesis(W_0, noise_mode='const')

        # Map [-1, 1] float output to uint8 HWC for PIL.
        img = (img.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8)
        images.append(PIL.Image.fromarray(img[0].cpu().numpy(), 'RGB'))
    return images, lambdas
|
104 |
|
105 |
+
|
106 |
def generate_joint_effect(model, z, decision_boundaries, min_epsilon=-3, max_epsilon=3, count=5, latent_space='Z'):
|
107 |
decision_boundary_joint = np.sum(decision_boundaries, axis=0)
|
108 |
print(decision_boundary_joint.shape)
|
|
|
172 |
return vectors, nodes_in_common, performances
|
173 |
|
174 |
|
175 |
+
|
176 |
def get_verification_score(concept, decision_boundary, model, annotations, samples=100, latent_space='Z'):
|
177 |
import open_clip
|
178 |
import os
|
pages/1_Disentanglement.py
CHANGED
@@ -141,8 +141,14 @@ with input_col_3:
|
|
141 |
with st.form('Variate along the disentangled concept'):
|
142 |
st.write('**Set range of change**')
|
143 |
chosen_epsilon_input = st.empty()
|
144 |
-
epsilon = chosen_epsilon_input.number_input('
|
145 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
146 |
|
147 |
# ---------------------------- DISPLAY COL 2 ROW 1 ------------------------------
|
148 |
|
@@ -165,7 +171,7 @@ with smoothgrad_col_3:
|
|
165 |
smooth_head_3.write(f'Base image')
|
166 |
|
167 |
|
168 |
-
images, lambdas = regenerate_images(model, original_image_vec, separation_vector, min_epsilon=-(int(epsilon)), max_epsilon=int(epsilon), latent_space=st.session_state.space_id)
|
169 |
|
170 |
with smoothgrad_col_1:
|
171 |
st.image(images[0])
|
|
|
141 |
with st.form('Variate along the disentangled concept'):
    st.write('**Set range of change**')
    chosen_epsilon_input = st.empty()
    # Integer lambda bound: images are generated for shifts in [-epsilon, +epsilon].
    epsilon = chosen_epsilon_input.number_input('Lambda:', min_value=1, step=1)
    st.write('**Select hierarchical levels to manipulate**')
    # BUG FIX: st.selectbox returns a single option (an int here), so the
    # `len(layers)` check below raised TypeError; use multiselect, which
    # returns a list and matches the fancy-indexing done in regenerate_images.
    layers = st.multiselect('Layers:', tuple(range(14)))
    if len(layers) == 0:
        # No layers chosen -> manipulate the full style vector.
        layers = None
    print(layers)
    epsilon_button = st.form_submit_button('Choose the defined lambda and layers')
|
151 |
+
|
152 |
|
153 |
# ---------------------------- DISPLAY COL 2 ROW 1 ------------------------------
|
154 |
|
|
|
171 |
smooth_head_3.write(f'Base image')
|
172 |
|
173 |
|
174 |
+
images, lambdas = regenerate_images(model, original_image_vec, separation_vector, min_epsilon=-(int(epsilon)), max_epsilon=int(epsilon), latent_space=st.session_state.space_id, layers=layers)
|
175 |
|
176 |
with smoothgrad_col_1:
|
177 |
st.image(images[0])
|