import streamlit as st
import pickle
import pandas as pd
import numpy as np
import random
import torch
from matplotlib.backends.backend_agg import RendererAgg
from backend.disentangle_concepts import *
import torch_utils
import dnnlib
import legacy
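# Matplotlib's Agg renderer is not thread-safe; this lock serializes figure rendering across Streamlit sessions.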
_lock = RendererAgg.lock
st.set_page_config(layout='wide')
BACKGROUND_COLOR = '#bcd0e7'
SECONDARY_COLOR = '#bce7db'
st.title('Disentanglement on Textile Datasets')
st.markdown(
    """
    This is a demo of the Disentanglement experiment on the [iMET Textiles Dataset](https://www.metmuseum.org/art/collection/search/85531).
    On this page, the user can adjust the colors of AI-generated textile images simply by traversing the latent space of the generative model.
    The colors follow the human-intuitive HSV encoding: the main hue of the image (a choice of 7 colors + Gray),
    the saturation (the amount of gray), and the value (the amount of black).
    """,
    unsafe_allow_html=False,
)
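# ---------------------------- DATA AND MODEL LOADING ----------------------------
# Pre-computed latent vectors (Z and W) for the annotated textile seeds.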
annotations_file = './data/textile_annotated_files/seeds0000-100000_S.pkl'
with open(annotations_file, 'rb') as f:
    annotations = pickle.load(f)
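# Stored color-separation vectors: parse the vector strings back into numpy arrays and
# unpack the hyperparameters recorded in the 'kwargs' column (sign, extremes, regularization,
# classification method, number of factors), then rank the vectors by verification score.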
concept_vectors = pd.read_csv('./data/stored_vectors/scores_colors_hsv.csv')
concept_vectors['vector'] = [np.array([float(xx) for xx in x]) for x in concept_vectors['vector'].str.split(', ')]
concept_vectors['score'] = concept_vectors['score'].astype(float)
concept_vectors['sign'] = [True if 'sign:True' in val else False for val in concept_vectors['kwargs']]
concept_vectors['extremes'] = [True if 'extremes method:True' in val else False for val in concept_vectors['kwargs']]
concept_vectors['regularization'] = [float(val.split(',')[1].strip('regularization: ')) if 'regularization:' in val else False for val in concept_vectors['kwargs']]
concept_vectors['cl_method'] = [val.split(',')[0].strip('classification method:') if 'classification method:' in val else False for val in concept_vectors['kwargs']]
concept_vectors['num_factors'] = [int(val.split(',')[1].strip('number of factors:')) if 'number of factors:' in val else False for val in concept_vectors['kwargs']]
concept_vectors = concept_vectors.sort_values('score', ascending=False).reset_index()
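# Load the pre-trained generator snapshot (the 'G_ema' copy of the weights) on CPU.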
with dnnlib.util.open_url('./data/textile_model_files/network-snapshot-005000.pkl') as f:
    model = legacy.load_network_pkl(f)['G_ema'].to('cpu')  # type: ignore
COLORS_LIST = ['Gray', 'Red Orange', 'Yellow', 'Green', 'Light Blue', 'Blue', 'Purple', 'Pink']
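# Session-state defaults: base image, hue concept, latent space, lambdas, and vector-selection options.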
if 'image_id' not in st.session_state:
    st.session_state.image_id = 52921
if 'concept_ids' not in st.session_state:
    st.session_state.concept_ids = COLORS_LIST[-1]
if 'space_id' not in st.session_state:
    st.session_state.space_id = 'W'
if 'color_lambda' not in st.session_state:
    st.session_state.color_lambda = 7
if 'saturation_lambda' not in st.session_state:
    st.session_state.saturation_lambda = 0
if 'value_lambda' not in st.session_state:
    st.session_state.value_lambda = 0
if 'sign' not in st.session_state:
    st.session_state.sign = False
if 'extremes' not in st.session_state:
    st.session_state.extremes = False
if 'regularization' not in st.session_state:
    st.session_state.regularization = False
if 'cl_method' not in st.session_state:
    st.session_state.cl_method = False
if 'num_factors' not in st.session_state:
    st.session_state.num_factors = False
if 'best' not in st.session_state:
    st.session_state.best = True
# ----------------------------- INPUT ----------------------------------
st.header('Input')
input_col_1, input_col_2, input_col_3, input_col_4 = st.columns(4)
# --------------------------- INPUT column 1 ---------------------------
with input_col_1:
    with st.form('image_form'):
        st.write('**Choose or generate a random base image**')
        chosen_image_id_input = st.empty()
        image_id = chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)
        choose_image_button = st.form_submit_button('Choose the defined image')
        random_id = st.form_submit_button('Generate a random image')

        if random_id:
            image_id = random.randint(0, 100000)
            st.session_state.image_id = image_id
            chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)

        if choose_image_button:
            image_id = int(image_id)
            st.session_state.image_id = image_id
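# --------------------------- INPUT column 2 ---------------------------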
with input_col_2:
    with st.form('text_form_1'):
        st.write('**Choose hue to vary**')
        type_col = st.selectbox('Hue:', tuple(COLORS_LIST), index=7)

        st.write('**Set range of change**')
        chosen_color_lambda_input = st.empty()
        color_lambda = chosen_color_lambda_input.number_input('Lambda:', min_value=-100, step=1, value=7)
        color_lambda_button = st.form_submit_button('Choose the defined hue and lambda')

        if color_lambda_button:
            st.session_state.image_id = image_id
            st.session_state.concept_ids = type_col
            st.session_state.color_lambda = color_lambda
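# --------------------------- INPUT column 3 ---------------------------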
with input_col_3:
    with st.form('text_form'):
        st.write('**Choose saturation variation**')
        chosen_saturation_lambda_input = st.empty()
        saturation_lambda = chosen_saturation_lambda_input.number_input('Lambda:', min_value=-100, step=1, key=0, value=0)

        st.write('**Choose value variation**')
        chosen_value_lambda_input = st.empty()
        value_lambda = chosen_value_lambda_input.number_input('Lambda:', min_value=-100, step=1, key=1, value=0)
        value_lambda_button = st.form_submit_button('Choose the defined lambda for value and saturation')

        if value_lambda_button:
            st.session_state.saturation_lambda = int(saturation_lambda)
            st.session_state.value_lambda = int(value_lambda)
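# --------------------------- INPUT column 4 ---------------------------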
with input_col_4:
    with st.form('text_form_2'):
        st.write('Use the best vectors (after hyperparameter tuning)')
        best = st.selectbox('Option:', tuple([True, False]), index=0)

        sign = True
        num_factors = 10
        cl_method = 'LR'
        regularization = 0.1
        extremes = True
        if st.session_state.best is False:
            st.write('Options for StyleSpace (not available for Saturation and Value)')
            sign = st.selectbox('Sign option:', tuple([True, False]), index=1)
            num_factors = st.selectbox('Number of factors option:', tuple([1, 5, 10, 20, False]), index=4)
            st.write('Options for InterFaceGAN (not available for Saturation and Value)')
            cl_method = st.selectbox('Classification method option:', tuple(['LR', 'SVM', False]), index=2)
            regularization = st.selectbox('Regularization option:', tuple([0.1, 1.0, False]), index=2)
            st.write('Options for InterFaceGAN (only for Saturation and Value)')
            extremes = st.selectbox('Extremes option:', tuple([True, False]), index=1)

        choose_options_button = st.form_submit_button('Choose the defined options')
        if choose_options_button:
            st.session_state.best = best
            if st.session_state.best is False:
                st.session_state.sign = sign
                st.session_state.num_factors = num_factors
                st.session_state.cl_method = cl_method
                st.session_state.regularization = regularization
                st.session_state.extremes = extremes
# with input_col_4:
# with st.form('Network specifics:'):
# st.write('**Choose a latent space to use**')
# space_id = st.selectbox('Space:', tuple(['W']))
# choose_text_button = st.form_submit_button('Choose the defined concept and space to disentangle')
# st.write('**Select hierarchical levels to manipulate**')
# layers = st.multiselect('Layers:', tuple(range(14)))
# if len(layers) == 0:
# layers = None
# print(layers)
# layers_button = st.form_submit_button('Choose the defined layers')
# ---------------------------- SET UP OUTPUT ------------------------------
epsilon_container = st.empty()
st.header('Image Manipulation')
st.write('Using selected vectors to modify the original image...')
header_col_1, header_col_2 = st.columns([1,1])
output_col_1, output_col_2 = st.columns([1,1])
# # prediction error container
# error_container = st.empty()
# smoothgrad_header_container = st.empty()
# # smoothgrad container
# smooth_head_1, smooth_head_2, = st.columns([1,1,])
# smoothgrad_col_1, smoothgrad_col_2 = st.columns([1,1])
# ---------------------------- DISPLAY COL 1 ROW 1 ------------------------------
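# Select the separation vectors: either the best-scoring ones, or the ones matching the
# hyperparameters chosen in the options form above.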
with header_col_1:
    st.write('### Original image')
with header_col_2:
    if st.session_state.best:
        color_separation_vector, performance_color = concept_vectors[concept_vectors['color'] == st.session_state.concept_ids].reset_index().loc[0, ['vector', 'score']]
        saturation_separation_vector, performance_saturation = concept_vectors[concept_vectors['color'] == 'Saturation'].reset_index().loc[0, ['vector', 'score']]
        value_separation_vector, performance_value = concept_vectors[concept_vectors['color'] == 'Value'].reset_index().loc[0, ['vector', 'score']]
    else:
        tmp = concept_vectors[concept_vectors['color'] == st.session_state.concept_ids]
        tmp = tmp[tmp['sign'] == st.session_state.sign][tmp['num_factors'] == st.session_state.num_factors][tmp['cl_method'] == st.session_state.cl_method][tmp['regularization'] == st.session_state.regularization]
        color_separation_vector, performance_color = tmp.reset_index().loc[0, ['vector', 'score']]

        tmp_value = concept_vectors[concept_vectors['color'] == 'Value'][concept_vectors['extremes'] == st.session_state.extremes]
        value_separation_vector, performance_value = tmp_value.reset_index().loc[0, ['vector', 'score']]
        tmp_sat = concept_vectors[concept_vectors['color'] == 'Saturation'][concept_vectors['extremes'] == st.session_state.extremes]
        saturation_separation_vector, performance_saturation = tmp_sat.reset_index().loc[0, ['vector', 'score']]

    st.write('### Modified image')
    st.write(f"""
        Change in hue: {st.session_state.concept_ids}, amount: {np.round(st.session_state.color_lambda, 2)};
        change in saturation, amount: {np.round(st.session_state.saturation_lambda, 2)};
        change in value, amount: {np.round(st.session_state.value_lambda, 2)}.
        Verification performance of hue vector: {performance_color},
        saturation vector: {performance_saturation / 100},
        value vector: {performance_value / 100}""")
# ---------------------------- DISPLAY COL 2 ROW 1 ------------------------------
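# Fetch the stored latent vector of the selected image, then render the original image and the
# composite image shifted along the hue, saturation, and value separation vectors.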
if st.session_state.space_id == 'Z':
    original_image_vec = annotations['z_vectors'][st.session_state.image_id]
else:
    original_image_vec = annotations['w_vectors'][st.session_state.image_id]

img = generate_original_image(original_image_vec, model, latent_space=st.session_state.space_id)
with output_col_1:
    st.image(img)

with output_col_2:
    image_updated = generate_composite_images(model, original_image_vec, [color_separation_vector, saturation_separation_vector, value_separation_vector], lambdas=[st.session_state.color_lambda, st.session_state.saturation_lambda, st.session_state.value_lambda])
    st.image(image_updated)