# latent-space-theories/pages/1_Textiles_Disentanglement.py
import streamlit as st
import pickle
import pandas as pd
import numpy as np
import random
import torch
from matplotlib.backends.backend_agg import RendererAgg
from backend.disentangle_concepts import *
import torch_utils
import dnnlib
import legacy
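# Lock of matplotlib's Agg renderer, commonly grabbed for thread-safe plotting in
# Streamlit apps; note that it is not referenced again in this page.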
_lock = RendererAgg.lock
st.set_page_config(layout='wide')
BACKGROUND_COLOR = '#bcd0e7'
SECONDARY_COLOR = '#bce7db'
st.title('Disentanglement studies on the Textile Dataset')
st.markdown(
    """
    This is a demo of disentanglement studies on the [iMET Textiles Dataset](https://www.metmuseum.org/art/collection/search/85531).
    """,
    unsafe_allow_html=False,
)
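# Pre-computed latent codes for the generated images. The pickle is expected to map
# 'z_vectors' and 'w_vectors' to per-seed latent codes; these keys are read further
# down to recover the code of the selected image.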
annotations_file = './data/textile_annotated_files/seeds0000-100000_S.pkl'
with open(annotations_file, 'rb') as f:
    annotations = pickle.load(f)
COLORS_LIST = []
if 'image_id' not in st.session_state:
    st.session_state.image_id = 0
if 'concept_ids' not in st.session_state:
    st.session_state.concept_ids = ['AMPHORA']
if 'space_id' not in st.session_state:
    st.session_state.space_id = 'W'
# def on_change_random_input():
# st.session_state.image_id = st.session_state.image_id
# ----------------------------- INPUT ----------------------------------
st.header('Input')
input_col_1, input_col_2, input_col_3 = st.columns(3)
# --------------------------- INPUT column 1 ---------------------------
with input_col_1:
    with st.form('text_form'):

        # image_id = st.number_input('Image ID: ', format='%d', step=1)
        st.write('**Choose two options to disentangle**')
        type_col = st.selectbox('Concept category:', tuple(['Provenance', 'Shape Name', 'Fabric', 'Technique']))

        ann_df = pd.read_csv(f'./data/vase_annotated_files/sim_{type_col}_seeds0000-20000.csv')
        labels = list(ann_df.columns)
        labels.remove('ID')
        labels.remove('Unnamed: 0')

        concept_ids = st.multiselect('Concepts:', tuple(labels), max_selections=2, default=[labels[2], labels[3]])

        st.write('**Choose a latent space to disentangle**')
        space_id = st.selectbox('Space:', tuple(['W', 'Z']))

        choose_text_button = st.form_submit_button('Choose the defined concept and space to disentangle')

        if choose_text_button:
            concept_ids = list(concept_ids)
            st.session_state.concept_ids = concept_ids
            space_id = str(space_id)
            st.session_state.space_id = space_id
            # st.write(image_id, st.session_state.image_id)
# ---------------------------- SET UP OUTPUT ------------------------------
epsilon_container = st.empty()
st.header('Output')
st.subheader('Concept vector')
# perform attack container
# header_col_1, header_col_2, header_col_3, header_col_4, header_col_5 = st.columns([1,1,1,1,1])
# output_col_1, output_col_2, output_col_3, output_col_4, output_col_5 = st.columns([1,1,1,1,1])
header_col_1, header_col_2 = st.columns([5,1])
output_col_1, output_col_2 = st.columns([5,1])
st.subheader('Derivations along the concept vector')
# prediction error container
error_container = st.empty()
smoothgrad_header_container = st.empty()
# smoothgrad container
smooth_head_1, smooth_head_2, smooth_head_3, smooth_head_4, smooth_head_5 = st.columns([1,1,1,1,1])
smoothgrad_col_1, smoothgrad_col_2, smoothgrad_col_3, smoothgrad_col_4, smoothgrad_col_5 = st.columns([1,1,1,1,1])
# ---------------------------- DISPLAY COL 1 ROW 1 ------------------------------
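# get_separation_space (from backend.disentangle_concepts, not shown here) is assumed to
# fit a linear separator between the two selected concepts in the chosen latent space and
# to return the resulting concept (separation) vector, the number of relevant latent
# dimensions, the relevant nodes themselves, and a validation classification score.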
with output_col_1:
    separation_vector, number_important_features, imp_nodes, performance = get_separation_space(concept_ids, annotations, ann_df, latent_space=st.session_state.space_id, samples=150)
    # st.write(f'Class ID {input_id} - {input_label}: {pred_prob*100:.3f}% confidence')
    st.write('Concept vector', separation_vector)
    header_col_1.write(f'Concept {st.session_state.concept_ids} - Space {st.session_state.space_id} - Number of relevant nodes: {number_important_features} - Val classification performance: {performance}')  # - Nodes {",".join(list(imp_nodes))}')
# ----------------------------- INPUT column 2 & 3 ----------------------------
with input_col_2:
    with st.form('image_form'):

        # image_id = st.number_input('Image ID: ', format='%d', step=1)
        st.write('**Choose or generate a random image to test the disentanglement**')
        chosen_image_id_input = st.empty()
        image_id = chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)

        choose_image_button = st.form_submit_button('Choose the defined image')
        random_id = st.form_submit_button('Generate a random image')

        if random_id:
            image_id = random.randint(0, 20000)
            st.session_state.image_id = image_id
            chosen_image_id_input.number_input('Image ID:', format='%d', step=1, value=st.session_state.image_id)

        if choose_image_button:
            image_id = int(image_id)
            st.session_state.image_id = int(image_id)
            # st.write(image_id, st.session_state.image_id)
with input_col_3:
    with st.form('Variate along the disentangled concept'):
        st.write('**Set range of change**')
        chosen_epsilon_input = st.empty()
        epsilon = chosen_epsilon_input.number_input('Lambda:', min_value=1, step=1)
        epsilon_button = st.form_submit_button('Choose the defined lambda')

        st.write('**Select hierarchical levels to manipulate**')
        layers = st.multiselect('Layers:', tuple(range(14)))
        if len(layers) == 0:
            layers = None
        print(layers)
        layers_button = st.form_submit_button('Choose the defined layers')
# ---------------------------- DISPLAY COL 2 ROW 1 ------------------------------
#model = torch.load('./data/model_files/pytorch_model.bin', map_location=torch.device('cpu'))
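# The snapshot is a StyleGAN-style network pickle; legacy.load_network_pkl returns the
# stored networks and the exponential-moving-average generator G_ema is kept on the CPU.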
with dnnlib.util.open_url('./data/vase_model_files/network-snapshot-003800.pkl') as f:
    model = legacy.load_network_pkl(f)['G_ema'].to('cpu')  # type: ignore
if st.session_state.space_id == 'Z':
    original_image_vec = annotations['z_vectors'][st.session_state.image_id]
else:
    original_image_vec = annotations['w_vectors'][st.session_state.image_id]
img = generate_original_image(original_image_vec, model, latent_space=st.session_state.space_id)
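# The concept column with the highest annotation score for this image is taken as its predicted label.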
top_pred = ann_df.loc[st.session_state.image_id, labels].astype(float).idxmax()
# input_image = original_image_dict['image']
# input_label = original_image_dict['label']
# input_id = original_image_dict['id']
with smoothgrad_col_3:
    st.image(img)
    smooth_head_3.write(f'Base image, predicted as {top_pred}')
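# regenerate_images (from backend.disentangle_concepts, not shown here) is assumed to move
# the latent code from -lambda to +lambda along the concept vector, optionally restricted to
# the selected layers, and to return the generated images with the lambda used at each step.
# Images 0, 1, 3 and 4 are displayed around the base image; index 2 (lambda close to 0) is
# presumably equivalent to the base image already shown in the middle column.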
images, lambdas = regenerate_images(model, original_image_vec, separation_vector, min_epsilon=-(int(epsilon)), max_epsilon=int(epsilon), latent_space=st.session_state.space_id, layers=layers)
with smoothgrad_col_1:
    st.image(images[0])
    smooth_head_1.write(f'Change of {np.round(lambdas[0], 2)}')

with smoothgrad_col_2:
    st.image(images[1])
    smooth_head_2.write(f'Change of {np.round(lambdas[1], 2)}')

with smoothgrad_col_4:
    st.image(images[3])
    smooth_head_4.write(f'Change of {np.round(lambdas[3], 2)}')

with smoothgrad_col_5:
    st.image(images[4])
    smooth_head_5.write(f'Change of {np.round(lambdas[4], 2)}')