achterbrain committed · Commit 1b60893 · Parent(s): d79174d

updated to v0.0.4

Files changed:
- Dashboard.py +11 -1
- Dashboard_setup.py +1 -1
- Data/Prompt_dir_230104.csv +0 -0
- README.md +1 -1
- pages/1_⚙️Manual assessment.py +27 -4
- pages/2_🤖Automated assessment.py +1 -1
- pages/3_📊Assessment summary.py +10 -17
- pages/Functions/Dashboard_functions.py +118 -39
Dashboard.py CHANGED
@@ -47,14 +47,20 @@ with st.expander("Prompt downloader"):

 # Concat all tasks to dataframe
 prompt_download = pd.concat(prompt_download_dict.values())
+# Exclude prompts from single object prompt download, as else the int transform gives an error
+single_object_prompt_download = prompt_download.dropna(subset='Linked_prompts')

 # Add relevant single object prompts
-single_object_ids =
+single_object_ids = single_object_prompt_download.Linked_prompts.str.split(',').explode().unique().astype('int')
 prompt_download = pd.concat([
 prompt_download,
 prompt_dir.loc[prompt_dir['ID'].isin(single_object_ids)]
 ])

+# For img2img prompt, the prompt in the download gets replaced by img2img instructions
+img2img_instructions_col = prompt_download.loc[prompt_download['Task'].str.startswith('img2img')]['img2img_instructions']
+prompt_download.loc[prompt_download['Task'].str.startswith('img2img'),'Prompt']=img2img_instructions_col
+
 # Add download button for prompts
 st.download_button(
 label="Download prompts",
@@ -140,5 +146,9 @@ if eval_df.shape[0]!=0:
 st.write("- Available for manual assessment: ", str(sum(eval_df.manual_eval)))
 manual_eval_available = sum(eval_df.manual_eval)
 st.write("- Available for automated assessment: ", str(sum(eval_df.automated_eval)))
+
+if eval_df.shape[0]>sum(eval_df.manual_eval):
+st.write('WARNING: {0} image(s) with invalid file names uploaded. Pictures with invalid names will not be available for assessment. Use the file names provided by the prompt downloader to correctly name your generated images.'.format(str(eval_df.shape[0]-sum(eval_df.manual_eval))))
+
 else:
 st.write("Upload files to start the assessment.")
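Note on the single-object fix above: the new dropna() call matters because rows without Linked_prompts carry NaN, and NaN cannot survive the astype('int') cast after the split/explode step. A minimal sketch with made-up toy data (not the dashboard's real prompt directory) illustrates the pattern:

# Toy data only; column names follow the diff, values are invented for illustration.
import pandas as pd

prompt_download = pd.DataFrame({
    'ID': [10, 11, 12],
    'Task': ['Multiple object types', 'Single object', 'Negation'],
    'Linked_prompts': ['1,2', None, '3'],   # rows without linked prompts hold NaN
})

# Without the dropna, the NaN row propagates through explode() and astype('int') raises.
single_object_prompt_download = prompt_download.dropna(subset='Linked_prompts')
single_object_ids = (single_object_prompt_download.Linked_prompts
                     .str.split(',').explode().unique().astype('int'))
print(single_object_ids)  # [1 2 3]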
Dashboard_setup.py CHANGED
@@ -1,4 +1,4 @@
 import pandas as pd

 automated_task_list = ['Multiple object types', 'Single object','Negation']
-prompt_dir = pd.read_csv('Data/
+prompt_dir = pd.read_csv('Data/Prompt_dir_230104.csv')
Data/Prompt_dir_230104.csv ADDED
The diff for this file is too large to render. See raw diff.
README.md CHANGED
@@ -10,4 +10,4 @@ pinned: false
 license: mit
 ---

-
+We provide a version of local hosting with a customization guide on https://github.com/8erberg/Intel-Generative-Image-Dashboard-experimental
pages/1_⚙️Manual assessment.py CHANGED
@@ -2,7 +2,8 @@ import streamlit as st
 import numpy as np
 import pandas as pd
 from PIL import Image
-from pages.Functions.Dashboard_functions import add_previous_manual_assessments
+from pages.Functions.Dashboard_functions import add_previous_manual_assessments, delete_last_manual_rating
+

 st.title('Manual assessment')
 st.write('On this page you can rate all uploaded images with regards to how good they match their respective prompts. You can see the outcome of your assessment on the summary page.')
@@ -26,6 +27,14 @@ except KeyError:
 manual_eval_available = 0
 st.session_state['uploaded_img'] = [] #safety if program is started on manual assesssment page and not desktop

+# Create manual rating history if it does not already exist
+try:
+_ = st.session_state['manual_rating_history'][-1]
+except KeyError:
+st.session_state['manual_rating_history'] = []
+except IndexError:
+pass
+

 # Main rating loop
 ## If images are available for rating this creates a from to submit ratings to database
@@ -113,6 +122,9 @@ if manual_eval_available > 0:
 # Submit assessments to database
 submitted = st.form_submit_button("Submit")
 if submitted:
+# Create temporary list to hold picture indexes for this run
+temp_picture_index_list = []
+
 # First add main prompt assessment
 st.session_state['eval_df'].loc[
 curr_picture_index,'manual_eval']=include_prompt
@@ -121,6 +133,9 @@ if manual_eval_available > 0:
 st.session_state['eval_df'].loc[
 curr_picture_index,'manual_eval_task_score']=curr_manual_eval_row['manual_eval_task_score'].item()

+# Add picture index to temp list
+temp_picture_index_list.append(curr_picture_index)
+
 # Add subprompt assessment if dataset was created for subprompts
 # This stage will automatically be skipped if the df for linked prompts is empty
 for row in curr_linked_rows.itertuples():
@@ -131,9 +146,18 @@ if manual_eval_available > 0:
 st.session_state['eval_df'].loc[
 row.Picture_index,'manual_eval_task_score']=row.manual_eval_task_score

+# Add picture index to temp list
+temp_picture_index_list.append(row.Picture_index)
+
+# Add temp list of picture indices to rating history
+st.session_state['manual_rating_history'].append(temp_picture_index_list)
+
 # Reset page after ratings were submitted
 st.experimental_rerun()

+# Return to last rated image
+delete_last_manual_rating()
+
 # Add option to add previous manual assessments
 add_previous_manual_assessments()

@@ -143,7 +167,6 @@ elif len(st.session_state['uploaded_img'])==0:
 # If files are uploaded but all ratings are completed
 else:
 assessment_progress.write('You finished assessing the current batch of uploaded images. Upload more pictures of generate your results on the summary page.')
-
-
-
+# Add option to return to last manual rating
+delete_last_manual_rating()

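The new manual_rating_history entry works as a simple undo stack: each Submit appends the list of picture indices it touched, and delete_last_manual_rating() pops the most recent list and reopens exactly those rows. A toy illustration with a plain dict standing in for st.session_state (the Streamlit button and rerun from the real function are omitted):

# Toy stand-in for st.session_state; column names follow the diff, values are invented.
import numpy as np
import pandas as pd

session = {
    'eval_df': pd.DataFrame({'manual_eval_completed': [True, True, False],
                             'manual_eval_task_score': ['Yes', 'No', np.nan]}),
    'manual_rating_history': [[0, 1]],  # one Submit rated pictures 0 and 1 together
}

# Core of delete_last_manual_rating(): pop the last submission and reset its rows
if len(session['manual_rating_history']) > 0:
    for i_picind in session['manual_rating_history'].pop():
        session['eval_df'].loc[i_picind, 'manual_eval_completed'] = False
        session['eval_df'].loc[i_picind, 'manual_eval_task_score'] = np.nan

print(session['eval_df'])  # rows 0 and 1 are open for rating again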
pages/2_🤖Automated assessment.py CHANGED
@@ -40,7 +40,7 @@ if automated_eval_available > 0:

 with st.form("auto_assessment_form",clear_on_submit=True):
 # Form info statment
-st.write('Select tasks to assess with the automated assessment
+st.write('Select tasks to assess with the automated assessment below. Once you started an assessment you will not be able to leave this page before the assessment is completed.')

 # Create list of bool selection buttons, one for every task
 for i_task in range(task_list_len):
pages/3_📊Assessment summary.py CHANGED
@@ -3,10 +3,11 @@ import pandas as pd
 import seaborn as sns
 import matplotlib.pyplot as plt
 from PIL import Image
-from pages.Functions.Dashboard_functions import
+from pages.Functions.Dashboard_functions import pre_assessment_visualisation, multi_comparison_plotI, print_results_tabs
 side_image = Image.open('Graphics/IL_Logo.png')
 st.sidebar.image(side_image)

+
 @st.cache
 def convert_df_to_csv(df):
 # IMPORTANT: Cache the conversion to prevent computation on every rerun
@@ -18,26 +19,22 @@ assessment_result_frames = {}
 st.title('Assessment Summary')
 st.header('Manual assessment')

+
 try:
 if sum(st.session_state['eval_df']['manual_eval_completed'])>0:
 # Display file uploader
-manual_file_upload = st.file_uploader("Upload .csv with saved manual assessment for model comparison")
-
+manual_file_upload = st.file_uploader("Upload .csv with saved manual assessment for model comparison", accept_multiple_files=True)
 # Create dataset for manual summary plots
 manual_eval_df = st.session_state['eval_df']
 manual_eval_df['Score'] = manual_eval_df['manual_eval_task_score'].map({'Yes':True, 'No':False})
 manual_results_df = manual_eval_df.loc[
 (manual_eval_df['manual_eval']==True)&
 (manual_eval_df['manual_eval_completed']==True)]
-
+manual_results_df['Model']='Manual assessment'
 assessment_result_frames['Manual assessment'] = manual_results_df

 # Add plots / tables to page
-
-manual_file_upload_df = pd.read_csv(manual_file_upload).copy()
-print_results_tabs(file_upload=manual_file_upload, results_df=manual_results_df, file_upload_df=manual_file_upload_df)
-except ValueError:
-print_results_tabs(file_upload=manual_file_upload, results_df=manual_results_df)
+print_results_tabs(file_upload=manual_file_upload, results_df=manual_results_df)

 st.download_button(
 label="Download manual assessment data",
@@ -50,24 +47,19 @@ try:
 except KeyError:
 pre_assessment_visualisation(type_str='manual')

-
-
 st.write(' ')
 st.header('Automated assessment')
 try:
 # Create dataset for automated summary plots
 auto_eval_df = st.session_state['auto_eval_df']
+auto_eval_df['Model']='Automated assessment'
 assessment_result_frames['Automated assessment'] = auto_eval_df

 # Display file uploader
-auto_file_upload = st.file_uploader("Upload .csv with saved automated assessment for model comparison")
+auto_file_upload = st.file_uploader("Upload .csv with saved automated assessment for model comparison", accept_multiple_files=True)

 # Add plots / tables to page
-
-auto_file_upload_df = pd.read_csv(auto_file_upload).copy()
-print_results_tabs(file_upload=auto_file_upload, results_df=auto_eval_df, file_upload_df=auto_file_upload_df)
-except ValueError:
-print_results_tabs(file_upload=auto_file_upload, results_df=auto_eval_df)
+print_results_tabs(file_upload=auto_file_upload, results_df=auto_eval_df)

 st.download_button(
 label="Download automated assessment data",
@@ -121,3 +113,4 @@ except IndexError:
 st.write('There is no image availabe in your selected category.')
 except KeyError:
 pass
+
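Both uploaders on this page now pass accept_multiple_files=True, so the value handed to print_results_tabs is a list of uploaded files (empty by default) rather than a single file or None. A small sketch of that calling convention, outside the commit itself:

# Sketch only; shows the list-valued return of a multi-file uploader, not the page's full logic.
import pandas as pd
import streamlit as st

manual_file_upload = st.file_uploader(
    "Upload .csv with saved manual assessment for model comparison",
    accept_multiple_files=True,
)

# Each element is an uploaded file object with a .name attribute and can be read like a CSV.
for uploaded in manual_file_upload:
    st.write(uploaded.name, pd.read_csv(uploaded).shape)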
pages/Functions/Dashboard_functions.py CHANGED
@@ -2,10 +2,37 @@

 import streamlit as st
 import pandas as pd
+import numpy as np
 import seaborn as sns
 import matplotlib.pyplot as plt
 from PIL import Image

+##### Page-unspecific functions
+
+def assert_uploaded_frame(uploaded_df):
+# Set up variables checked for
+asserted_columns = {
+'Prompt_no':pd.api.types.is_integer_dtype,
+'Score':pd.api.types.is_bool_dtype,
+'Task':pd.api.types.is_object_dtype,
+'File_name':pd.api.types.is_object_dtype}
+asserted_column_names = ['Prompt_no','Score','Task','File_name']
+
+# Check whether all needed column names are present
+existing_column_names = [(x in uploaded_df.columns) for x in asserted_column_names]
+assert all(existing_column_names), "The uploaded dataframe is missing a column needed for import. Your table needs to contain the columns: 'Prompt_no', 'Score', 'Task', 'File_name' "
+
+# Check whether all needed columns have correct dtypes
+correct_column_dtypes = []
+for i_item in asserted_columns.items():
+dtype_test = i_item[1](uploaded_df[i_item[0]].dtype)
+correct_column_dtypes.append(dtype_test)
+assert all(correct_column_dtypes), "Incorrect dtypes in uploaded dataframe."
+
+def assert_multi_frame_upload(list_of_uploaded_dfs):
+# Apply uploaded frame assert to list of frames
+for i_df in list_of_uploaded_dfs:
+assert_uploaded_frame(i_df)

 ##### Dashboard main page
 def prompt_to_csv(df):
@@ -16,6 +43,22 @@ def prompt_to_csv(df):

 ##### Manual assessment

+def delete_last_manual_rating():
+'''
+Routine to delete last manual rating and hence to return to it
+'''
+if len(st.session_state['manual_rating_history'])>0:
+
+if st.button('Return to last rated image'):
+# The list contains sublists of images rated together, here we loop over these images to reset all of them
+deleted_picture_index_list = st.session_state['manual_rating_history'].pop()
+for i_picind in deleted_picture_index_list:
+st.session_state['eval_df'].loc[
+i_picind,'manual_eval_completed']=False
+st.session_state['eval_df'].loc[
+i_picind,'manual_eval_task_score']=np.nan
+st.experimental_rerun()
+
 def add_previous_manual_assessments():
 '''
 This is a routine to allow the user to upload prior manual ratings and override
@@ -31,6 +74,11 @@ def add_previous_manual_assessments():
 if uploaded_ratings != None:
 try:
 uploaded_ratings_df = pd.read_csv(uploaded_ratings)
+
+# Run standard assert pipeline
+assert_uploaded_frame(uploaded_ratings_df)
+
+# Show matching image count and instructions
 overlapping_files_df =pd.merge(st.session_state['eval_df'],uploaded_ratings_df,on='File_name',how='inner')
 st.write('Number of matching file names found: '+ str(len(overlapping_files_df)))
 st.write('Click "Add results" button to add / override current ratings with uploaded ratings.')
@@ -56,6 +104,75 @@


 ##### Assessment summary
+
+def print_results_tabs(file_upload, results_df):
+'''
+#Routine used to give user the choice between showing results as bar chart or table
+'''
+# Create a tab for bar chart and one for table data
+fig, table = multi_comparison_plotI(results_df=results_df, uploaded_df_list=file_upload)
+tab1, tab2 = st.tabs(["Bar chart", "Data table"])
+with tab1:
+st.pyplot(fig)
+
+with tab2:
+st.write(table)
+
+
+def pre_assessment_visualisation(type_str):
+'''
+Routine used to allow user to visualise uploaded results before completing any assessments
+'''
+st.write('Complete {0} assessment or upload .csv with saved {0} assessment to generate summary.'.format(type_str))
+
+# Display file uploader
+file_upload = st.file_uploader("Upload .csv with saved {0} assessment to plot prior results.".format(type_str), accept_multiple_files=True)
+if len(file_upload) > 0:
+print_results_tabs(file_upload=file_upload, results_df=None)
+
+
+def multi_comparison_plotI(results_df = None, uploaded_df_list = []):
+# If list of uploaded_dfs is provided and we transform them into pd.Dfs
+# Multiple file uploader returns empty list as default
+file_upload_names = [x.name for x in uploaded_df_list]
+plot_df_list = [pd.read_csv(x) for x in uploaded_df_list]
+
+# Assert that all uploaded df's have correct format
+assert_multi_frame_upload(plot_df_list)
+
+# Add file name as model name
+for i_df in range(len(file_upload_names)):
+plot_df_list[i_df]= plot_df_list[i_df].assign(Model=file_upload_names[i_df])
+
+# If results df is provided, add it to list of dfs to plot
+if type(results_df) == pd.DataFrame:
+plot_df_list.append(results_df)
+
+# Concat all frames to joined dataframe
+plot_df = pd.concat(plot_df_list)
+
+# Calculate the grouped percentage scores per task category and model
+grouped_series = plot_df.groupby(['Task','Model'])['Score'].sum()/plot_df.groupby(['Task','Model'])['Score'].count()*100
+grouped_series = grouped_series.rename('Percentage correct')
+
+# Create plot
+eval_share = grouped_series.reset_index()
+# Add small amount to make the bars on plot not disappear
+eval_share['Percentage correct'] = eval_share['Percentage correct']+1
+
+# Create plot
+fig = plt.figure(figsize=(12, 3))
+sns.barplot(data=eval_share,x='Task',y='Percentage correct',hue='Model', palette='GnBu')
+plt.xticks(rotation=-65)
+plt.xlabel(' ')
+plt.ylim(0, 100)
+return fig,grouped_series
+
+
+
+
+############## Functions no longer used, to be deleted
+
 def plot_style_simple(results_df, return_table = False):
 '''
 Simple plot function for plotting just one dataframe of results
@@ -109,42 +226,4 @@ def plot_style_combined(results_df, uploaded_df = None, return_table=False):
 plt.xticks(rotation=-65)
 plt.ylabel('Percentage correct')
 plt.xlabel(' ')
-return fig
-
-
-def print_results_tabs(file_upload, results_df, file_upload_df=None):
-'''
-Routine used to give user the choice between showing results as bar chart or table
-'''
-# Create a tab for bar chart and one for table data
-tab1, tab2 = st.tabs(["Bar chart", "Data table"])
-with tab1:
-# If df was uploaded for comparison, we create comparison plot, else simple plot
-if file_upload == None:
-fig = plot_style_simple(results_df)
-st.pyplot(fig)
-else:
-fig = plot_style_combined(results_df,file_upload_df)
-st.pyplot(fig)
-
-with tab2:
-# If df was uploaded for comparison, we create comparison table, else simple table
-if file_upload == None:
-table = plot_style_simple(results_df, return_table=True)
-st.write(table)
-else:
-table = plot_style_combined(results_df,file_upload_df, return_table=True)
-st.write(table)
-
-
-def pre_assessment_visualisation(type_str):
-'''
-Routine used to allow user to visualise uploaded results before completing any assessments
-'''
-st.write('Complete {0} assessment or upload .csv with saved {0} assessment to generate summary.'.format(type_str))
-
-# Display file uploader
-file_upload = st.file_uploader("Upload .csv with saved {0} assessment to plot prior results.".format(type_str))
-if file_upload != None:
-file_upload_df = pd.read_csv(file_upload).copy()
-print_results_tabs(file_upload=None, results_df=file_upload_df)
+return fig
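The heart of the new multi_comparison_plotI is the grouped percentage score: for every (Task, Model) pair it divides the number of correct answers by the number of answers. A quick check with made-up scores (the file names run_a.csv and run_b.csv are hypothetical stand-ins for uploaded model results):

# Toy data only; reproduces the groupby aggregation from the function above.
import pandas as pd

plot_df = pd.DataFrame({
    'Task':  ['Negation', 'Negation', 'Negation', 'Single object'],
    'Model': ['run_a.csv', 'run_a.csv', 'run_b.csv', 'run_a.csv'],
    'Score': [True, False, True, True],
})

grouped_series = (plot_df.groupby(['Task', 'Model'])['Score'].sum()
                  / plot_df.groupby(['Task', 'Model'])['Score'].count() * 100)
grouped_series = grouped_series.rename('Percentage correct')
print(grouped_series)
# Negation / run_a.csv -> 50.0, Negation / run_b.csv -> 100.0, Single object / run_a.csv -> 100.0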