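"""Streamlit app for comparing multiple candidates across emotion, transcript
score, gaze, and raw transcript analyses, with Gemini used to generate and
summarize the written comparisons."""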

import streamlit as st
import pandas as pd
import os
import google.generativeai as genai

# Configure Gemini from the environment rather than hardcoding an API key in
# source; the environment variable name used here is an assumption.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
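
# Each analysis module exposes its own loader, visualizer(s), and
# generate_response helper; they are aliased on import so the shared names
# do not collide.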
from emotion import (
    load_and_preprocess_data as emotion_load_and_preprocess_data,
    visualize_comparison as emotion_visualize_comparison,
    visualize_single_candidate as emotion_visualize_single_candidate,
    generate_response as emotion_generate_response,
)
from transcrip_score import (
    load_and_preprocess_data as transcript_score_load_and_preprocess_data,
    visualize_comparison as transcript_score_visualize_comparison,
    visualize_single_candidate as transcript_score_visualize_single_candidate,
    generate_response as transcript_score_generate_response,
)
from gaze import (
    load_and_preprocess_data as gaze_load_and_preprocess_data,
    visualize_gaze_distribution,
    visualize_single_movie,
    generate_response as gaze_generate_response,
)
from transcrip import (
    load_candidate_data,
    generate_response as transcript_generate_response,
)
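

# Condense a long analysis response with Gemini so the overall-comparison
# section stays readable.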
def summarize_response(response):
    model = genai.GenerativeModel('gemini-pro')
    prompt_template = (
        "Please summarize the following response:\n\n"
        f"{response}\n\n"
        "The summary should be concise and capture the main points."
    )
    summary = model.generate_content(prompt_template)
    if hasattr(summary, 'candidates') and summary.candidates:
        content = summary.candidates[0].content
        if hasattr(content, 'parts'):
            for part in content.parts:
                if hasattr(part, 'text'):
                    return part.text
    return "No summary available."
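

# main() drives the UI: collect per-candidate uploads, let the user pick a
# query type, then answer chat prompts with the matching analysis module.
# To launch the app locally (the filename here is an assumption):
#   streamlit run app.py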
def main():
    st.title("Multi-Candidate Analysis Interface")
    num_candidates = st.number_input("How many candidates would you like to compare?", min_value=1, max_value=10, value=1)
    data_dict = {}
    for i in range(num_candidates):
        st.subheader(f"Candidate {i + 1}")
        files = {}
        for file_type in ['emotion', 'transcript_score', 'gaze']:
            files[file_type] = st.file_uploader(f"Choose {file_type}.csv for Candidate {i + 1}", type="csv", key=f"candidate_{i + 1}_{file_type}")
        files['transcript'] = st.file_uploader(f"Choose transcript.txt for Candidate {i + 1}", type="txt", key=f"candidate_{i + 1}_transcript")
        if all(files.values()):
            data_dict[f"Candidate {i + 1}"] = files
    if len(data_dict) == num_candidates:
        st.success("All files uploaded successfully. You can now start analyzing!")
        query_type = st.selectbox("Select the type of query:", ['emotion', 'transcript score', 'gaze', 'transcript comparison', 'overall comparison'])
        if "messages" not in st.session_state:
            st.session_state.messages = []
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])
        selected_candidates = st.multiselect("Select candidates to compare:", list(data_dict.keys()))
        if not selected_candidates:
            selected_candidates = list(data_dict.keys())
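
        # Emotion queries: pull any emotion names mentioned in the prompt
        # (defaulting to 'all'), visualize the data, and let the emotion
        # module write the answer.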
        if query_type == 'emotion':
            emotions = ['angry', 'disgust', 'fear', 'happy', 'sad', 'surprise', 'neutral']
            st.write("Available Emotions:", emotions)
            if prompt := st.chat_input("What would you like to know about the candidates' emotions?"):
                st.chat_message("user").markdown(prompt)
                st.session_state.messages.append({"role": "user", "content": prompt})
                selected_emotions = []
                if "all" in prompt.lower():
                    selected_emotions = ['all']
                else:
                    for emotion in emotions:
                        if emotion in prompt.lower():
                            selected_emotions.append(emotion)
                if not selected_emotions:
                    selected_emotions = ['all']
                emotion_data = {candidate: emotion_load_and_preprocess_data(data_dict[candidate]['emotion']) for candidate in selected_candidates}
                emotion_percentages = None
                with st.chat_message("assistant"):
                    if len(selected_candidates) == 1:
                        emotion_percentages = emotion_visualize_single_candidate(emotion_data[selected_candidates[0]], selected_emotions)
                    elif any(keyword in prompt.lower() for keyword in ["graph", "compare", "visualize", "show"]):
                        emotion_percentages = emotion_visualize_comparison(emotion_data, selected_emotions, selected_candidates)
                response = emotion_generate_response(prompt, emotion_data, selected_emotions, selected_candidates, emotion_percentages)
                with st.chat_message("assistant"):
                    st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
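
        # Transcript-score queries follow the same pattern, keyed on the
        # scored delivery features listed below.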
        elif query_type == 'transcript score':
            features = ['positive', 'negative', 'neutral', 'confident', 'hesitant', 'concise', 'enthusiastic', 'speech_speed']
            st.write("Available Features:", features)
            if prompt := st.chat_input("What would you like to know about the candidates' transcript scores?"):
                st.chat_message("user").markdown(prompt)
                st.session_state.messages.append({"role": "user", "content": prompt})
                selected_features = []
                if any(keyword in prompt.lower() for keyword in ["all features", "all transcript", "compare all"]):
                    selected_features = ['all']
                else:
                    for feature in features:
                        if feature in prompt.lower():
                            selected_features.append(feature)
                if not selected_features:
                    selected_features = ['all']
                transcript_data = {candidate: transcript_score_load_and_preprocess_data(data_dict[candidate]['transcript_score']) for candidate in selected_candidates}
                feature_percentages = None
                with st.chat_message("assistant"):
                    if len(selected_candidates) == 1:
                        feature_percentages = transcript_score_visualize_single_candidate(transcript_data[selected_candidates[0]], selected_features)
                    else:
                        feature_percentages = transcript_score_visualize_comparison(transcript_data, selected_features, selected_candidates)
                response = transcript_score_generate_response(prompt, transcript_data, selected_features, selected_candidates, feature_percentages)
                with st.chat_message("assistant"):
                    st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
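
        # Gaze queries: keep only the gaze/blink/eye_offset features named in
        # the prompt and visualize a single candidate or a comparison.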
        elif query_type == 'gaze':
            features = ['gaze', 'blink', 'eye_offset']
            st.write("Available Features:", features)
            if prompt := st.chat_input("What would you like to know about the candidates' gaze data?"):
                st.chat_message("user").markdown(prompt)
                st.session_state.messages.append({"role": "user", "content": prompt})
                selected_features = [feature for feature in features if feature in prompt.lower()]
                if not selected_features:
                    selected_features = features
                gaze_data = {candidate: gaze_load_and_preprocess_data(data_dict[candidate]['gaze']) for candidate in selected_candidates}
                with st.chat_message("assistant"):
                    if len(selected_candidates) == 1:
                        visualize_single_movie(gaze_data[selected_candidates[0]], selected_features)
                    elif any(keyword in prompt.lower() for keyword in ["graph", "compare", "visualize", "show"]):
                        visualize_gaze_distribution(gaze_data, selected_features, selected_candidates)
                response = gaze_generate_response(prompt, gaze_data, selected_features, selected_candidates)
                with st.chat_message("assistant"):
                    st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
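
        # Transcript comparison works directly on the raw transcript text
        # rather than on pre-computed scores.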
        elif query_type == 'transcript comparison':
            transcript_data = {candidate: load_candidate_data(data_dict[candidate]['transcript']) for candidate in selected_candidates}
            if prompt := st.chat_input("What would you like to know about the candidates' transcripts?"):
                st.chat_message("user").markdown(prompt)
                st.session_state.messages.append({"role": "user", "content": prompt})
                response = transcript_generate_response(prompt, transcript_data, selected_candidates)
                with st.chat_message("assistant"):
                    st.markdown(response)
                st.session_state.messages.append({"role": "assistant", "content": response})
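
        # Overall comparison runs every analysis in turn, condensing the
        # longer sections with summarize_response before combining them.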
        elif query_type == 'overall comparison':
            if prompt := st.chat_input("What would you like to know about the overall comparison of candidates?"):
                st.chat_message("user").markdown(prompt)
                st.session_state.messages.append({"role": "user", "content": prompt})
                with st.chat_message("assistant"):
                    # Transcript information
                    st.subheader("Transcript Information")
                    transcript_data = {candidate: load_candidate_data(data_dict[candidate]['transcript']) for candidate in selected_candidates}
                    transcript_response = transcript_generate_response(prompt, transcript_data, selected_candidates)
                    st.markdown(transcript_response)
                    # Emotion comparison
                    st.subheader("Emotion Comparison")
                    emotion_data = {candidate: emotion_load_and_preprocess_data(data_dict[candidate]['emotion']) for candidate in selected_candidates}
                    emotion_percentages = emotion_visualize_comparison(emotion_data, ['all'], selected_candidates)
                    emotion_response = emotion_generate_response(prompt, emotion_data, ['all'], selected_candidates, emotion_percentages)
                    # Summarize each long response once and reuse the result below,
                    # instead of calling Gemini a second time for the chat history.
                    emotion_summary = summarize_response(emotion_response)
                    st.markdown(emotion_summary)
                    # Gaze comparison
                    st.subheader("Gaze Comparison")
                    gaze_data = {candidate: gaze_load_and_preprocess_data(data_dict[candidate]['gaze']) for candidate in selected_candidates}
                    visualize_gaze_distribution(gaze_data, ['gaze', 'blink', 'eye_offset'], selected_candidates)
                    gaze_response = gaze_generate_response(prompt, gaze_data, ['gaze', 'blink', 'eye_offset'], selected_candidates)
                    gaze_summary = summarize_response(gaze_response)
                    st.markdown(gaze_summary)
                    # Transcript score comparison
                    st.subheader("Transcript Score Comparison")
                    transcript_score_data = {candidate: transcript_score_load_and_preprocess_data(data_dict[candidate]['transcript_score']) for candidate in selected_candidates}
                    feature_percentages = transcript_score_visualize_comparison(transcript_score_data, ['all'], selected_candidates)
                    transcript_score_response = transcript_score_generate_response(prompt, transcript_score_data, ['all'], selected_candidates, feature_percentages)
                    transcript_score_summary = summarize_response(transcript_score_response)
                    st.markdown(transcript_score_summary)
                overall_response = f"""Here's an overall comparison of the candidates:

1. Transcript Analysis: {transcript_response}
2. Emotion Analysis: {emotion_summary}
3. Gaze Analysis: {gaze_summary}
4. Transcript Score Analysis: {transcript_score_summary}
"""
                st.session_state.messages.append({"role": "assistant", "content": overall_response})
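
        # Optional raw-data view: reload and display each selected candidate's
        # uploaded files.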
        if st.checkbox("Show raw data"):
            for candidate in selected_candidates:
                st.subheader(f"{candidate} Data")
                st.write("Emotion Data:")
                st.write(emotion_load_and_preprocess_data(data_dict[candidate]['emotion']))
                st.write("Transcript Score Data:")
                st.write(transcript_score_load_and_preprocess_data(data_dict[candidate]['transcript_score']))
                st.write("Gaze Data:")
                st.write(gaze_load_and_preprocess_data(data_dict[candidate]['gaze']))
                st.write("Transcript Data:")
                st.write(load_candidate_data(data_dict[candidate]['transcript']))


if __name__ == "__main__":
    main()