# Streamlit app: multi-candidate comparison chat interface backed by the Gemini API.
import streamlit as st
import google.generativeai as genai
import os

# Configure the Gemini API.
# SECURITY: an API key was previously hard-coded on this line. A key committed
# to source control is compromised and must be revoked/rotated. Read the key
# from the environment instead of embedding it in the file.
genai.configure(api_key=os.environ.get("GEMINI_API_KEY", ""))
def load_candidate_data(uploaded_file):
    """Return the contents of an uploaded file decoded as UTF-8 text.

    Args:
        uploaded_file: A file-like object exposing ``getvalue()`` that returns
            raw bytes (e.g. a Streamlit ``UploadedFile``).

    Returns:
        The file's contents as a ``str``.

    Raises:
        UnicodeDecodeError: If the file is not valid UTF-8.
    """
    return uploaded_file.getvalue().decode('utf-8')
def format_candidate_prompt(candidates, selected_candidates):
    """Build the system/analysis prompt for the selected candidates.

    Args:
        candidates: Mapping of candidate name -> profile text.
        selected_candidates: Iterable of names (keys of ``candidates``) to
            include in the prompt.

    Returns:
        The prompt template with ``{CANDIDATES}`` replaced by the comma-joined
        names and ``{INFO}`` replaced by each selected candidate's profile.

    Raises:
        KeyError: If a selected name is not a key of ``candidates``.
    """
    prompt = """
You are an AI assistant specializing in analyzing candidate profiles. You have access to information about the following candidates: {CANDIDATES}.
{INFO}
Based on this information:
1. Provide a brief summary of each selected candidate's background, including their education, work experience, and key skills.
2. Compare and contrast the candidates' experiences and skills.
3. Highlight any unique qualities or experiences that set each candidate apart.
4. Discuss how each candidate's background might be relevant to a potential job or role.
5. Identify any common themes or patterns across the selected candidates' profiles.
Provide a detailed analysis addressing these points, using specific information from the candidates' profiles where relevant. Your analysis should offer insights into each candidate's strengths and potential contributions to a workplace.
"""
    # Inline each selected candidate's profile under their name.
    info = ""
    for candidate in selected_candidates:
        info += f"\n{candidate}:\n{candidates[candidate]}\n"
    return prompt.format(CANDIDATES=", ".join(selected_candidates), INFO=info)
def generate_response(prompt, candidates, selected_candidates):
    """Send the candidate-analysis prompt plus the user's query to Gemini.

    Args:
        prompt: The user's free-form question about the candidates.
        candidates: Mapping of candidate name -> profile text.
        selected_candidates: Names (keys of ``candidates``) to analyze.

    Returns:
        The model's reply text, or an error message string when the response
        contains no text parts.
    """
    model = genai.GenerativeModel('gemini-pro')
    analysis_prompt = format_candidate_prompt(candidates, selected_candidates)
    full_prompt = analysis_prompt + "\n\nUser query: " + prompt
    response = model.generate_content(full_prompt)
    # Responses can come back with no parts (e.g. blocked content); guard
    # before indexing so we return a readable message instead of raising.
    if response.parts:
        return response.parts[0].text
    return "Error: Unable to extract text from the response. Please check the API response structure."
def main():
    """Run the Streamlit UI: upload candidate files, select, and chat.

    Flow: upload one or more .txt files (filename without extension becomes
    the candidate name), preview each profile, pick which candidates to
    compare, then chat; the conversation is kept in ``st.session_state``.
    """
    st.title("Multi-Candidate Comparison Chat Interface")
    uploaded_files = st.file_uploader("Choose text files with candidate information", type="txt",
                                      accept_multiple_files=True)
    if uploaded_files:
        candidates = {}
        for file in uploaded_files:
            # Use filename without extension as the candidate name.
            candidate_name = os.path.splitext(file.name)[0]
            candidates[candidate_name] = load_candidate_data(file)
        st.success(f"{len(candidates)} candidate files uploaded successfully. You can now start chatting!")
        st.subheader("Candidate Information")
        for candidate, info in candidates.items():
            st.write(f"{candidate}: {info[:100]}...")  # Show first 100 characters
        # Persist chat history across Streamlit reruns.
        if "messages" not in st.session_state:
            st.session_state.messages = []
        for message in st.session_state.messages:
            with st.chat_message(message["role"]):
                st.markdown(message["content"])
        selected_candidates = st.multiselect("Select candidates to compare:", list(candidates.keys()),
                                             default=list(candidates.keys()))
        if prompt := st.chat_input("What would you like to know about the candidates?"):
            st.chat_message("user").markdown(prompt)
            st.session_state.messages.append({"role": "user", "content": prompt})
            response = generate_response(prompt, candidates, selected_candidates)
            with st.chat_message("assistant"):
                st.markdown(response)
            st.session_state.messages.append({"role": "assistant", "content": response})
        if st.checkbox("Show raw data"):
            for candidate, info in candidates.items():
                st.subheader(f"{candidate} Full Information")
                st.write(info)
if __name__ == "__main__":
    main()