import streamlit as st
import os
from PIL import Image
import tempfile

import google.generativeai as genai
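
# The Gemini API key is read from the SECRET_KEY environment variable, which is
# assumed to be set before the app is launched.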
secret_key = os.getenv("SECRET_KEY")

genai.configure(api_key=secret_key)
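
# Text-only Gemini model that backs the chat assistant below.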
model2 = genai.GenerativeModel('gemini-pro')

st.title('Mental Health')

video_url1 = 'https://youtu.be/NQcYZplTXnQ?si=egutHE1H9YwQNk_I'
st.header("Video Demo")
st.video(video_url1)

from transformers import pipeline

classifier = pipeline("text-classification")
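# Note: with no explicit model, this loads the default text-classification checkpoint,
# which is a binary sentiment model, so the "Emotion" shown below is closer to
# sentiment polarity. An emotion fine-tuned checkpoint could be passed instead,
# e.g. (model name assumed, not verified here):
# classifier = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")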

input1 = st.text_input('Enter text here:', '')

if input1:
    outputs1 = classifier(input1)
    st.write('You entered:', input1)
    st.write('Emotion:', outputs1[0]['label'])
    st.write('Confidence in Emotion:', outputs1[0]['score'])
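
# Keep one Gemini chat session alive across Streamlit reruns.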
if 'chat' not in st.session_state:
    st.session_state.chat = model2.start_chat(history=[])
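
# Map Gemini's role names onto the names st.chat_message() accepts.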
def role_to_streamlit(role):
    if role == 'model':
        return 'assistant'
    else:
        return role
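
# Replay the conversation history before handling new input.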
for message in st.session_state.chat.history:
    with st.chat_message(role_to_streamlit(message.role)):
        st.markdown(message.parts[0].text)

if prompt2 := st.chat_input('Write your problem'):
    st.chat_message('user').markdown(prompt2)
    response2 = st.session_state.chat.send_message(prompt2)
    with st.chat_message('assistant'):
        st.markdown(response2.text)
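
# Vision helper: send the text prompt (if any) together with the PIL image to Gemini.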
def get_gemini_response(prompt, image):
    model = genai.GenerativeModel('gemini-pro-vision')
    if prompt != "":
        response = model.generate_content([prompt, image])
    else:
        response = model.generate_content(image)
    return response.text

input3 = 'Tell me about the emotion shown in the image'

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
image = ""
if uploaded_file is not None:
    image = Image.open(uploaded_file)
    st.image(image, caption="Uploaded Image.", use_column_width=True)

submit = st.button("Tell me about the emotion in image")
if submit:
    response = get_gemini_response(input3, image)
    st.subheader("The Response is")
    st.write(response)
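
# Audio section: build a prompt from the user's question (or a default
# emotion-detection prompt) and send it together with the uploaded audio.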
audio_prompt = """You are an audio emotion detector. You will listen to the audio
and identify the emotion in it. Please provide the emotion of the audio given here: """

audio_text = st.text_input("What do you want to know about the audio:")

if audio_text:
    audio_prompt = """You will analyse the audio and answer the question given here: """ + audio_text

audio_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "ogg"])
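
# Save the upload to a temporary file, push it to Gemini through the File API,
# and ask the model to respond to the prompt about it.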
def generate_gemini_content(audio_prompt, audio_file):
    with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
        tmp_file.write(audio_file.getvalue())

    model = genai.GenerativeModel("gemini-1.5-pro-latest")
    your_file = genai.upload_file(tmp_file.name)
    response = model.generate_content([audio_prompt, your_file])
    os.remove(tmp_file.name)  # clean up the temporary file before returning
    return response.text

if st.button("Answer or summary"):
    if audio_file:
        summary = generate_gemini_content(audio_prompt, audio_file)
        st.markdown("## Emotion/answer:")
        st.write(summary)