shikharyashmaurya committed on
Commit 00cdf6b
1 Parent(s): 209061e

Create app.py

Files changed (1)
  1. app.py +99 -0
app.py ADDED
@@ -0,0 +1,99 @@
+ import streamlit as st
+ import os
+ from PIL import Image
+ import tempfile
+
+ import google.generativeai as genai
+ secret_key = os.getenv("SECRET_KEY")
+
+ genai.configure(api_key=secret_key)
+ model2 = genai.GenerativeModel('gemini-pro')
+
+ st.title('Mental Health')
+ video_url1 = 'https://youtu.be/NQcYZplTXnQ?si=egutHE1H9YwQNk_I'
+ st.header("Video Demo")
+ st.video(video_url1)
+
+ from transformers import pipeline
+
+ # Text-classification pipeline used to label the emotion of the typed text
+ classifier = pipeline("text-classification")
+ input1 = st.text_input('Enter text here:', '')
+
+ if input1:
+     outputs1 = classifier(input1)
+     st.write('You entered:', input1)
+     st.write('Emotion:', outputs1[0]['label'])
+     st.write('Confidence in Emotion:', outputs1[0]['score'])
+
+ # Keep one Gemini chat session alive across Streamlit reruns
+ if 'chat' not in st.session_state:
+     st.session_state.chat = model2.start_chat(history=[])
+
+ # Map Gemini's 'model' role onto Streamlit's 'assistant' role
+ def role_to_streamlit(role):
+     if role == 'model':
+         return 'assistant'
+     else:
+         return role
+
+ # Replay the chat history so earlier turns stay visible after reruns
+ for message in st.session_state.chat.history:
+     with st.chat_message(role_to_streamlit(message.role)):
+         st.markdown(message.parts[0].text)
+
+ if prompt2 := st.chat_input('Write your problem'):
+     st.chat_message('user').markdown(prompt2)
+     response2 = st.session_state.chat.send_message(prompt2)
+     with st.chat_message('assistant'):
+         st.markdown(response2.text)
+
+ # Send an optional text prompt together with an image to the Gemini vision model
+ def get_gemini_response(prompt_text, image):
+     model = genai.GenerativeModel('gemini-pro-vision')
+     if prompt_text != "":
+         response = model.generate_content([prompt_text, image])
+     else:
+         response = model.generate_content(image)
+     return response.text
+
+ input3 = 'Tell me about the emotion shown in the image'
+
+ uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+ image = ""
+ if uploaded_file is not None:
+     image = Image.open(uploaded_file)
+     st.image(image, caption="Uploaded Image.", use_column_width=True)
+ submit = st.button("Tell me about the emotion in image")
+ if submit and uploaded_file is not None:
+     response = get_gemini_response(input3, image)
+     st.subheader("The Response is")
+     st.write(response)
+
+
+ # Default prompt: detect the emotion in the uploaded audio
+ audio_prompt = """You are an audio emotion detector. You will take the audio
+ and find the emotion in it. Please provide the emotion of the audio given here: """
+
+ audio_text = st.text_input("What do you want to know about the audio:")
+
+ # If the user asked a specific question, answer that instead of the default prompt
+ if audio_text:
+     audio_prompt = """You will analyse the audio and answer the question given here: """ + audio_text
+
+ audio_file = st.file_uploader("Upload an audio file", type=["mp3", "wav", "ogg"])
+
+ def generate_gemini_content(prompt, audio_file):
+     # Write the uploaded audio to a temporary file so it can be uploaded to Gemini
+     with tempfile.NamedTemporaryFile(suffix='.wav', delete=False) as tmp_file:
+         tmp_file.write(audio_file.getvalue())
+
+     model = genai.GenerativeModel("models/gemini-1.5-pro-latest")
+     your_file = genai.upload_file(tmp_file.name)
+     response = model.generate_content([prompt, your_file])
+     os.remove(tmp_file.name)  # clean up the temporary file
+     return response.text
+
+ if st.button("Answer or summary"):
+     if audio_file:
+         summary = generate_gemini_content(audio_prompt, audio_file)
+         st.markdown("## Summary:")
+         st.write(summary)