Spaces:
Sleeping
Sleeping
barghavani
committed on
Commit
•
85d00ce
1
Parent(s):
b895923
Update app.py
Browse files
app.py
CHANGED
@@ -9,7 +9,7 @@ from langchain_google_genai import ChatGoogleGenerativeAI
|
|
9 |
from langchain.chains.question_answering import load_qa_chain
|
10 |
from langchain.prompts import PromptTemplate
|
11 |
from dotenv import load_dotenv
|
12 |
-
|
13 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
14 |
|
15 |
|
@@ -78,10 +78,34 @@ def main():
|
|
78 |
st.set_page_config("Chat PDF")
|
79 |
st.header("QnA with Multiple PDF files💁")
|
80 |
|
81 |
-
|
82 |
-
|
83 |
-
|
84 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
85 |
|
86 |
with st.sidebar:
|
87 |
st.title("Menu:")
|
@@ -91,8 +115,7 @@ def main():
|
|
91 |
raw_text = get_pdf_text(pdf_docs)
|
92 |
text_chunks = get_text_chunks(raw_text)
|
93 |
get_vector_store(text_chunks)
|
94 |
-
|
95 |
-
|
96 |
|
97 |
|
98 |
if __name__ == "__main__":
|
|
|
9 |
from langchain.chains.question_answering import load_qa_chain
|
10 |
from langchain.prompts import PromptTemplate
|
11 |
from dotenv import load_dotenv
|
12 |
+
from streamlit_webrtc import webrtc_streamer, WebRtcMode, ClientSettings
|
13 |
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
|
14 |
|
15 |
|
|
|
78 |
st.set_page_config("Chat PDF")
|
79 |
st.header("QnA with Multiple PDF files💁")
|
80 |
|
81 |
+
# Audio recording
|
82 |
+
webrtc_ctx = webrtc_streamer(
|
83 |
+
key="audio_recorder",
|
84 |
+
mode=WebRtcMode.SENDONLY,
|
85 |
+
audio_receiver_size=256,
|
86 |
+
client_settings=ClientSettings(
|
87 |
+
media_stream_constraints={"audio": True},
|
88 |
+
),
|
89 |
+
)
|
90 |
+
|
91 |
+
if webrtc_ctx.audio_receiver:
|
92 |
+
audio_frames = []
|
93 |
+
while True:
|
94 |
+
frame = webrtc_ctx.audio_receiver.get_frame(timeout=1)
|
95 |
+
if frame is None:
|
96 |
+
break
|
97 |
+
audio_frames.append(frame)
|
98 |
+
|
99 |
+
if len(audio_frames) > 0:
|
100 |
+
audio_bytes = b"".join(frame.to_ndarray().tobytes() for frame in audio_frames)
|
101 |
+
with open("query.wav", "wb") as f:
|
102 |
+
f.write(audio_bytes)
|
103 |
+
|
104 |
+
model = whisper.load_model("large")
|
105 |
+
result = model.transcribe("query.wav", language="en", fp16=False)
|
106 |
+
user_question = result["text"]
|
107 |
+
st.write("Transcribed Question:", user_question)
|
108 |
+
user_input(user_question)
|
109 |
|
110 |
with st.sidebar:
|
111 |
st.title("Menu:")
|
|
|
115 |
raw_text = get_pdf_text(pdf_docs)
|
116 |
text_chunks = get_text_chunks(raw_text)
|
117 |
get_vector_store(text_chunks)
|
118 |
+
st.success("Done")
|
|
|
119 |
|
120 |
|
121 |
if __name__ == "__main__":
|