Update app.py
app.py
CHANGED
@@ -5,7 +5,7 @@ import tempfile
 import os
 import warnings
 from groq import Groq
-from
+from transformers import pipeline
 
 # Suppress specific warning
 warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
@@ -13,15 +13,13 @@ warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using F
 # Set up Groq client
 client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
 
-# Instantiate
-
+# Instantiate Hugging Face pipeline
+transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")
 
-# Function to transcribe audio using
+# Function to transcribe audio using Hugging Face Transformers
 def transcribe_audio(file_path):
-
-    text =
-    # Used cached function thereafter - super fast!!
-    text = pipeline(file_path)
+    result = transcriber(file_path)
+    text = result["text"]
     return text
 
 # Function to get transcript from YouTube
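The hunk above replaces the previous transcription call with the Transformers automatic-speech-recognition pipeline. A minimal standalone sketch of the same call path follows; the model name matches the diff, while the sample file path and the chunk_length_s argument are assumptions (not part of this commit), shown only as one way to handle clips longer than Whisper's 30-second input window.

    from transformers import pipeline

    # Same task and model as in the diff above.
    # chunk_length_s is an assumption: it lets the pipeline split long audio
    # into 30-second windows instead of truncating it.
    transcriber = pipeline(
        "automatic-speech-recognition",
        model="openai/whisper-large-v2",
        chunk_length_s=30,
    )

    result = transcriber("sample.wav")  # hypothetical local audio file
    print(result["text"])               # the ASR pipeline returns a dict with a "text" key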
@@ -222,11 +220,15 @@ if st.session_state.generated_quiz:
 
     if st.button("Submit Answers"):
         if "questions" in st.session_state and st.session_state.questions:
-            with st.spinner('
-
-
-
-
-
-
-
+            with st.spinner('Processing your answers...'):
+                feedback = check_answers(st.session_state.questions, st.session_state.user_answers)
+                st.write("## Feedback")
+                for i, item in enumerate(feedback):
+                    with st.expander(f"Question {i+1} Feedback"):
+                        st.write(f"### {item['question']}")
+                        st.write(f"**Your answer:** {item['user_answer']}")
+                        st.write(f"**Correct answer:** {item['correct_answer']}")
+                        if item['status'] == "Incorrect":
+                            st.write(f"**Explanation:** {item['explanation']}")
+        else:
+            st.write("Please generate the quiz first.")
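The new feedback block reads question, user_answer, correct_answer, status, and explanation from each item returned by check_answers, which is defined elsewhere in app.py. A hypothetical example of one such entry, inferred only from the keys the loop accesses (the values below are invented for illustration):

    # Hypothetical shape of one check_answers() entry; not taken from the commit.
    feedback_item = {
        "question": "Which library performs the transcription?",
        "user_answer": "Groq",
        "correct_answer": "Hugging Face Transformers (Whisper)",
        "status": "Incorrect",   # the expander only shows an explanation for "Incorrect"
        "explanation": "transcribe_audio() runs the Whisper ASR pipeline on the audio file.",
    }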