Imageye committed on
Commit
a80fd49
1 Parent(s): 924e4ea

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +18 -16
app.py CHANGED
@@ -5,7 +5,7 @@ import tempfile
5
  import os
6
  import warnings
7
  from groq import Groq
8
- from whisper_jax import FlaxWhisperPipline
9
 
10
  # Suppress specific warning
11
  warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
@@ -13,15 +13,13 @@ warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using F
13
  # Set up Groq client
14
  client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
15
 
16
- # Instantiate Whisper JAX pipeline
17
- pipeline = FlaxWhisperPipline("openai/whisper-large-v2")
18
 
19
- # Function to transcribe audio using Whisper JAX
20
  def transcribe_audio(file_path):
21
- # JIT compile the forward call - slow, but we only do once
22
- text = pipeline(file_path)
23
- # Used cached function thereafter - super fast!!
24
- text = pipeline(file_path)
25
  return text
26
 
27
  # Function to get transcript from YouTube
@@ -222,11 +220,15 @@ if st.session_state.generated_quiz:
222
 
223
  if st.button("Submit Answers"):
224
  if "questions" in st.session_state and st.session_state.questions:
225
- with st.spinner('Processing
If `whisper_jax` is not found, it might not be available on PyPI or installed properly. If you need to use an alternative or similar library, ensure it's correctly installed. Let's try another approach using `whisper` from OpenAI, which should be available on PyPI and can be used similarly.
226
-
227
- ### Updated `requirements.txt`
228
- ```txt
229
- streamlit
230
- youtube_transcript_api
231
- groq
232
- whisper
- ```
 
 
 
 
 
5
  import os
6
  import warnings
7
  from groq import Groq
8
+ from transformers import pipeline
9
 
10
  # Suppress specific warning
11
  warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
 
13
  # Set up Groq client
14
  client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
15
 
16
+ # Instantiate Hugging Face pipeline
17
+ transcriber = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")
18
 
19
# Transcribe an audio file with the module-level Hugging Face ASR pipeline.
def transcribe_audio(file_path):
    # The ASR pipeline returns a dict; the transcript lives under "text".
    result = transcriber(file_path)
    return result["text"]
24
 
25
  # Function to get transcript from YouTube
 
220
 
221
  if st.button("Submit Answers"):
222
  if "questions" in st.session_state and st.session_state.questions:
223
+ with st.spinner('Processing your answers...'):
224
+ feedback = check_answers(st.session_state.questions, st.session_state.user_answers)
225
+ st.write("## Feedback")
226
+ for i, item in enumerate(feedback):
227
+ with st.expander(f"Question {i+1} Feedback"):
228
+ st.write(f"### {item['question']}")
229
+ st.write(f"**Your answer:** {item['user_answer']}")
230
+ st.write(f"**Correct answer:** {item['correct_answer']}")
231
+ if item['status'] == "Incorrect":
232
+ st.write(f"**Explanation:** {item['explanation']}")
233
+ else:
234
+ st.write("Please generate the quiz first.")