Imageye committed
Commit 6e83819
1 Parent(s): 5ac3776

Update app.py

Files changed (1)
  1. app.py +38 -42
app.py CHANGED
@@ -4,24 +4,33 @@ import re
 import tempfile
 import os
 import warnings
-import requests
+from groq import Groq
+import torch
 from transformers import pipeline
-from whisper_jax import FlaxWhisperPipeline
+
+# Set up device for torch
+device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+# Set up ASR pipeline with a smaller model for reduced memory usage
+asr_pipeline = pipeline(
+    "automatic-speech-recognition",
+    model="openai/whisper-medium",
+    chunk_length_s=30,
+    device=device,
+)
 
 # Suppress specific warning
 warnings.filterwarnings("ignore", message="FP16 is not supported on CPU; using FP32 instead")
 
-# Set up Groq API key
-GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
+# Set up Groq client
+client = Groq(api_key=os.environ.get("GROQ_API_KEY"))
 
-# Instantiate FlaxWhisperPipeline
-pipeline = FlaxWhisperPipeline("openai/whisper-large-v2")
-
-# Function to transcribe audio using FlaxWhisperPipeline
+# Function to transcribe audio using ASR pipeline
 def transcribe_audio(file_path):
-    # JIT compile the forward call - slow, but we only do once
-    text = pipeline(file_path)
-    return text
+    with open(file_path, "rb") as f:
+        audio_bytes = f.read()
+    prediction = asr_pipeline(audio_bytes)["text"]
+    return prediction
 
 # Function to get transcript from YouTube
 def get_transcript(url):
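This hunk swaps the whisper-jax pipeline for a standard transformers ASR pipeline. A minimal standalone sketch of the new transcription path under the same configuration (the `sample.wav` filename is hypothetical; the whisper-medium weights download on first use):

```python
import torch
from transformers import pipeline

device = "cuda:0" if torch.cuda.is_available() else "cpu"

# Same settings as the diff: chunking keeps memory bounded on long audio
asr = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-medium",
    chunk_length_s=30,
    device=device,
)

# The pipeline accepts raw audio bytes, which transcribe_audio() relies on
with open("sample.wav", "rb") as f:  # hypothetical input file
    audio_bytes = f.read()
print(asr(audio_bytes)["text"])
```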
@@ -36,30 +45,19 @@ def get_transcript(url):
     except Exception as e:
         return str(e)
 
-# Function to make a request to the Groq API
-def groq_request(payload):
-    headers = {
-        "Authorization": f"Bearer {GROQ_API_KEY}",
-        "Content-Type": "application/json"
-    }
-    response = requests.post("https://api.groq.com/v1/endpoint", json=payload, headers=headers)
-    response.raise_for_status()
-    return response.json()
-
 # Function to summarize text using Groq API
 def summarize_text(text):
     try:
-        payload = {
-            "model": "llama3-8b-8192",
-            "messages": [
+        response = client.chat.completions.create(
+            messages=[
                 {
                     "role": "user",
                     "content": f"Summarize the following text:\n\n{text}"
                 }
-            ]
-        }
-        response = groq_request(payload)
-        summary = response['choices'][0]['message']['content'].strip()
+            ],
+            model="llama3-8b-8192",
+        )
+        summary = response.choices[0].message.content.strip()
         return summary
     except Exception as e:
         return f"Error summarizing text: {e}"
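Here the hand-rolled `requests` call to a placeholder endpoint is replaced by the official Groq SDK. A minimal sketch of the new call shape, assuming `GROQ_API_KEY` is set in the environment (the prompt text is illustrative):

```python
import os
from groq import Groq

client = Groq(api_key=os.environ.get("GROQ_API_KEY"))

# Same call shape as summarize_text() above
response = client.chat.completions.create(
    messages=[{"role": "user", "content": "Summarize the following text:\n\nJAX arrays are immutable."}],
    model="llama3-8b-8192",
)
print(response.choices[0].message.content.strip())
```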
@@ -67,17 +65,16 @@ def summarize_text(text):
 # Function to generate quiz questions using Groq API
 def generate_quiz_questions(text):
     try:
-        payload = {
-            "model": "llama3-8b-8192",
-            "messages": [
+        response = client.chat.completions.create(
+            messages=[
                 {
                     "role": "user",
                     "content": f"Generate quiz questions for the following text:\n\n{text}"
                 }
-            ]
-        }
-        response = groq_request(payload)
-        quiz_questions = response['choices'][0]['message']['content'].strip()
+            ],
+            model="llama3-8b-8192",
+        )
+        quiz_questions = response.choices[0].message.content.strip()
         return quiz_questions
     except Exception as e:
         return f"Error generating quiz questions: {e}"
@@ -119,17 +116,16 @@ def parse_quiz_questions(quiz_text):
 # Function to generate explanation for quiz answers using Groq API
 def generate_explanation(question, correct_answer, user_answer):
     try:
-        payload = {
-            "model": "llama3-8b-8192",
-            "messages": [
+        response = client.chat.completions.create(
+            messages=[
                 {
                     "role": "user",
                     "content": f"Explain why the correct answer to the following question is '{correct_answer}' and not '{user_answer}':\n\n{question}"
                 }
-            ]
-        }
-        response = groq_request(payload)
-        explanation = response['choices'][0]['message']['content'].strip()
+            ],
+            model="llama3-8b-8192",
+        )
+        explanation = response.choices[0].message.content.strip()
         return explanation
     except Exception as e:
         return f"Error generating explanation: {e}"
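After this change, `summarize_text`, `generate_quiz_questions`, and `generate_explanation` differ only in their prompt, so a follow-up could restore a shared helper in the spirit of the removed `groq_request`. A sketch (the `_chat` name is hypothetical):

```python
def _chat(prompt: str, model: str = "llama3-8b-8192") -> str:
    # One place to change the model or add retries for all three callers
    response = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model=model,
    )
    return response.choices[0].message.content.strip()

def summarize_text(text):
    try:
        return _chat(f"Summarize the following text:\n\n{text}")
    except Exception as e:
        return f"Error summarizing text: {e}"
```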
 