KarthickAdopleAI committed
Commit 36bafb0 · Parent(s): fa9ab88
Update app.py
app.py CHANGED
@@ -5,19 +5,17 @@ import ffmpeg
 from typing import List
 from moviepy.editor import VideoFileClip
 import nltk
-from huggingface_hub import InferenceClient
 from sklearn.feature_extraction.text import TfidfVectorizer
 from langchain import HuggingFaceHub, PromptTemplate, LLMChain
 import gradio as gr
 from pytube import YouTube
 import requests
 import logging
-
+import os
 nltk.download('punkt')
 nltk.download('stopwords')


-
 class VideoAnalytics:
     """
     Class for performing analytics on videos including transcription, summarization, topic generation,
@@ -42,8 +40,10 @@ class VideoAnalytics:
         # API URL for accessing the Hugging Face model
         self.API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"

+
+        hf_token = os.getenv('HF_TOKEN')
         # Placeholder for Hugging Face API token
-        self.hf_token =
+        self.hf_token = hf_token # Replace this with the actual Hugging Face API token

         # Set headers for API requests with Hugging Face token
         self.headers = {"Authorization": f"Bearer {self.hf_token}"}
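The point of this hunk: the token is no longer hard-coded in the source but read from the environment, the standard way to handle secrets in a Space. A minimal sketch of the resulting request setup, assuming HF_TOKEN is set as a Space secret; the query helper is reconstructed from the later call site (output = query(audio_file)) and is an assumption, not code from this commit:

import os
import requests

API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"
headers = {"Authorization": f"Bearer {os.getenv('HF_TOKEN')}"}

def query(audio_file: str) -> dict:
    # POST the raw audio bytes to the hosted Whisper model; on success the
    # API returns JSON of the form {"text": "..."}.
    with open(audio_file, "rb") as f:
        return requests.post(API_URL, headers=headers, data=f.read()).json()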
@@ -85,6 +85,8 @@ class VideoAnalytics:

         # Send audio data to the Hugging Face model for transcription
         output = query(audio_file)
+
+        print(output)
         # Update the transcribed_text attribute with the transcription result
         self.transcribed_text = output["text"]
         # Update the translation text into english_text
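The added print(output) is a debugging aid. Worth noting: the serverless Inference API returns an error payload (e.g. {"error": ..., "estimated_time": ...}) while the model is still loading, in which case output["text"] on the next line raises KeyError. A hedged sketch of a guard, assuming the same output shape; this is not part of the commit:

import logging

def extract_transcription(output: dict) -> str:
    # Guard against the API's error payload before indexing "text";
    # return an empty string so callers can degrade gracefully.
    if "text" not in output:
        logging.error(f"Transcription failed: {output}")
        return ""
    return output["text"]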
@@ -374,6 +376,10 @@ class VideoAnalytics:
             # Log any errors that occur during initialization of YouTube object
             logging.error(f"Error downloading video: {e}")
             return ""
+    def save_audio_with_gtts(self,text, filename):
+        tts = gTTS(text=text, lang='en')
+        tts.save(filename)
+        return filename

     def main(self, video: str = None, input_path: str = None) -> tuple:
         """
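The new save_audio_with_gtts method drives the audio outputs added below. Note that no gtts import appears in this commit's import hunk, so unless gTTS is imported elsewhere in app.py, the name is undefined at call time. A standalone sketch of the helper, assuming the gtts package is installed:

from gtts import gTTS

def save_audio_with_gtts(text: str, filename: str) -> str:
    # Synthesize English speech for the text and write it to an MP3 file,
    # returning the path so it can feed straight into a gr.Audio output.
    tts = gTTS(text=text, lang='en')
    tts.save(filename)
    return filename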
@@ -387,31 +393,38 @@
             tuple: Summary, important sentences, and topics.
         """
         try:
-            video = VideoFileClip(input_path)
-            duration = video.duration
-            video.close()
-
-            if round(duration) < 600:
             # Download the video if input_path is provided, otherwise use the provided video path
-
-
+            if input_path:
+                input_path = self.Download(input_path)
+                video_ = VideoFileClip(input_path)
+                duration = video_.duration
+                video_.close()
+                if round(duration) <= 600:
                     text = self.transcribe_video(input_path)
-
+                else:
+                    return "Video Duration Above 10 Minutes,Try Below 10 Minutes Video","",""
+            elif video:
+                video_ = VideoFileClip(video)
+                duration = video_.duration
+                video_.close()
+                if round(duration) <= 600:
                     text = self.transcribe_video(video)
                     input_path = video
-
-
-
-
-
-
-
-
-
-
-
-
-
+                else:
+                    return "Video Duration Above 10 Minutes,Try Below 10 Minutes Video","",""
+            # Generate summary, important sentences, and topics
+            summary = self.generate_video_summary()
+            self.write_text_files(summary,"Summary")
+            summary_voice = save_audio_with_gtts(summary,"summary.mp3")
+            important_sentences = self.extract_video_important_sentence()
+            self.write_text_files(important_sentences,"Important_Sentence")
+            important_sentences_voice = save_audio_with_gtts(important_sentences,"important_sentences.mp3")
+            topics = self.generate_topics()
+            self.write_text_files(topics,"Topics")
+            topics_voice = save_audio_with_gtts(topics,"topics.mp3")
+
+            # Return the generated summary, important sentences, and topics
+            return summary,important_sentences,topics,summary_voice,important_sentences_voice,topics_voice

         except Exception as e:
             # Log any errors that occur during video analytics
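Two things stand out in this hunk. First, main() calls save_audio_with_gtts(...) as a bare function even though it was just added as a method, so as written those lines raise NameError (self.save_audio_with_gtts(...) is presumably intended, and may explain the Space's runtime-error status). Second, the success path returns six values while the over-ten-minutes paths return only three, which cannot match the six output components wired up below. The duration gate itself is straightforward; a self-contained sketch of the check, assuming moviepy is available:

from moviepy.editor import VideoFileClip

def within_duration_limit(path: str, limit_secs: int = 600) -> bool:
    # Open the clip only long enough to read its duration, then close it
    # so the underlying file handle is released before transcription.
    clip = VideoFileClip(path)
    duration = clip.duration
    clip.close()
    return round(duration) <= limit_secs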
@@ -432,28 +445,34 @@
                     summary = gr.Textbox(show_label=False,lines=10)
                 with gr.Row():
                     summary_download = gr.DownloadButton(label="Download",value="Summary.txt",visible=True,size='lg',elem_classes="download_button")
+                with gr.Row():
+                    summary_audio = gr.Audio(show_label= False,elem_classes='audio_class')
             with gr.Tab("Important Sentences"):
                 with gr.Row():
                     Important_Sentences = gr.Textbox(show_label=False,lines=10)
                 with gr.Row():
                     sentence_download = gr.DownloadButton(label="Download",value="Important_Sentence.txt",visible=True,size='lg',elem_classes="download_button")
+                with gr.Row():
+                    important_sentence_audio = gr.Audio(show_label = False,elem_classes='audio_class')
             with gr.Tab("Topics"):
                 with gr.Row():
                     Topics = gr.Textbox(show_label=False,lines=10)
                 with gr.Row():
                     topics_download = gr.DownloadButton(label="Download",value="Topics.txt",visible=True,size='lg',elem_classes="download_button")
+                with gr.Row():
+                    topics_audio = gr.Audio(show_label=False,elem_classes='audio_class')
             with gr.Tab("Video QA"):
                 with gr.Row():
                     with gr.Column(scale=0.70):
                         question = gr.Textbox(show_label=False,placeholder="Ask Your Questions...")
                     with gr.Column(scale=0.30):
-                        model = gr.Dropdown(["OpenAI", "Mixtral"],
+                        model = gr.Dropdown(["OpenAI", "Mixtral"],show_label=False,value="model")
                 with gr.Row():
                     result = gr.Textbox(label='Answer',lines=10)
-            submit_btn.click(self.main,[video,yt_link],[summary,Important_Sentences,Topics])
+            submit_btn.click(self.main,[video,yt_link],[summary,Important_Sentences,Topics,summary_audio,important_sentence_audio,topics_audio])
             question.submit(self.video_qa,[question,model],result)
         demo.launch()
-
+
 if __name__ == "__main__":
     video_analytics = VideoAnalytics()
     video_analytics.gradio_interface()
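On the UI side, each tab gains a gr.Audio row and submit_btn.click now maps six outputs. Two details to flag: gr.Dropdown(..., value="model") sets the default to the literal string "model", which is not one of the choices, and the six-output wiring only works if main() always returns six values (see the note above). A minimal, hypothetical repro of the wiring pattern, with a stub standing in for VideoAnalytics.main:

import gradio as gr

def analyze(video, yt_link):
    # Stub standing in for VideoAnalytics.main: one return value is
    # required per output component listed in .click() below.
    return "summary", "sentences", "topics", None, None, None

with gr.Blocks() as demo:
    video = gr.Video()
    yt_link = gr.Textbox(label="YouTube Link")
    submit_btn = gr.Button("Submit")
    summary = gr.Textbox(show_label=False, lines=10)
    summary_audio = gr.Audio(show_label=False)
    sentences = gr.Textbox(show_label=False, lines=10)
    sentences_audio = gr.Audio(show_label=False)
    topics = gr.Textbox(show_label=False, lines=10)
    topics_audio = gr.Audio(show_label=False)
    # value= should be one of the listed choices, not the string "model"
    model = gr.Dropdown(["OpenAI", "Mixtral"], show_label=False, value="OpenAI")
    submit_btn.click(analyze, [video, yt_link],
                     [summary, sentences, topics, summary_audio, sentences_audio, topics_audio])

demo.launch()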