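"""Gradio app for transcribing Swedish Parliament decision videos.

Given a date, the app downloads the top decision video for that day, extracts
its audio, splits the audio on silence, and transcribes each chunk with a
fine-tuned Whisper model (Artanis1551/whisper_swedish).
"""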
from transformers import pipeline
import gradio as gr
from video_downloader import download_video, download_video1
from moviepy.editor import AudioFileClip
from moviepy.video.io.ffmpeg_tools import ffmpeg_extract_subclip
import os
from pydub import AudioSegment
from pydub.silence import split_on_silence
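
# Load the fine-tuned Swedish Whisper model as a speech-recognition pipeline.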
pipe = pipeline("automatic-speech-recognition", model="Artanis1551/whisper_swedish")


def process_video1(date):
    """Download the decision video for the given date and transcribe its audio."""
    video_path = download_video1(date)

    # Extract the audio track from the video
    audio_path = f"audio_{date}.wav"
    AudioFileClip(video_path).write_audiofile(audio_path)

    # Split the audio into chunks at silent passages
    audio = AudioSegment.from_wav(audio_path)
    chunks = split_on_silence(audio, min_silence_len=500, silence_thresh=-40)

    # Transcribe each chunk and concatenate the results
    transcription = ""
    for i, chunk in enumerate(chunks):
        chunk.export(f"chunk{i}.wav", format="wav")
        with open(f"chunk{i}.wav", "rb") as audio_file:
            chunk_bytes = audio_file.read()
        transcription += pipe(chunk_bytes)["text"] + "\n "
        os.remove(f"chunk{i}.wav")

    # Remove the temporary audio file
    os.remove(audio_path)
    return video_path, transcription
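
# Gradio UI: takes a date string and shows the downloaded video together with its transcription.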
iface = gr.Interface(
    fn=process_video1,
    inputs=[
        gr.Textbox(label="Date with format YYYY-MM-DD"),
    ],
    outputs=[
        gr.Video(),
        gr.Textbox(lines=1000, max_lines=1000, interactive=True),
    ],
    title="Transcribe Swedish Parliament Decisions",
    description="This app transcribes the top Swedish Parliament decision video from the given date.",
)


def process_video(date):
    """Alternative pipeline: transcribe only the first 30 seconds of the video."""
    # Download the video
    video_path = download_video(date)

    # Extract the first 30 seconds of the video
    short_video_path = f"short_{date}.mp4"
    ffmpeg_extract_subclip(video_path, 0, 30, targetname=short_video_path)

    # Extract audio from the short video
    audio_path = f"audio_{date}.wav"
    AudioFileClip(short_video_path).write_audiofile(audio_path)

    # Split the audio into chunks at silent passages
    audio = AudioSegment.from_wav(audio_path)
    chunks = split_on_silence(audio, min_silence_len=500, silence_thresh=-40)

    # Transcribe each chunk and concatenate the results
    transcription = ""
    for i, chunk in enumerate(chunks):
        chunk.export(f"chunk{i}.wav", format="wav")
        with open(f"chunk{i}.wav", "rb") as audio_file:
            chunk_bytes = audio_file.read()
        transcription += pipe(chunk_bytes)["text"] + " "
        os.remove(f"chunk{i}.wav")

    # Remove the temporary audio file
    os.remove(audio_path)
    return short_video_path, transcription

# iface = gr.Interface(
#     fn=process_video,
#     inputs=gr.inputs.Textbox(label="Date with format YYYYMMDD"),
#     outputs=[
#         gr.outputs.Video(),
#         gr.Textbox(lines=1000, max_lines=1000, interactive=True),
#     ],
#     title="Romanian Transcription Test",
# )
iface.launch()