Upload 4 files
Browse files- .gitattributes +34 -0
- README.md +12 -0
- app.py +102 -0
- requirements.txt +10 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
title: Whisper
|
3 |
+
emoji: 🐠
|
4 |
+
colorFrom: purple
|
5 |
+
colorTo: blue
|
6 |
+
sdk: gradio
|
7 |
+
sdk_version: 3.12.0
|
8 |
+
app_file: app.py
|
9 |
+
pinned: false
|
10 |
+
---
|
11 |
+
|
12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
@@ -0,0 +1,102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Standard library.
import os

# UI framework for the Space.
import gradio as gr
# ASR inference pipeline (Whisper checkpoint loaded below).
from transformers import pipeline
# YouTube audio download.
from pytube import YouTube
# Batched audio decoding/resampling for the pipeline.
from datasets import Dataset, Audio
# Audio segmentation (subclip + wav export).
from moviepy.editor import AudioFileClip
# NOTE(review): `import googletrans` is redundant — only `Translator` below is used.
import googletrans
from googletrans import Translator

# Fine-tuned Whisper-small checkpoint used for transcription.
# NOTE(review): the model id ends in "-hi" (Hindi?) while the UI below claims
# Swedish transcription — confirm the checkpoint is actually Swedish-tuned.
pipe = pipeline(model="rafat0421/whisper-small-hi")
|
11 |
+
|
12 |
+
def download_from_youtube(url):
    """Download the audio track of the YouTube video at *url*.

    Returns the local filesystem path of the downloaded mp4 audio file.
    """
    audio_streams = YouTube(url).streams.filter(only_audio=True, file_extension='mp4')
    return audio_streams.first().download()
|
16 |
+
|
17 |
+
def get_timestamp(seconds):
    """Format a start offset in seconds as an ``MM:SS`` string.

    Minutes are not wrapped at 60, so offsets beyond an hour render as
    e.g. ``61:01``.
    """
    whole_minutes = int(seconds / 60)
    leftover_seconds = int(seconds % 60)
    return "{:02d}:{:02d}".format(whole_minutes, leftover_seconds)
|
21 |
+
|
22 |
+
def create_segments(audio_fpath, seconds_max):
    """Split an audio file into up-to-30-second WAV segments.

    Segments are written to ``segmented_audios/segment_<i>.wav``.  At most
    ``seconds_max`` seconds of audio are segmented; a trailing remainder
    shorter than ~2 seconds is skipped.

    Parameters
    ----------
    audio_fpath : str
        Path to the source audio file (any format moviepy/ffmpeg can read).
    seconds_max : int
        Upper bound (in seconds) on the total audio to segment.

    Returns
    -------
    tuple[list[str], list[int]]
        Paths of the written segment files and their start offsets in seconds.
    """
    os.makedirs("segmented_audios", exist_ok=True)

    sound = AudioFileClip(audio_fpath)
    try:
        n_full_segments = int(sound.duration / 30)
        len_last_segment = sound.duration % 30

        # Cap the number of segments by the requested maximum duration.
        max_segments = int(seconds_max / 30)
        if n_full_segments > max_segments:
            n_full_segments = max_segments
            len_last_segment = 0

        segment_paths = []
        segment_start_times = []

        segments_available = n_full_segments + 1
        for i in range(min(segments_available, max_segments)):
            start = i * 30

            # Skip the final partial segment when it is <= 2 seconds long:
            # too short to transcribe usefully.
            is_last_segment = i == n_full_segments
            if is_last_segment and not len_last_segment > 2:
                continue
            elif is_last_segment:
                end = start + len_last_segment
            else:
                end = (i + 1) * 30

            segment_path = os.path.join("segmented_audios", f"segment_{i}.wav")
            segment = sound.subclip(start, end)
            segment.write_audiofile(segment_path)
            segment_paths.append(segment_path)
            segment_start_times.append(start)
    finally:
        # Release the underlying ffmpeg reader; the original leaked it on
        # every call (and on any exception while writing segments).
        sound.close()

    return segment_paths, segment_start_times
|
58 |
+
|
59 |
+
def get_translation(text):
    """Translate *text* from Swedish to English using the googletrans web API."""
    return Translator().translate(text, src='sv', dest='en').text
|
70 |
+
|
71 |
+
def transcribe(audio, url, seconds_max):
    """Transcribe speech to text, with an English translation per segment.

    Parameters
    ----------
    audio : str or None
        Filepath of a microphone recording (Gradio ``type="filepath"``),
        or None when nothing was recorded.
    url : str
        Optional YouTube link; when non-empty it takes precedence over
        ``audio``.
    seconds_max : int
        Upper bound (in seconds) on how much of the YouTube audio to process.

    Returns
    -------
    str
        The transcription; for YouTube input, one annotated block per
        30-second segment including a translation.
    """
    if url:
        fpath = download_from_youtube(url)
        segment_paths, segment_start_times = create_segments(fpath, seconds_max)

        # Decode all segments in a single pipeline call; Audio() resamples
        # to the 16 kHz rate Whisper expects.
        audio_dataset = Dataset.from_dict({"audio": segment_paths}).cast_column("audio", Audio(sampling_rate=16000))
        pred = pipe(audio_dataset["audio"])
        text = ""
        n_segments = len(segment_start_times)
        for i, (seconds, output) in enumerate(zip(segment_start_times, pred)):
            text += f"[Segment {i+1}/{n_segments}, start time {get_timestamp(seconds)}]\n"
            text += f"{output['text']}\n"
            text += f"[Translation]\n{get_translation(output['text'])}\n\n"
        return text

    if audio is None:
        # Neither input was provided: fail gracefully instead of crashing
        # inside the pipeline with an opaque error.
        return "Please record audio from the microphone or provide a YouTube URL."

    return pipe(audio)["text"]
|
89 |
+
|
90 |
+
# Gradio UI: a microphone recorder, an optional YouTube URL, and a slider
# bounding how many seconds of the video to transcribe.
mic_input = gr.Audio(source="microphone", type="filepath", label="Transcribe from Microphone")
url_input = gr.Text(max_lines=1, placeholder="YouTube Link", label="Transcribe from YouTube URL")
length_slider = gr.Slider(minimum=30, maximum=600, value=30, step=30, label="Number of seconds to transcribe")

iface = gr.Interface(
    fn=transcribe,
    inputs=[mic_input, url_input, length_slider],
    outputs="text",
    title="Whisper: transcribe Swedish language audio to text",
    description="Swedish Text Transcription using Transformers.",
)

iface.launch()
|
requirements.txt
ADDED
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
gradio==3.12.0
|
2 |
+
transformers==4.25.1
|
3 |
+
librosa
|
4 |
+
jiwer
|
5 |
+
datasets
|
6 |
+
torch
|
7 |
+
torchaudio
|
8 |
+
moviepy
|
9 |
+
git+https://github.com/pytube/pytube
|
10 |
+
googletrans
|