yellowcandle committed on
Commit
043229b
1 Parent(s): 5c98753

Update app.py to use Whisper large-v2 model for audio transcription

Files changed (2)
  1. app.py +1 -1
  2. requirements.txt +2 -1
app.py CHANGED
@@ -5,7 +5,7 @@ import torch
 from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
 from datasets import load_dataset
 
-@spaces.GPU(duration=600)
+@spaces.GPU(duration=120)
 def transcribe_audio(audio):
     device = "cuda:0" if torch.cuda.is_available() else "cpu"
     torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
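
The hunk above only shows the decorator change and the device/dtype setup; the rest of transcribe_audio is not part of this commit view. As context for the commit message, here is a minimal sketch of how such a function might load Whisper large-v2 with the transformers ASR pipeline. The model id, chunk_length_s, and everything below the shown lines are assumptions for illustration, not taken from the repository.

# Hypothetical continuation of transcribe_audio (not from this commit).
import spaces
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

@spaces.GPU(duration=120)
def transcribe_audio(audio):
    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

    model_id = "openai/whisper-large-v2"  # assumed, per the commit message
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id,
        torch_dtype=torch_dtype,
        low_cpu_mem_usage=True,
        use_safetensors=True,
    ).to(device)
    processor = AutoProcessor.from_pretrained(model_id)

    # Standard transformers ASR pipeline; chunking is an assumed setting
    # to handle audio longer than Whisper's 30-second window.
    asr = pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        torch_dtype=torch_dtype,
        device=device,
        chunk_length_s=30,
    )
    return asr(audio)["text"]
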
requirements.txt CHANGED
@@ -3,4 +3,5 @@ transformers
 torch
 torchvision
 torchaudio
-accelerate
+accelerate
+datasets