Spaces:
Sleeping
Sleeping
Update to support faster-whisper-large-v3.
Browse files
- app.py +1 -1
- config.json5 +5 -0
- src/whisper/fasterWhisperContainer.py +5 -1
app.py
CHANGED
@@ -57,7 +57,7 @@ MAX_FILE_PREFIX_LENGTH = 17
|
|
57 |
# Limit auto_parallel to a certain number of CPUs (specify vad_cpu_cores to get a higher number)
|
58 |
MAX_AUTO_CPU_CORES = 8
|
59 |
|
60 |
-
WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]
|
61 |
|
62 |
class VadOptions:
|
63 |
def __init__(self, vad: str = None, vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1,
|
|
|
57 |
# Limit auto_parallel to a certain number of CPUs (specify vad_cpu_cores to get a higher number)
|
58 |
MAX_AUTO_CPU_CORES = 8
|
59 |
|
60 |
+
WHISPER_MODELS = ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2", "large-v3"]
|
61 |
|
62 |
class VadOptions:
|
63 |
def __init__(self, vad: str = None, vadMergeWindow: float = 5, vadMaxMergeSize: float = 150, vadPadding: float = 1, vadPromptWindow: float = 1,
|
config.json5
CHANGED
@@ -26,6 +26,11 @@
|
|
26 |
"name": "large-v2",
|
27 |
"url": "large-v2"
|
28 |
},
|
|
|
|
|
|
|
|
|
|
|
29 |
// Uncomment to add custom Japanese models
|
30 |
//{
|
31 |
// "name": "whisper-large-v2-mix-jp",
|
|
|
26 |
"name": "large-v2",
|
27 |
"url": "large-v2"
|
28 |
},
|
29 |
+
{
|
30 |
+
"name": "large-v3",
|
31 |
+
"url": "avans06/faster-whisper-large-v3",
|
32 |
+
"type": "huggingface"
|
33 |
+
},
|
34 |
// Uncomment to add custom Japanese models
|
35 |
//{
|
36 |
// "name": "whisper-large-v2-mix-jp",
|
src/whisper/fasterWhisperContainer.py
CHANGED
@@ -43,7 +43,7 @@ class FasterWhisperContainer(AbstractWhisperContainer):
|
|
43 |
model_url = model_config.url
|
44 |
|
45 |
if model_config.type == "whisper":
|
46 |
-
if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2"]:
|
47 |
raise Exception("FasterWhisperContainer does not yet support Whisper models. Use ct2-transformers-converter to convert the model to a faster-whisper model.")
|
48 |
if model_url == "large":
|
49 |
# large is an alias for large-v1
|
@@ -55,6 +55,10 @@ class FasterWhisperContainer(AbstractWhisperContainer):
|
|
55 |
device = "auto"
|
56 |
|
57 |
model = WhisperModel(model_url, device=device, compute_type=self.compute_type)
|
|
|
|
|
|
|
|
|
58 |
return model
|
59 |
|
60 |
def create_callback(self, language: str = None, task: str = None,
|
|
|
43 |
model_url = model_config.url
|
44 |
|
45 |
if model_config.type == "whisper":
|
46 |
+
if model_url not in ["tiny", "base", "small", "medium", "large", "large-v1", "large-v2", "large-v3"]:
|
47 |
raise Exception("FasterWhisperContainer does not yet support Whisper models. Use ct2-transformers-converter to convert the model to a faster-whisper model.")
|
48 |
if model_url == "large":
|
49 |
# large is an alias for large-v1
|
|
|
55 |
device = "auto"
|
56 |
|
57 |
model = WhisperModel(model_url, device=device, compute_type=self.compute_type)
|
58 |
+
if "large-v3" in model_url:
|
59 |
+
# Working with Whisper-large-v3
|
60 |
+
# https://github.com/guillaumekln/faster-whisper/issues/547#issuecomment-1797962599
|
61 |
+
model.feature_extractor.mel_filters = model.feature_extractor.get_mel_filters(model.feature_extractor.sampling_rate, model.feature_extractor.n_fft, n_mels=128)
|
62 |
return model
|
63 |
|
64 |
def create_callback(self, language: str = None, task: str = None,
|