Update README.md
README.md CHANGED
````diff
@@ -13,8 +13,7 @@ pipeline_tag: automatic-speech-recognition
 ```python
 from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
 import torch
-
-MODEL_NAME = "whis"
+MODEL_NAME = "FILM6912/whisper-large-v3-turbo-thai"
 
 device = "cuda:0" if torch.cuda.is_available() else "cpu"
 torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32
@@ -38,6 +37,12 @@ whisper = pipeline(
     device=device,
 )
 
-whisper("c.mp3",
+whisper("c.mp3",
+    chunk_length_s=30,
+    stride_length_s=5,
+    batch_size=16,
+    return_timestamps=True,
+    generate_kwargs = {"language":"<|th|>"}
+)
 ```
 
````
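The diff collapses the middle of the README snippet (model and processor loading, between the dtype selection and `device=device,`). The sketch below fills that gap with the standard `transformers` Whisper pipeline pattern so the example reads end to end; the `from_pretrained` keyword arguments, the `processor` wiring, and the final `print` are assumptions not visible in this diff, while the imports, `MODEL_NAME`, and the `whisper(...)` call mirror the lines shown above.

```python
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline
import torch

MODEL_NAME = "FILM6912/whisper-large-v3-turbo-thai"

device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

# Model and processor loading: assumed to follow the usual transformers
# Whisper recipe, since this part is collapsed in the diff above.
model = AutoModelForSpeechSeq2Seq.from_pretrained(
    MODEL_NAME,
    torch_dtype=torch_dtype,
    low_cpu_mem_usage=True,
    use_safetensors=True,
)
model.to(device)

processor = AutoProcessor.from_pretrained(MODEL_NAME)

whisper = pipeline(
    "automatic-speech-recognition",
    model=model,
    tokenizer=processor.tokenizer,
    feature_extractor=processor.feature_extractor,
    torch_dtype=torch_dtype,
    device=device,
)

# Transcribe a local audio file ("c.mp3" is the placeholder used in the README),
# chunking long audio into 30 s windows with a 5 s stride and forcing Thai decoding.
result = whisper(
    "c.mp3",
    chunk_length_s=30,
    stride_length_s=5,
    batch_size=16,
    return_timestamps=True,
    generate_kwargs={"language": "<|th|>"},
)
print(result["text"])
```

With `return_timestamps=True` the pipeline returns the full transcription under `"text"` plus a `"chunks"` list of timestamped segments, and the `"<|th|>"` language token pins decoding to Thai rather than relying on automatic language detection.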