hf-transformers-bot
committed on
Commit • cac7aba
Parent(s): 0a87dd4
Upload 2024-06-07/ci_results_run_pipelines_torch_gpu/torch_pipeline_results.json with huggingface_hub
2024-06-07/ci_results_run_pipelines_torch_gpu/torch_pipeline_results.json
ADDED
@@ -0,0 +1,68 @@
+{
+    "failed": {
+        "unclassified": 0,
+        "single": 6,
+        "multi": 6
+    },
+    "success": 437,
+    "time_spent": "5:20:14, 5:27:56, ",
+    "error": false,
+    "failures": {
+        "multi": [
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_seamless_v2",
+                "trace": "(line 3430) TypeError: object of type 'NoneType' has no len()"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_simple_whisper_asr",
+                "trace": "(line 753) AssertionError: {'tex[827 chars]4.56, 4.92)}, {'text': ' gospel.', 'timestamp': (4.92, 5.82)}]} != {'tex[827 chars]4.56, 4.92)}, {'text': ' gospel.', 'timestamp': (4.92, 5.84)}]}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_speculative_decoding_whisper_distil",
+                "trace": "(line 905) AssertionError: True != 'Make sure that assistant decoding is faster'"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_large_word_timestamps_batched",
+                "trace": "(line 100) IndexError: tuple index out of range"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_prompted",
+                "trace": "(line 572) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument tensors in method wrapper_CUDA_cat)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_text_to_audio.py::TextToAudioPipelineTests::test_conversion_additional_tensor",
+                "trace": "(line 1836) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking argument for argument test_elements in method wrapper_CUDA_isin_Tensor_Tensor)"
+            }
+        ],
+        "single": [
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_seamless_v2",
+                "trace": "(line 3430) TypeError: object of type 'NoneType' has no len()"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_simple_whisper_asr",
+                "trace": "(line 753) AssertionError: {'tex[827 chars]4.56, 4.92)}, {'text': ' gospel.', 'timestamp': (4.92, 5.82)}]} != {'tex[827 chars]4.56, 4.92)}, {'text': ' gospel.', 'timestamp': (4.92, 5.84)}]}"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_speculative_decoding_whisper_distil",
+                "trace": "(line 905) AssertionError: True != 'Make sure that assistant decoding is faster'"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_large_word_timestamps_batched",
+                "trace": "(line 100) IndexError: tuple index out of range"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_automatic_speech_recognition.py::AutomaticSpeechRecognitionPipelineTests::test_whisper_prompted",
+                "trace": "(line 572) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument tensors in method wrapper_CUDA_cat)"
+            },
+            {
+                "line": "tests/pipelines/test_pipelines_text_to_audio.py::TextToAudioPipelineTests::test_conversion_additional_tensor",
+                "trace": "(line 1836) RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:0 and cpu! (when checking argument for argument test_elements in method wrapper_CUDA_isin_Tensor_Tensor)"
+            }
+        ]
+    },
+    "job_link": {
+        "multi": "https://github.com/huggingface/transformers/actions/runs/9406259076/job/25909299564",
+        "single": "https://github.com/huggingface/transformers/actions/runs/9406259076/job/25909298322"
+    }
+}
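
Since the report is plain JSON, it can be inspected programmatically. A minimal sketch (assuming a local copy of the file named torch_pipeline_results.json; the script itself is not part of this commit) that prints the failing tests for each runner setup:

import json

# Load the uploaded CI report from a local copy of the file above.
with open("torch_pipeline_results.json") as f:
    report = json.load(f)

print(f"passed: {report['success']}, failed: {report['failed']}")

# "failures" maps each runner setup (single-GPU / multi-GPU) to its failing tests.
for setup, failures in report["failures"].items():
    print(f"\n[{setup}] {len(failures)} failures")
    for failure in failures:
        print(f"  {failure['line']}")
        print(f"    {failure['trace']}")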
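The commit message states the file was uploaded with huggingface_hub. A sketch of how such an upload can be performed with HfApi.upload_file; the repo_id and repo_type below are illustrative placeholders, not taken from this page:

from huggingface_hub import HfApi

api = HfApi()

# Push the local report to the same path in the target repository.
# repo_id and repo_type are hypothetical; substitute the actual CI results repo.
api.upload_file(
    path_or_fileobj="torch_pipeline_results.json",
    path_in_repo="2024-06-07/ci_results_run_pipelines_torch_gpu/torch_pipeline_results.json",
    repo_id="hf-internal-testing/ci_results",
    repo_type="dataset",
    commit_message="Upload 2024-06-07/ci_results_run_pipelines_torch_gpu/torch_pipeline_results.json with huggingface_hub",
)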