SushantGautam committed
Commit
bc53380
1 Parent(s): 59c2767

Create soccer_net_echoes.py

Files changed (1):
  1. soccer_net_echoes.py +105 -0
soccer_net_echoes.py ADDED
@@ -0,0 +1,105 @@
+ # Copyright 2024 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ """ASR dataset for various football leagues and seasons."""
+
+ import json
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @InProceedings{huggingface:dataset,
+     title = {ASR Dataset for Football Leagues},
+     author = {Your Name},
+     year = {2024}
+ }
+ """
+
+ _DESCRIPTION = """\
+ This dataset contains Automatic Speech Recognition (ASR) data for various football leagues and seasons.
+ It includes ASR outputs from Whisper v1, v2, and v3, along with their English-translated versions.
+ """
+
+ _HOMEPAGE = "https://github.com/SoccerNet/sn-echoes"
+
+ _LICENSE = "Apache License 2.0"
+
+ # Sub-directories of Dataset/ in the sn-echoes repository, one per configuration.
+ _URLS = {
+     "whisper_v1": "whisper_v1/",
+     "whisper_v1_en": "whisper_v1_en/",
+     "whisper_v2": "whisper_v2/",
+     "whisper_v2_en": "whisper_v2_en/",
+     "whisper_v3": "whisper_v3/",
+ }
+
+
+ class FootballASRDataset(datasets.GeneratorBasedBuilder):
+     """ASR dataset for various football leagues and seasons."""
+
+     VERSION = datasets.Version("1.0.0")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(name="whisper_v1", version=VERSION, description="ASR from Whisper v1"),
+         datasets.BuilderConfig(name="whisper_v1_en", version=VERSION, description="English-translated ASR from Whisper v1"),
+         # datasets.BuilderConfig(name="whisper_v2", version=VERSION, description="ASR from Whisper v2"),
+         # datasets.BuilderConfig(name="whisper_v2_en", version=VERSION, description="English-translated ASR from Whisper v2"),
+         # datasets.BuilderConfig(name="whisper_v3", version=VERSION, description="ASR from Whisper v3"),
+     ]
+
+     DEFAULT_CONFIG_NAME = "whisper_v1"
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "segment_index": datasets.Value("int32"),
+                 "start_time": datasets.Value("float"),
+                 "end_time": datasets.Value("float"),
+                 "transcribed_text": datasets.Value("string"),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         subdir = _URLS[self.config.name]
+         archive_dir = dl_manager.download_and_extract(
+             "https://codeload.github.com/SoccerNet/sn-echoes/zip/refs/heads/main"
+         )
+         data_dir = os.path.join(archive_dir, "sn-echoes-main", "Dataset", subdir)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={"data_dir": data_dir},
+             )
+         ]
+
+     def _generate_examples(self, data_dir):
+         for root, _, files in os.walk(data_dir):
+             for file in files:
+                 if not file.endswith(".json"):
+                     continue
+                 # Key on the path relative to data_dir so that identically named
+                 # files from different games do not collide.
+                 rel_path = os.path.relpath(os.path.join(root, file), data_dir)
+                 with open(os.path.join(root, file), encoding="utf-8") as f:
+                     data = json.load(f)
+                 # Each JSON file maps a segment index to [start_time, end_time, transcribed_text].
+                 for segment_index, segment_data in data["segments"].items():
+                     yield f"{rel_path}_{segment_index}", {
+                         "segment_index": int(segment_index),
+                         "start_time": segment_data[0],
+                         "end_time": segment_data[1],
+                         "transcribed_text": segment_data[2],
+                     }
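
Usage note: once this script is published on the Hugging Face Hub, the configurations defined above can be loaded with datasets.load_dataset. The sketch below assumes the repository id "SoccerNet/sn-echoes" (only the GitHub homepage appears in the script, so the Hub namespace is an assumption); trust_remote_code=True is required for script-based datasets in recent versions of the datasets library.

    from datasets import load_dataset

    # The Hub repository id below is assumed; replace it with the namespace
    # this script is actually published under.
    ds = load_dataset(
        "SoccerNet/sn-echoes",
        "whisper_v1",          # or "whisper_v1_en" for English-translated transcripts
        split="train",
        trust_remote_code=True,
    )

    # Each example carries the features declared in _info():
    # segment_index, start_time, end_time, transcribed_text.
    print(ds[0])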