LennyBijan and albertvillanova (HF staff) committed
Commit 8a5263a
1 Parent(s): 4a0dbf0

Convert dataset to Parquet (#2)


- Convert dataset to Parquet (8cccce533b3f7c54ec2686e6f186ba6bcd931a6b)
- Delete loading script (05c785c6f7d23f2482427b8166495e58a48df7e2)
- Delete data file (0f157d87a8c1a51deff6ab27753e60a68593314c)
- Delete data file (b30a9cee5255b582415cf172dfa141ac74547dfd)
- Delete data file (ab97d677fce8920822f23ebeb5980c9f8f6dbe7c)
- Delete data file (5b7dfed1a0c9cd0f692459bd1952dd974df64267)


Co-authored-by: Albert Villanova <[email protected]>
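With this change the data ships as Parquet shards declared in the README's configs section, so the dataset loads without the custom loading script. A minimal sketch of the post-conversion usage, assuming the standard datasets API:

from datasets import load_dataset

# Default config: train/test splits are read from the data/train-* and data/test-* Parquet shards.
ds = load_dataset("LennyBijan/BA_Datensatz_V2")
print(ds["train"][0]["sentence"])  # columns: clip_id, path, audio, sentence, split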

BA_Datensatz_V2.py DELETED
@@ -1,82 +0,0 @@
-import datasets
-import csv
-import json
-import os
-import csv
-
-# Defining Access
-_DATA_URL = "https://huggingface.co/datasets/LennyBijan/BA_Datensatz_V2/resolve/main/data"
-
-
-class BA_Datensatz_V2(datasets.GeneratorBasedBuilder):
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description="German Dataset that focuses on juristical data",
-            features=datasets.Features({
-                "clip_id": datasets.Value("string"),
-                "path": datasets.Value("string"),
-                "audio": datasets.Audio(sampling_rate=16_000),
-                "sentence": datasets.Value("string"),
-                "split": datasets.Value("string")
-            }),
-            supervised_keys=None,
-        )
-
-    def _split_generators(self, dl_manager):
-        dl_manager.download_config.ignore_url_params = True
-
-        audio_path = {}
-        local_extracted_archive = {}
-        metadata_path = {}
-        split_type = {"train": datasets.Split.TRAIN, "test": datasets.Split.TEST}
-        for split in split_type:
-            audio_path[split] = dl_manager.download(f"{_DATA_URL}/audio_{split}.tgz")
-            local_extracted_archive[split] = dl_manager.extract(audio_path[split]) if not dl_manager.is_streaming else None
-            metadata_path[split] = dl_manager.download_and_extract(f"{_DATA_URL}/metadata_{split}.csv.gz")
-        path_to_clips = "BA_Datensatz_V2"
-
-        return [
-            datasets.SplitGenerator(
-                name=split_type[split],
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive[split],
-                    "audio_files": dl_manager.iter_archive(audio_path[split]),
-                    "metadata_path": dl_manager.download_and_extract(metadata_path[split]),
-                    "path_to_clips": path_to_clips,
-                    "split": split  # pass the split name to _generate_examples
-                },
-            ) for split in split_type
-        ]
-
-    def _generate_examples(self, audio_files, metadata_path, path_to_clips, local_extracted_archive, split):
-        metadata = {}
-        # Open and read the metadata CSV file.
-        with open(metadata_path, "r", encoding="utf-8") as f:
-            reader = csv.reader(f)
-            for row in reader:
-                filename, sentence = row
-                clip_id = filename.split('_')[0]
-                # Append the split name dynamically to the path_to_clips.
-                path = os.path.join(path_to_clips, split + "/wav/" + clip_id, filename)
-                metadata[path] = {
-                    "clip_id": clip_id,
-                    "sentence": sentence,
-                    "path": path,
-                }
-        id_ = 0
-        for path, file_content in audio_files:  # No need to adjust the path if no local extraction.
-            if path in metadata:
-                result = dict(metadata[path])
-                path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
-                audio_data = {"path": path, "bytes": file_content.read()}
-                result["audio"] = audio_data
-                result["path"] = path
-                yield id_, result
-                id_ += 1
-            else:
-                print(f"No metadata entry for {path}")
-
-
-
-
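For reference, the deleted script above downloaded the audio_*.tgz archives and metadata_*.csv.gz files and paired archive members with CSV rows at generation time. A rough, hypothetical sketch of how such a script-backed dataset can be converted to Parquet with the datasets library (the actual conversion was done by the Hub's tooling; push_to_hub is just one equivalent route):

from datasets import load_dataset

# Hypothetical sketch: materialize the dataset through the old loading script,
# then re-upload it; push_to_hub stores each split as Parquet shards under data/.
ds = load_dataset("LennyBijan/BA_Datensatz_V2", trust_remote_code=True)
ds.push_to_hub("LennyBijan/BA_Datensatz_V2")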
README.md CHANGED
@@ -21,20 +21,22 @@ dataset_info:
         sampling_rate: 16000
   - name: sentence
     dtype: string
+  - name: split
+    dtype: string
   splits:
   - name: train
-    num_bytes: 1040277177.0
+    num_bytes: 1043053050.066
     num_examples: 4441
   - name: test
-    num_bytes: 349166136.0
+    num_bytes: 349087555.272
     num_examples: 1483
-  download_size: 1388336650
-  dataset_size: 1389443313.0
-configs:
-- config_name: default
-  data_files:
-  - split: train
-    path: "data/metadata_train.csv.gz"
-  - split: test
-    path: "data/metadata_test.csv.gz"
+  download_size: 1389079147
+  dataset_size: 1392140605.338
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 ---
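The updated metadata adds a string-typed split feature and points the default config at the Parquet shards. A small, illustrative sketch for checking the schema and split sizes against the values above:

from datasets import load_dataset

# Illustrative check of the schema and split sizes described in the updated README metadata.
ds = load_dataset("LennyBijan/BA_Datensatz_V2")
print(ds)                              # train: 4441 examples, test: 1483 examples
print(ds["train"].features["split"])   # string-typed 'split' column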
data/audio_test.tgz DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6d9688b3f7092e579eef966101b0d441f73d7f0639b195c3f623b392b4ba9fdd
-size 300579710
data/audio_train.tgz DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:e4c574057441b779a73933afe08f80f7172ecaed08f3c2fd43199c5f224b325b
-size 929735209
data/metadata_test.csv.gz DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:fafa4d28d074f972ede61ae2316ed5dbe9ff49baa16a8de5411159261e910ab2
-size 66008
data/metadata_train.csv.gz DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:6a4c504bde885b56dbb822bec99ec2c19e7cb73b165ff64c97251a9f54a947c5
-size 221073
data/test-00000-of-00001.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d9bf4e835613985ef617e4960e5cf45a1920f8bceaeba9e085aabd4a0682c418
-size 348915286
+oid sha256:bf88149b61de48aeba3fede4fcd9871f1af5a5a8d9757e7937793c2e793f24b6
+size 349089134
data/train-00000-of-00003.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a97d82169265c11e7d8d7be9bb551a1a7a4e7b5f8102c731565b759bca7c72ed
-size 347286458
+oid sha256:7da2f5a2029950e1a5883c96a45539246b5e5ae8e7866ab5506e34ea1e232aa3
+size 347475756
data/train-00001-of-00003.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4550e020542c1567a4b68e8b45a7e67b2b27e845260cb7cf1fabaab5bdc6daa0
-size 345551302
+oid sha256:8fe5ec5537343a69fed8f614e3fb074bb648cc3365e6b5b6e37464c38883aab8
+size 345742490
data/train-00002-of-00003.parquet CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0f0e34d53e14067ff8e8fe4812f306537bc76196da3c9a5ab5260821a6be0c1c
-size 346583604
+oid sha256:9512562fd47220b869af132380b2d1264840e123d607336cf3615692eeec91b1
+size 346771767