Got `KeyError: 'sentence_id'` when loading the yue data #14
by laubonghaudoi
I am trying to load the yue dataset with the following code:
from datasets import load_dataset, DatasetDict
common_voice = DatasetDict()
common_voice["train"] = load_dataset(
"mozilla-foundation/common_voice_17_0", "yue", split="train+validation"
)
common_voice["test"] = load_dataset(
"mozilla-foundation/common_voice_17_0", "yue", split="test"
)
print(common_voice)
However, I got this error:
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1687, in GeneratorBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)
1678 writer = writer_class(
1679 features=writer._features,
1680 path=fpath.replace("SSSSS", f"{shard_id:05d}").replace("JJJJJ", f"{job_id:05d}"),
(...)
1685 embed_local_files=embed_local_files,
1686 )
-> 1687 example = self.info.features.encode_example(record) if self.info.features is not None else record
1688 writer.write(example, key)
File /usr/local/lib/python3.10/dist-packages/datasets/features/features.py:1866, in Features.encode_example(self, example)
1865 example = cast_to_python_objects(example)
-> 1866 return encode_nested_example(self, example)
File /usr/local/lib/python3.10/dist-packages/datasets/features/features.py:1243, in encode_nested_example(schema, obj, level)
1241 raise ValueError("Got None but expected a dictionary instead")
1242 return (
-> 1243 {
1244 k: encode_nested_example(sub_schema, sub_obj, level=level + 1)
1245 for k, (sub_schema, sub_obj) in zip_dict(schema, obj)
1246 }
1247 if obj is not None
1248 else None
1249 )
1251 elif isinstance(schema, (list, tuple)):
File /usr/local/lib/python3.10/dist-packages/datasets/features/features.py:1243, in <dictcomp>(.0)
1241 raise ValueError("Got None but expected a dictionary instead")
1242 return (
-> 1243 {
1244 k: encode_nested_example(sub_schema, sub_obj, level=level + 1)
1245 for k, (sub_schema, sub_obj) in zip_dict(schema, obj)
1246 }
1247 if obj is not None
1248 else None
1249 )
1251 elif isinstance(schema, (list, tuple)):
File /usr/local/lib/python3.10/dist-packages/datasets/utils/py_utils.py:323, in zip_dict(*dicts)
321 for key in unique_values(itertools.chain(*dicts)): # set merge all keys
322 # Will raise KeyError if the dict don't have the same keys
--> 323 yield key, tuple(d[key] for d in dicts)
File /usr/local/lib/python3.10/dist-packages/datasets/utils/py_utils.py:323, in <genexpr>(.0)
321 for key in unique_values(itertools.chain(*dicts)): # set merge all keys
322 # Will raise KeyError if the dict don't have the same keys
--> 323 yield key, tuple(d[key] for d in dicts)
KeyError: 'sentence_id'
The above exception was the direct cause of the following exception:
DatasetGenerationError Traceback (most recent call last)
Cell In[13], line 5
1 from datasets import load_dataset, DatasetDict
3 common_voice = DatasetDict()
----> 5 common_voice["train"] = load_dataset(
6 "mozilla-foundation/common_voice_17_0", "yue", split="train+validation"
7 )
8 common_voice["test"] = load_dataset(
9 "mozilla-foundation/common_voice_17_0", "yue", split="test"
10 )
12 print(common_voice)
File /usr/local/lib/python3.10/dist-packages/datasets/load.py:2152, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, token, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs)
2149 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES
2151 # Download and prepare data
-> 2152 builder_instance.download_and_prepare(
2153 download_config=download_config,
2154 download_mode=download_mode,
2155 verification_mode=verification_mode,
2156 try_from_hf_gcs=try_from_hf_gcs,
2157 num_proc=num_proc,
2158 storage_options=storage_options,
2159 )
2161 # Build dataset for splits
2162 keep_in_memory = (
2163 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size)
2164 )
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:948, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs)
946 if num_proc is not None:
947 prepare_split_kwargs["num_proc"] = num_proc
--> 948 self._download_and_prepare(
949 dl_manager=dl_manager,
950 verification_mode=verification_mode,
951 **prepare_split_kwargs,
952 **download_and_prepare_kwargs,
953 )
954 # Sync info
955 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values())
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1711, in GeneratorBasedBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs)
1710 def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs):
-> 1711 super()._download_and_prepare(
1712 dl_manager,
1713 verification_mode,
1714 check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS
1715 or verification_mode == VerificationMode.ALL_CHECKS,
1716 **prepare_splits_kwargs,
1717 )
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1043, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs)
1039 split_dict.add(split_generator.split_info)
1041 try:
1042 # Prepare split will record examples associated to the split
-> 1043 self._prepare_split(split_generator, **prepare_split_kwargs)
1044 except OSError as e:
1045 raise OSError(
1046 "Cannot find data file. "
1047 + (self.manual_download_instructions or "")
1048 + "\nOriginal error:\n"
1049 + str(e)
1050 ) from None
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1549, in GeneratorBasedBuilder._prepare_split(self, split_generator, check_duplicate_keys, file_format, num_proc, max_shard_size)
1547 job_id = 0
1548 with pbar:
-> 1549 for job_id, done, content in self._prepare_split_single(
1550 gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args
1551 ):
1552 if done:
1553 result = content
File /usr/local/lib/python3.10/dist-packages/datasets/builder.py:1706, in GeneratorBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, split_info, check_duplicate_keys, job_id)
1704 if isinstance(e, SchemaInferenceError) and e.__context__ is not None:
1705 e = e.__context__
-> 1706 raise DatasetGenerationError("An error occurred while generating the dataset") from e
1708 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths)
DatasetGenerationError: An error occurred while generating the dataset
I had no issues loading older versions of Common Voice. How can I resolve this?
Hi @laubonghaudoi - It appears the dataset did not download correctly. Can you also try the same thing with streaming mode?
Thanks for the tip! I tried streaming mode like this:
from datasets import load_dataset
dataset = load_dataset(
"mozilla-foundation/common_voice_17_0", "yue", split="train", streaming=True
)
print(next(iter(dataset)))
and got this error:
Reading metadata...: 3150it [00:00, 11802.04it/s]
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
Cell In[5], line 10
3 dataset = load_dataset(
4 "mozilla-foundation/common_voice_17_0", "yue", split="train", streaming=True
5 )
6 # common_voice["test"] = load_dataset(
7 # "mozilla-foundation/common_voice_17_0", "yue", split="test"
8 # )
---> 10 print(next(iter(dataset)))
12 # print(common_voice)
File /usr/local/lib/python3.10/dist-packages/datasets/iterable_dataset.py:1383, in IterableDataset.__iter__(self)
1379 for key, example in ex_iterable:
1380 if self.features:
1381 # `IterableDataset` automatically fills missing columns with None.
1382 # This is done with `_apply_feature_types_on_example`.
-> 1383 example = _apply_feature_types_on_example(
1384 example, self.features, token_per_repo_id=self._token_per_repo_id
1385 )
1386 yield format_dict(example) if format_dict else example
File /usr/local/lib/python3.10/dist-packages/datasets/iterable_dataset.py:1075, in _apply_feature_types_on_example(example, features, token_per_repo_id)
1073 example[column_name] = None
1074 # we encode the example for ClassLabel feature types for example
-> 1075 encoded_example = features.encode_example(example)
1076 # Decode example for Audio feature, e.g.
1077 decoded_example = features.decode_example(encoded_example, token_per_repo_id=token_per_repo_id)
File /usr/local/lib/python3.10/dist-packages/datasets/features/features.py:1866, in Features.encode_example(self, example)
1855 """
1856 Encode example into a format for Arrow.
1857
(...)
1863 `dict[str, Any]`
1864 """
1865 example = cast_to_python_objects(example)
-> 1866 return encode_nested_example(self, example)
File /usr/local/lib/python3.10/dist-packages/datasets/features/features.py:1243, in encode_nested_example(schema, obj, level)
1240 if level == 0 and obj is None:
1241 raise ValueError("Got None but expected a dictionary instead")
1242 return (
-> 1243 {
1244 k: encode_nested_example(sub_schema, sub_obj, level=level + 1)
1245 for k, (sub_schema, sub_obj) in zip_dict(schema, obj)
1246 }
1247 if obj is not None
1248 else None
1249 )
1251 elif isinstance(schema, (list, tuple)):
1252 sub_schema = schema[0]
File /usr/local/lib/python3.10/dist-packages/datasets/features/features.py:1243, in <dictcomp>(.0)
1240 if level == 0 and obj is None:
1241 raise ValueError("Got None but expected a dictionary instead")
1242 return (
-> 1243 {
1244 k: encode_nested_example(sub_schema, sub_obj, level=level + 1)
1245 for k, (sub_schema, sub_obj) in zip_dict(schema, obj)
1246 }
1247 if obj is not None
1248 else None
1249 )
1251 elif isinstance(schema, (list, tuple)):
1252 sub_schema = schema[0]
File /usr/local/lib/python3.10/dist-packages/datasets/utils/py_utils.py:323, in zip_dict(*dicts)
320 """Iterate over items of dictionaries grouped by their keys."""
321 for key in unique_values(itertools.chain(*dicts)): # set merge all keys
322 # Will raise KeyError if the dict don't have the same keys
--> 323 yield key, tuple(d[key] for d in dicts)
File /usr/local/lib/python3.10/dist-packages/datasets/utils/py_utils.py:323, in <genexpr>(.0)
320 """Iterate over items of dictionaries grouped by their keys."""
321 for key in unique_values(itertools.chain(*dicts)): # set merge all keys
322 # Will raise KeyError if the dict don't have the same keys
--> 323 yield key, tuple(d[key] for d in dicts)
KeyError: 'sentence_id'
Is there a way to delete the downloaded data and re-download it? I am guessing this is a download corruption problem.
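The closest thing I have found so far is the `download_mode` argument of `load_dataset`, which should discard the cached files for this config and fetch everything again. This is just a sketch - I have not confirmed it fixes this particular error:
from datasets import load_dataset

# Force datasets to ignore any previously downloaded/cached files for this
# config and re-download them from the Hub.
dataset = load_dataset(
    "mozilla-foundation/common_voice_17_0",
    "yue",
    split="train",
    download_mode="force_redownload",
)
Alternatively, deleting the cached copy under ~/.cache/huggingface/datasets (the default cache location) before re-running `load_dataset` should have the same effect.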
@laubonghaudoi Did you find the issue, or a workaround? I have the same problem with this version of the dataset. Older ones work fine.
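In the meantime, a small diagnostic I have been trying: read the header of the transcript TSV and check whether it actually contains a `sentence_id` column, since the KeyError comes from the rows missing a key that the features expect. The file path below is only a guess at the repo layout, so please double-check it against the dataset's file listing:
import csv

from huggingface_hub import hf_hub_download

# Download just the train transcript TSV for yue. The filename is an assumption
# about how the repo is organised -- adjust it if the files live elsewhere.
tsv_path = hf_hub_download(
    repo_id="mozilla-foundation/common_voice_17_0",
    filename="transcript/yue/train.tsv",
    repo_type="dataset",
)

# Read only the header row and check for the column the loader complains about.
with open(tsv_path, encoding="utf-8") as f:
    header = next(csv.reader(f, delimiter="\t"))

print(header)
print("sentence_id" in header)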