Column schema of the issue records that follow (type and observed range or number of classes):

| Column | Type | Range / classes |
|---|---|---|
| url | string | lengths 58–61 |
| repository_url | string | 1 value |
| labels_url | string | lengths 72–75 |
| comments_url | string | lengths 67–70 |
| events_url | string | lengths 65–68 |
| html_url | string | lengths 46–51 |
| id | int64 | 599M–2.12B |
| node_id | string | lengths 18–32 |
| number | int64 | 1–6.65k |
| title | string | lengths 1–290 |
| user | dict | |
| labels | list | lengths 0–4 |
| state | string | 2 values |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | list | lengths 0–4 |
| milestone | dict | |
| comments | int64 | 0–70 |
| created_at | unknown | |
| updated_at | unknown | |
| closed_at | unknown | |
| author_association | string | 3 values |
| active_lock_reason | float64 | |
| draft | float64 | 0–1 |
| pull_request | dict | |
| body | string | lengths 0–228k |
| reactions | dict | |
| timeline_url | string | lengths 67–70 |
| performed_via_github_app | float64 | |
| state_reason | string | 3 values |
| is_pull_request | bool | 2 classes |
https://api.github.com/repos/huggingface/datasets/issues/6649
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6649/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6649/comments
https://api.github.com/repos/huggingface/datasets/issues/6649/events
https://github.com/huggingface/datasets/pull/6649
2,124,940,213
PR_kwDODunzps5mXRo8
6,649
Minor multi gpu doc improvement
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-02-08T11:17:24"
"2024-02-08T11:23:35"
"2024-02-08T11:17:35"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6649.diff", "html_url": "https://github.com/huggingface/datasets/pull/6649", "merged_at": "2024-02-08T11:17:35Z", "patch_url": "https://github.com/huggingface/datasets/pull/6649.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6649" }
Just added `torch.no_grad()` and `eval()`.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6649/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6649/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6648
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6648/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6648/comments
https://api.github.com/repos/huggingface/datasets/issues/6648/events
https://github.com/huggingface/datasets/pull/6648
2,124,813,589
PR_kwDODunzps5mW1MA
6,648
Document usage of hfh cli instead of git
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
open
false
null
[]
null
1
"2024-02-08T10:24:56"
"2024-02-08T10:35:53"
null
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6648.diff", "html_url": "https://github.com/huggingface/datasets/pull/6648", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6648.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6648" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6648/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6648/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6647
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6647/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6647/comments
https://api.github.com/repos/huggingface/datasets/issues/6647/events
https://github.com/huggingface/datasets/pull/6647
2,123,397,569
PR_kwDODunzps5mSB2B
6,647
Update loading.mdx to include "jsonl" file loading.
{ "avatar_url": "https://avatars.githubusercontent.com/u/22236370?v=4", "events_url": "https://api.github.com/users/mosheber/events{/privacy}", "followers_url": "https://api.github.com/users/mosheber/followers", "following_url": "https://api.github.com/users/mosheber/following{/other_user}", "gists_url": "https://api.github.com/users/mosheber/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mosheber", "id": 22236370, "login": "mosheber", "node_id": "MDQ6VXNlcjIyMjM2Mzcw", "organizations_url": "https://api.github.com/users/mosheber/orgs", "received_events_url": "https://api.github.com/users/mosheber/received_events", "repos_url": "https://api.github.com/users/mosheber/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mosheber/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mosheber/subscriptions", "type": "User", "url": "https://api.github.com/users/mosheber" }
[]
open
false
null
[]
null
1
"2024-02-07T16:18:08"
"2024-02-07T16:51:05"
null
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6647.diff", "html_url": "https://github.com/huggingface/datasets/pull/6647", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6647.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6647" }
* A small update to the documentation, noting the ability to load jsonl files.
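A snippet along the lines the update describes might look like this (file names are illustrative); JSON Lines files go through the same `"json"` builder as regular JSON files:

```python
from datasets import load_dataset

# A single .jsonl file loads with the "json" builder.
dataset = load_dataset("json", data_files="my_file.jsonl")

# Splits can be mapped to separate .jsonl files the same way.
dataset = load_dataset("json", data_files={"train": "train.jsonl", "test": "test.jsonl"})
```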
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6647/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6647/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6646
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6646/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6646/comments
https://api.github.com/repos/huggingface/datasets/issues/6646/events
https://github.com/huggingface/datasets/pull/6646
2,123,134,128
PR_kwDODunzps5mRIma
6,646
Better multi-gpu example
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-02-07T14:15:01"
"2024-02-07T15:05:18"
"2024-02-07T14:59:11"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6646.diff", "html_url": "https://github.com/huggingface/datasets/pull/6646", "merged_at": "2024-02-07T14:59:11Z", "patch_url": "https://github.com/huggingface/datasets/pull/6646.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6646" }
Use Qwen1.5-0.5B-Chat as an easy example for multi-GPU inference. The previous example used a translation model, and the way it was set up was not really the right way to use the model.
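A sketch of the pattern (not necessarily the exact snippet merged into the docs): each `map` worker receives a rank, moves the model to "its" GPU, and runs generation in eval mode with gradients disabled. The model name comes from the PR description; field names and generation settings are illustrative.

```python
import torch
from datasets import Dataset
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen1.5-0.5B-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_id)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16)
model.eval()  # inference only: no dropout, no gradient updates

def chat(batch, rank):
    # Pin this worker to one GPU based on its rank.
    device = f"cuda:{rank % torch.cuda.device_count()}"
    model.to(device)
    inputs = tokenizer(batch["prompt"], return_tensors="pt", padding=True).to(device)
    with torch.no_grad():  # no gradient tracking during generation
        outputs = model.generate(**inputs, max_new_tokens=64)
    batch["reply"] = tokenizer.batch_decode(outputs, skip_special_tokens=True)
    return batch

# In a script, wrap this in `if __name__ == "__main__":` and set the "spawn"
# start method before mapping with multiple processes.
ds = Dataset.from_dict({"prompt": ["Hello!", "What is a dataset?"]})
ds = ds.map(chat, batched=True, batch_size=2, with_rank=True,
            num_proc=torch.cuda.device_count())
```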
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6646/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6646/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6645
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6645/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6645/comments
https://api.github.com/repos/huggingface/datasets/issues/6645/events
https://github.com/huggingface/datasets/issues/6645
2,122,956,818
I_kwDODunzps5-icAS
6,645
Support fsspec 2024.2
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
0
"2024-02-07T12:45:29"
"2024-02-07T12:46:05"
null
MEMBER
null
null
null
Support fsspec 2024.2. First, we should address: - #6644
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6645/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6645/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6644
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6644/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6644/comments
https://api.github.com/repos/huggingface/datasets/issues/6644/events
https://github.com/huggingface/datasets/issues/6644
2,122,955,282
I_kwDODunzps5-iboS
6,644
Support fsspec 2023.12
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
0
"2024-02-07T12:44:39"
"2024-02-07T12:45:19"
null
MEMBER
null
null
null
Support fsspec 2023.12 by handling previous and new glob behavior.
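The issue does not spell out the exact behavioral difference, so the following is only a rough, hypothetical sketch of the general shape of such a fix: detect which side of the change the installed fsspec is on, and normalize glob results so the rest of the code sees one behavior either way. The boundary version and the normalization rule are assumptions.

```python
from packaging import version
import fsspec

def uses_new_glob() -> bool:
    # Assumption: the glob behavior change shipped in fsspec 2023.12.0.
    return version.parse(fsspec.__version__) >= version.parse("2023.12.0")

def glob_data_files(fs: fsspec.AbstractFileSystem, pattern: str) -> list:
    # Normalize results so callers see one behavior on either side of the change:
    # keep only files and return them in a stable sorted order.
    matches = fs.glob(pattern)
    return sorted(m for m in matches if fs.isfile(m))
```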
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6644/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6644/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6643
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6643/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6643/comments
https://api.github.com/repos/huggingface/datasets/issues/6643/events
https://github.com/huggingface/datasets/issues/6643
2,121,239,039
I_kwDODunzps5-b4n_
6,643
Faiss GPU index cannot be serialised when passed to trainer
{ "avatar_url": "https://avatars.githubusercontent.com/u/56388976?v=4", "events_url": "https://api.github.com/users/rubenweitzman/events{/privacy}", "followers_url": "https://api.github.com/users/rubenweitzman/followers", "following_url": "https://api.github.com/users/rubenweitzman/following{/other_user}", "gists_url": "https://api.github.com/users/rubenweitzman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rubenweitzman", "id": 56388976, "login": "rubenweitzman", "node_id": "MDQ6VXNlcjU2Mzg4OTc2", "organizations_url": "https://api.github.com/users/rubenweitzman/orgs", "received_events_url": "https://api.github.com/users/rubenweitzman/received_events", "repos_url": "https://api.github.com/users/rubenweitzman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rubenweitzman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rubenweitzman/subscriptions", "type": "User", "url": "https://api.github.com/users/rubenweitzman" }
[]
open
false
null
[]
null
1
"2024-02-06T16:41:00"
"2024-02-08T10:39:54"
null
NONE
null
null
null
### Describe the bug I am working on a retrieval project and encountering I have encountered two issues in the hugging face faiss integration: 1. I am trying to pass in a dataset with a faiss index to the Huggingface trainer. The code works for a cpu faiss index, but doesn't for a gpu one, getting error: ``` File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/transformers/trainer.py", line 1543, in train return inner_training_loop( File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/transformers/trainer.py", line 1555, in _inner_training_loop train_dataloader = self.get_train_dataloader() File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/transformers/trainer.py", line 831, in get_train_dataloader train_dataset = self._remove_unused_columns(train_dataset, description="training") File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/transformers/trainer.py", line 725, in _remove_unused_columns return dataset.remove_columns(ignored_columns) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 592, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 557, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/datasets/fingerprint.py", line 481, in wrapper out = func(dataset, *args, **kwargs) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 2146, in remove_columns dataset = copy.deepcopy(self) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 271, in _reconstruct state = deepcopy(state, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 146, in deepcopy y = copier(x, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 231, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 146, in deepcopy y = copier(x, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 231, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 172, in deepcopy y = _reconstruct(x, memo, *rv) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 271, in _reconstruct state = deepcopy(state, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 146, in deepcopy y = copier(x, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 231, in _deepcopy_dict y[deepcopy(key, memo)] = deepcopy(value, memo) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/copy.py", line 161, in deepcopy rv = reductor(4) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/faiss/__init__.py", line 556, in index_getstate return {"this": serialize_index(self).tobytes()} File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/faiss/__init__.py", line 1607, in serialize_index write_index(index, writer) File 
"/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/faiss/swigfaiss.py", line 9843, in write_index return _swigfaiss.write_index(*args) RuntimeError: Error in void faiss::write_index(const faiss::Index*, faiss::IOWriter*) at /project/faiss/faiss/impl/index_write.cpp:590: don't know how to serialize this type of index ``` The index was created with the add_faiss_index method ``` train_dataset.add_faiss_index( column='embeddings', index_name='embeddings', string_factory=faiss_index_string, train_size=config.faiss_train_size, device=0, # Use -1 for CPU, or specify GPU device ID faiss_verbose=True ) ``` 2. Athough faiss is written to be compatible on the gpu for searching [https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU](https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU) I am getting error when trying to use the hugggingface code to do the search on gpu. This seems to be caused by this line https://github.com/huggingface/datasets/blob/f9975f636542df7f95c27065ea93147440d690b7/src/datasets/search.py#L376 producing error ``` total_scores, total_examples = self.dataset.get_nearest_examples_batch('embeddings', embeddings, k=self.k) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/datasets/search.py", line 773, in get_nearest_examples_batch total_scores, total_indices = self.search_batch(index_name, queries, k, **kwargs) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/datasets/search.py", line 727, in search_batch return self._indexes[index_name].search_batch(queries, k, **kwargs) File "/users/rubman/.conda/envs/protein_npt_env/lib/python3.10/site-packages/datasets/search.py", line 376, in search_batch if not queries.flags.c_contiguous: AttributeError: 'Tensor' object has no attribute 'flags' ``` ### Steps to reproduce the bug ``` train_dataset.add_faiss_index( column='embeddings', index_name='embeddings', string_factory=faiss_index_string, train_size=config.faiss_train_size, device=0, # Use -1 for CPU, or specify GPU device ID faiss_verbose=True ) Trainer( model=model, args=args, train_dataset=train_dataset, eval_dataset=eval_dataset, data_collator=data_collator, tokenizer=tokenizer ) train_dataset.get_nearest_examples_batch('embeddings', embeddings, k=self.k) ``` ### Expected behavior I would expect the faiss database code to be gpu compatible ### Environment info huggingface Version: 2.16.1
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6643/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6643/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6642
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6642/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6642/comments
https://api.github.com/repos/huggingface/datasets/issues/6642/events
https://github.com/huggingface/datasets/issues/6642
2,119,085,766
I_kwDODunzps5-Tq7G
6,642
Dataset object is saved differently than it is loaded.
{ "avatar_url": "https://avatars.githubusercontent.com/u/31218150?v=4", "events_url": "https://api.github.com/users/MFajcik/events{/privacy}", "followers_url": "https://api.github.com/users/MFajcik/followers", "following_url": "https://api.github.com/users/MFajcik/following{/other_user}", "gists_url": "https://api.github.com/users/MFajcik/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MFajcik", "id": 31218150, "login": "MFajcik", "node_id": "MDQ6VXNlcjMxMjE4MTUw", "organizations_url": "https://api.github.com/users/MFajcik/orgs", "received_events_url": "https://api.github.com/users/MFajcik/received_events", "repos_url": "https://api.github.com/users/MFajcik/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MFajcik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MFajcik/subscriptions", "type": "User", "url": "https://api.github.com/users/MFajcik" }
[]
closed
false
null
[]
null
2
"2024-02-05T17:28:57"
"2024-02-06T09:50:19"
"2024-02-06T09:50:19"
NONE
null
null
null
### Describe the bug

A differently sized object is loaded than was saved.

### Steps to reproduce the bug

Hi, I save the dataset in the following way:

```python
dataset = load_dataset("json", data_files={
    "train": os.path.join(input_folder, f"{task_meta_type}_{task_type}_train.jsonl"),
    "test": os.path.join(input_folder, f"{task_meta_type}_{task_type}_test.jsonl")})

print(os.path.join(output_folder, f"{task_meta_type}_{task_type}"))
print(f"Length of train dataset: {len(dataset['train'])}")
print(f"Length of test dataset: {len(dataset['test'])}")
dataset.save_to_disk(os.path.join(output_folder, f"{task_meta_type}_{task_type}"))
```

which yields this output:

```
.data/hf_dataset/propaganda_zanr
Length of train dataset: 7642
Length of test dataset: 1000
```

Everything looks fine. Then I load the dataset:

```python
from datasets import load_dataset

dataset_path = ".data/hf_dataset/propaganda_zanr"
dataset = load_dataset(dataset_path)
print(f"Length of train dataset: {len(dataset['train'])}")
print(f"Length of test dataset: {len(dataset['test'])}")
```

which prints:

```
Generating train split: 1 examples [00:00, 72.10 examples/s]
Generating test split: 1 examples [00:00, 100.69 examples/s]
Length of train dataset: 1
Length of test dataset: 1
```

I don't understand :(

### Expected behavior

The same object is loaded.

### Environment info

datasets==2.16.1
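For what it is worth, this symptom usually means the folder written by `save_to_disk` was read back with the wrong function: `load_dataset` treats the Arrow files and metadata inside that folder as raw data files. A minimal sketch of the intended round trip, using the path from the report above:

```python
from datasets import load_from_disk

# Folders produced by save_to_disk are read back with load_from_disk, not load_dataset.
dataset = load_from_disk(".data/hf_dataset/propaganda_zanr")
print(f"Length of train dataset: {len(dataset['train'])}")
print(f"Length of test dataset: {len(dataset['test'])}")
```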
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6642/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6642/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6641
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6641/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6641/comments
https://api.github.com/repos/huggingface/datasets/issues/6641/events
https://github.com/huggingface/datasets/issues/6641
2,116,963,132
I_kwDODunzps5-Lks8
6,641
UnicodeDecodeError: 'utf-8' codec can't decode byte 0xac in position 25: invalid start byte
{ "avatar_url": "https://avatars.githubusercontent.com/u/109789057?v=4", "events_url": "https://api.github.com/users/Hughhuh/events{/privacy}", "followers_url": "https://api.github.com/users/Hughhuh/followers", "following_url": "https://api.github.com/users/Hughhuh/following{/other_user}", "gists_url": "https://api.github.com/users/Hughhuh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Hughhuh", "id": 109789057, "login": "Hughhuh", "node_id": "U_kgDOBos_gQ", "organizations_url": "https://api.github.com/users/Hughhuh/orgs", "received_events_url": "https://api.github.com/users/Hughhuh/received_events", "repos_url": "https://api.github.com/users/Hughhuh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Hughhuh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Hughhuh/subscriptions", "type": "User", "url": "https://api.github.com/users/Hughhuh" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1
"2024-02-04T08:49:31"
"2024-02-06T09:26:07"
"2024-02-06T09:11:45"
NONE
null
null
null
### Describe the bug unicodedecodeerror: 'utf-8' codec can't decode byte 0xac in position 25: invalid start byte ### Steps to reproduce the bug ``` import sys sys.getdefaultencoding() 'utf-8' from datasets import load_dataset print(f"Train dataset size: {len(dataset['train'])}") print(f"Test dataset size: {len(dataset['test'])}") Resolving data files: 100% 159/159 [00:00<00:00, 9909.28it/s] Using custom data configuration samsum-0b1209637541c9e6 Downloading and preparing dataset json/samsum to C:/Users/Administrator/.cache/huggingface/datasets/json/samsum-0b1209637541c9e6/0.0.0/0f7e3662623656454fcd2b650f34e886a7db4b9104504885bd462096cc7a9f51... Downloading data files: 100% 3/3 [00:00<00:00, 119.99it/s] Extracting data files: 100% 3/3 [00:00<00:00, 9.54it/s] Generating train split: 88392/0 [00:15<00:00, 86848.17 examples/s] Generating test split: 0/0 [00:00<?, ? examples/s] --------------------------------------------------------------------------- ArrowInvalid Traceback (most recent call last) File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\packaged_modules\json\json.py:132, in Json._generate_tables(self, files) 131 try: --> 132 pa_table = paj.read_json( 133 io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size) 134 ) 135 break File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pyarrow\_json.pyx:290, in pyarrow._json.read_json() File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pyarrow\error.pxi:144, in pyarrow.lib.pyarrow_internal_check_status() File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\pyarrow\error.pxi:100, in pyarrow.lib.check_status() ArrowInvalid: JSON parse error: Invalid value. in row 0 During handling of the above exception, another exception occurred: UnicodeDecodeError Traceback (most recent call last) File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\builder.py:1819, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id) 1818 _time = time.time() -> 1819 for _, table in generator: 1820 if max_shard_size is not None and writer._num_bytes > max_shard_size: File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\packaged_modules\json\json.py:153, in Json._generate_tables(self, files) 152 with open(file, encoding="utf-8") as f: --> 153 dataset = json.load(f) 154 except json.JSONDecodeError: File ~\AppData\Local\Programs\Python\Python310\lib\json\__init__.py:293, in load(fp, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw) 276 """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing 277 a JSON document) to a Python object. 278 (...) 291 kwarg; otherwise ``JSONDecoder`` is used. 
292 """ --> 293 return loads(fp.read(), 294 cls=cls, object_hook=object_hook, 295 parse_float=parse_float, parse_int=parse_int, 296 parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw) File ~\AppData\Local\Programs\Python\Python310\lib\codecs.py:322, in BufferedIncrementalDecoder.decode(self, input, final) 321 data = self.buffer + input --> 322 (result, consumed) = self._buffer_decode(data, self.errors, final) 323 # keep undecoded input until the next call UnicodeDecodeError: 'utf-8' codec can't decode byte 0xac in position 25: invalid start byte The above exception was the direct cause of the following exception: DatasetGenerationError Traceback (most recent call last) Cell In[81], line 5 1 from datasets import load_dataset 3 # Load dataset from the hub 4 #dataset = load_dataset("json",data_files="C:/Users/Administrator/Desktop/samsum/samsum/data/corpus/train.json",field="data") ----> 5 dataset = load_dataset('json',"samsum") 6 #dataset = load_dataset("samsum") 7 print(f"Train dataset size: {len(dataset['train'])}") File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\load.py:1758, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, **config_kwargs) 1755 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES 1757 # Download and prepare data -> 1758 builder_instance.download_and_prepare( 1759 download_config=download_config, 1760 download_mode=download_mode, 1761 ignore_verifications=ignore_verifications, 1762 try_from_hf_gcs=try_from_hf_gcs, 1763 num_proc=num_proc, 1764 ) 1766 # Build dataset for splits 1767 keep_in_memory = ( 1768 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 1769 ) File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\builder.py:860, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs) 858 if num_proc is not None: 859 prepare_split_kwargs["num_proc"] = num_proc --> 860 self._download_and_prepare( 861 dl_manager=dl_manager, 862 verify_infos=verify_infos, 863 **prepare_split_kwargs, 864 **download_and_prepare_kwargs, 865 ) 866 # Sync info 867 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\builder.py:953, in DatasetBuilder._download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 949 split_dict.add(split_generator.split_info) 951 try: 952 # Prepare split will record examples associated to the split --> 953 self._prepare_split(split_generator, **prepare_split_kwargs) 954 except OSError as e: 955 raise OSError( 956 "Cannot find data file. 
" 957 + (self.manual_download_instructions or "") 958 + "\nOriginal error:\n" 959 + str(e) 960 ) from None File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\builder.py:1708, in ArrowBasedBuilder._prepare_split(self, split_generator, file_format, num_proc, max_shard_size) 1706 gen_kwargs = split_generator.gen_kwargs 1707 job_id = 0 -> 1708 for job_id, done, content in self._prepare_split_single( 1709 gen_kwargs=gen_kwargs, job_id=job_id, **_prepare_split_args 1710 ): 1711 if done: 1712 result = content File ~\AppData\Local\Programs\Python\Python310\lib\site-packages\datasets\builder.py:1851, in ArrowBasedBuilder._prepare_split_single(self, gen_kwargs, fpath, file_format, max_shard_size, job_id) 1849 if isinstance(e, SchemaInferenceError) and e.__context__ is not None: 1850 e = e.__context__ -> 1851 raise DatasetGenerationError("An error occurred while generating the dataset") from e 1853 yield job_id, True, (total_num_examples, total_num_bytes, writer._features, num_shards, shard_lengths) DatasetGenerationError: An error occurred while generating the dataset ``` ### Expected behavior can't load dataset ### Environment info dataset:samsum system :win10 gpu:m40 24G
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6641/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6641/timeline
null
not_planned
false
https://api.github.com/repos/huggingface/datasets/issues/6640
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6640/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6640/comments
https://api.github.com/repos/huggingface/datasets/issues/6640/events
https://github.com/huggingface/datasets/issues/6640
2,115,864,531
I_kwDODunzps5-HYfT
6,640
Sign Language Support
{ "avatar_url": "https://avatars.githubusercontent.com/u/6684795?v=4", "events_url": "https://api.github.com/users/Merterm/events{/privacy}", "followers_url": "https://api.github.com/users/Merterm/followers", "following_url": "https://api.github.com/users/Merterm/following{/other_user}", "gists_url": "https://api.github.com/users/Merterm/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Merterm", "id": 6684795, "login": "Merterm", "node_id": "MDQ6VXNlcjY2ODQ3OTU=", "organizations_url": "https://api.github.com/users/Merterm/orgs", "received_events_url": "https://api.github.com/users/Merterm/received_events", "repos_url": "https://api.github.com/users/Merterm/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Merterm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Merterm/subscriptions", "type": "User", "url": "https://api.github.com/users/Merterm" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
0
"2024-02-02T21:54:51"
"2024-02-02T21:54:51"
null
NONE
null
null
null
### Feature request

Currently there are only a few Sign Language labels. I would like to propose adding all of the signed languages described in this ISO standard as new labels: https://www.evertype.com/standards/iso639/sign-language.html

### Motivation

Datasets currently only has labels for a few signed languages, but there are many more in the world. Because of this, some signed languages with plenty of online data cannot be found on the Hub. For instance, there is no German Sign Language label on huggingface datasets, even though many readily available German Sign Language datasets exist and are used frequently in Sign Language Processing papers and models.

### Your contribution

I can submit a PR for this as well, adding the ISO codes and languages to the labels in datasets.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6640/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6640/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6639
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6639/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6639/comments
https://api.github.com/repos/huggingface/datasets/issues/6639/events
https://github.com/huggingface/datasets/pull/6639
2,114,620,200
PR_kwDODunzps5l0KPG
6,639
Run download_and_prepare if missing splits
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
open
false
null
[]
null
1
"2024-02-02T10:36:49"
"2024-02-06T16:54:22"
null
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6639.diff", "html_url": "https://github.com/huggingface/datasets/pull/6639", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6639.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6639" }
A first step towards https://github.com/huggingface/datasets/issues/6529
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6639/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6639/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6638
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6638/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6638/comments
https://api.github.com/repos/huggingface/datasets/issues/6638/events
https://github.com/huggingface/datasets/issues/6638
2,113,329,257
I_kwDODunzps599thp
6,638
Cannot download wmt16 dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/81709031?v=4", "events_url": "https://api.github.com/users/vidyasiv/events{/privacy}", "followers_url": "https://api.github.com/users/vidyasiv/followers", "following_url": "https://api.github.com/users/vidyasiv/following{/other_user}", "gists_url": "https://api.github.com/users/vidyasiv/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/vidyasiv", "id": 81709031, "login": "vidyasiv", "node_id": "MDQ6VXNlcjgxNzA5MDMx", "organizations_url": "https://api.github.com/users/vidyasiv/orgs", "received_events_url": "https://api.github.com/users/vidyasiv/received_events", "repos_url": "https://api.github.com/users/vidyasiv/repos", "site_admin": false, "starred_url": "https://api.github.com/users/vidyasiv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vidyasiv/subscriptions", "type": "User", "url": "https://api.github.com/users/vidyasiv" }
[]
closed
false
null
[]
null
1
"2024-02-01T19:41:42"
"2024-02-01T20:07:29"
"2024-02-01T20:07:29"
NONE
null
null
null
### Describe the bug As of this morning (PST) 2/1/2024, seeing the wmt16 dataset is missing from opus , could you suggest an alternative? ``` Downloading data files: 0%| | 0/4 [00:00<?, ?it/s]Traceback (most recent call last): File "test.py", line 2, in <module> raw_datasets = load_dataset("wmt16","ro-en",split="train") File "/usr/local/lib/python3.8/dist-packages/datasets/load.py", line 2153, in load_dataset builder_instance.download_and_prepare( File "/usr/local/lib/python3.8/dist-packages/datasets/builder.py", line 954, in download_and_prepare self._download_and_prepare( File "/usr/local/lib/python3.8/dist-packages/datasets/builder.py", line 1717, in _download_and_prepare super()._download_and_prepare( File "/usr/local/lib/python3.8/dist-packages/datasets/builder.py", line 1027, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/root/.cache/huggingface/modules/datasets_modules/datasets/wmt16/746749a11d25c02058042da7502d973ff410e73457f3d305fc1177dc0e8c4227/wmt_utils.py", line 754, in _split_generators downloaded_files = dl_manager.download_and_extract(urls_to_download) File "/usr/local/lib/python3.8/dist-packages/datasets/download/download_manager.py", line 565, in download_and_extract return self.extract(self.download(url_or_urls)) File "/usr/local/lib/python3.8/dist-packages/datasets/download/download_manager.py", line 428, in download downloaded_path_or_paths = map_nested( File "/usr/local/lib/python3.8/dist-packages/datasets/utils/py_utils.py", line 464, in map_nested mapped = [ File "/usr/local/lib/python3.8/dist-packages/datasets/utils/py_utils.py", line 465, in <listcomp> _single_map_nested((function, obj, types, None, True, None)) File "/usr/local/lib/python3.8/dist-packages/datasets/utils/py_utils.py", line 384, in _single_map_nested mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] File "/usr/local/lib/python3.8/dist-packages/datasets/utils/py_utils.py", line 384, in <listcomp> mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] File "/usr/local/lib/python3.8/dist-packages/datasets/utils/py_utils.py", line 367, in _single_map_nested return function(data_struct) File "/usr/local/lib/python3.8/dist-packages/datasets/download/download_manager.py", line 454, in _download return cached_path(url_or_filename, download_config=download_config) File "/usr/local/lib/python3.8/dist-packages/datasets/utils/file_utils.py", line 182, in cached_path output_path = get_from_cache( File "/usr/local/lib/python3.8/dist-packages/datasets/utils/file_utils.py", line 596, in get_from_cache raise FileNotFoundError(f"Couldn't find file at {url}") FileNotFoundError: Couldn't find file at https://opus.nlpl.eu/download.php?f=SETIMES/v2/tmx/en-ro.tmx.gz ``` ### Steps to reproduce the bug ``` from datasets import load_dataset raw_datasets = load_dataset("wmt16","ro-en",split="train") ``` ### Expected behavior Expect the dataset to be downloaded/ at least a clean exit with error explaining dataset is missing and a suggestion for next steps ### Environment info - `datasets` version: 2.14.7 - Platform: Linux-5.15.0-92-generic-x86_64-with-glibc2.29 - Python version: 3.8.10 - Huggingface_hub version: 0.17.3 - PyArrow version: 15.0.0 - Pandas version: 2.0.1
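Until the upstream OPUS file is reachable again (or the loader points at a mirror), a script can at least fail with a clear message instead of a long traceback. A small sketch wrapping the issue's own call:

```python
from datasets import load_dataset

try:
    raw_datasets = load_dataset("wmt16", "ro-en", split="train")
except FileNotFoundError as err:
    raise SystemExit(
        "wmt16 (ro-en) could not be downloaded because a source file on "
        "https://opus.nlpl.eu is currently missing. Original error: " + str(err)
    )
```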
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6638/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6638/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6637
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6637/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6637/comments
https://api.github.com/repos/huggingface/datasets/issues/6637/events
https://github.com/huggingface/datasets/issues/6637
2,113,025,975
I_kwDODunzps598je3
6,637
'with_format' is extremely slow when used together with 'interleave_datasets' or 'shuffle' on IterableDatasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/22883190?v=4", "events_url": "https://api.github.com/users/tobycrisford/events{/privacy}", "followers_url": "https://api.github.com/users/tobycrisford/followers", "following_url": "https://api.github.com/users/tobycrisford/following{/other_user}", "gists_url": "https://api.github.com/users/tobycrisford/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tobycrisford", "id": 22883190, "login": "tobycrisford", "node_id": "MDQ6VXNlcjIyODgzMTkw", "organizations_url": "https://api.github.com/users/tobycrisford/orgs", "received_events_url": "https://api.github.com/users/tobycrisford/received_events", "repos_url": "https://api.github.com/users/tobycrisford/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tobycrisford/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tobycrisford/subscriptions", "type": "User", "url": "https://api.github.com/users/tobycrisford" }
[]
open
false
null
[]
null
1
"2024-02-01T17:16:54"
"2024-02-05T10:43:47"
null
NONE
null
null
null
### Describe the bug

If you:

1. Interleave two iterable datasets together with the interleave_datasets function, or shuffle an iterable dataset
2. Set the output format to torch tensors with .with_format('torch')

then iterating through the dataset becomes over 100x slower than it is if you don't apply the torch formatting.

### Steps to reproduce the bug

```python
import datasets
import torch
from tqdm import tqdm

rand_a = torch.randn(3,224,224)
rand_b = torch.randn(3,224,224)
a = torch.stack([rand_a] * 1000)
b = torch.stack([rand_b] * 1000)

features = datasets.Features({"tensor": datasets.Array3D(shape=(3,224,224), dtype="float32")})
ds_a = datasets.Dataset.from_dict({"tensor": a}, features=features).to_iterable_dataset()
ds_b = datasets.Dataset.from_dict({"tensor": b}, features=features).to_iterable_dataset()

# Iterating through either dataset with torch formatting is really fast (2000it/s on my machine)
for example in tqdm(ds_a.with_format('torch')):
    pass

# Iterating through either dataset shuffled is also pretty fast (100it/s on my machine)
for example in tqdm(ds_a.shuffle()):
    pass

# Iterating through this interleaved dataset is pretty fast (200it/s on my machine)
ds_fast = datasets.interleave_datasets([ds_a, ds_b])
for example in tqdm(ds_fast):
    pass

# Iterating through either dataset with torch formatting *after shuffling* is really slow... (<2it/s on my machine)
for example in tqdm(ds_a.shuffle().with_format('torch')):
    pass

# Iterating through this torch formatted interleaved dataset is also really slow (<2it/s on my machine)...
ds_slow = datasets.interleave_datasets([ds_a, ds_b]).with_format('torch')
for example in tqdm(ds_slow):
    pass

# Even doing this is way faster!! (70it/s on my machine)
for example in tqdm(ds_fast):
    test = torch.tensor(example['tensor'])
```

### Expected behavior

Applying torch formatting to the interleaved dataset shouldn't increase the time taken to iterate through the dataset by very much, since even explicitly converting every example is over 70x faster than calling .with_format('torch').

### Environment info

- `datasets` version: 2.16.1
- Platform: Linux-6.5.0-15-generic-x86_64-with-glibc2.38
- Python version: 3.11.6
- `huggingface_hub` version: 0.20.3
- PyArrow version: 15.0.0
- Pandas version: 2.2.0
- `fsspec` version: 2023.10.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6637/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6637/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6636
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6636/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6636/comments
https://api.github.com/repos/huggingface/datasets/issues/6636/events
https://github.com/huggingface/datasets/pull/6636
2,110,781,097
PR_kwDODunzps5lm4zI
6,636
Faster column validation and reordering
{ "avatar_url": "https://avatars.githubusercontent.com/u/11325244?v=4", "events_url": "https://api.github.com/users/psmyth94/events{/privacy}", "followers_url": "https://api.github.com/users/psmyth94/followers", "following_url": "https://api.github.com/users/psmyth94/following{/other_user}", "gists_url": "https://api.github.com/users/psmyth94/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/psmyth94", "id": 11325244, "login": "psmyth94", "node_id": "MDQ6VXNlcjExMzI1MjQ0", "organizations_url": "https://api.github.com/users/psmyth94/orgs", "received_events_url": "https://api.github.com/users/psmyth94/received_events", "repos_url": "https://api.github.com/users/psmyth94/repos", "site_admin": false, "starred_url": "https://api.github.com/users/psmyth94/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/psmyth94/subscriptions", "type": "User", "url": "https://api.github.com/users/psmyth94" }
[]
closed
false
null
[]
null
3
"2024-01-31T19:08:28"
"2024-02-07T19:39:00"
"2024-02-06T23:03:38"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6636.diff", "html_url": "https://github.com/huggingface/datasets/pull/6636", "merged_at": "2024-02-06T23:03:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/6636.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6636" }
I work with bioinformatics data, and these tables often have thousands or even tens of thousands of features. They are also accompanied by metadata that I do not want to pass to the model. When I call `set_format('pt', columns=large_column_list)`, it can take several minutes to finish. The culprit is the check `any(col not in self._data.column_names for col in columns)`, which scans the column list once per requested column. Replacing it with `set(columns) - set(self._data.column_names)` is more efficient.
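A small sketch of the difference (the column names are made up; in the library the check lives inside the formatting code):

```python
columns = [f"feature_{i}" for i in range(20_000)]   # columns requested by the user
column_names = list(columns)                        # columns actually in the table

# Original check: for every requested column, scan the whole list of table columns.
any_missing = any(col not in column_names for col in columns)

# Set-based check: build two sets once and take the difference.
missing = set(columns) - set(column_names)
if missing:
    raise ValueError(f"Columns {sorted(missing)} not in the dataset.")
```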
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6636/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6636/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6635
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6635/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6635/comments
https://api.github.com/repos/huggingface/datasets/issues/6635/events
https://github.com/huggingface/datasets/pull/6635
2,110,659,519
PR_kwDODunzps5lmeNO
6,635
Fix missing info when loading some datasets from Parquet export
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-01-31T17:55:21"
"2024-02-07T16:48:55"
"2024-02-07T16:41:04"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6635.diff", "html_url": "https://github.com/huggingface/datasets/pull/6635", "merged_at": "2024-02-07T16:41:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/6635.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6635" }
Fix getting the info for script-based datasets with a Parquet export and a single config not named "default". E.g.

```python
from datasets import load_dataset_builder

b = load_dataset_builder("bookcorpus")
print(b.info.features)
# should print {'text': Value(dtype='string', id=None)}
```

I fixed this by setting the default config name when there is only one config.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6635/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6635/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6634
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6634/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6634/comments
https://api.github.com/repos/huggingface/datasets/issues/6634/events
https://github.com/huggingface/datasets/pull/6634
2,110,242,376
PR_kwDODunzps5llB9a
6,634
Support data_dir parameter in push_to_hub
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
3
"2024-01-31T14:37:36"
"2024-02-05T10:32:49"
"2024-02-05T10:26:40"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6634.diff", "html_url": "https://github.com/huggingface/datasets/pull/6634", "merged_at": "2024-02-05T10:26:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/6634.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6634" }
Support `data_dir` parameter in `push_to_hub`. This allows users to organize the data files according to their specific needs. For example, "wikimedia/wikipedia" files could be organized by year and/or date, e.g. "2024/20240101/20240101.en".
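A minimal sketch of how the new parameter could be used once merged (the repo id, local file and directory layout below are made-up placeholders, not taken from the PR):

```python
from datasets import load_dataset

# Hypothetical example: push the split's files under a nested directory inside
# the repo ("2024/20240101" is an arbitrary placeholder layout).
ds = load_dataset("csv", data_files="my_local_file.csv", split="train")
ds.push_to_hub("my-username/my-dataset", data_dir="2024/20240101")
```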
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6634/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6634/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6633
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6633/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6633/comments
https://api.github.com/repos/huggingface/datasets/issues/6633/events
https://github.com/huggingface/datasets/pull/6633
2,110,124,475
PR_kwDODunzps5lknz9
6,633
dataset viewer requires no-script
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo" }
[]
closed
false
null
[]
null
2
"2024-01-31T13:41:54"
"2024-01-31T14:05:04"
"2024-01-31T13:59:01"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6633.diff", "html_url": "https://github.com/huggingface/datasets/pull/6633", "merged_at": "2024-01-31T13:59:01Z", "patch_url": "https://github.com/huggingface/datasets/pull/6633.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6633" }
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6633/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6633/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6632
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6632/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6632/comments
https://api.github.com/repos/huggingface/datasets/issues/6632/events
https://github.com/huggingface/datasets/pull/6632
2,108,541,678
PR_kwDODunzps5lfPuk
6,632
Fix reload cache with data dir
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-01-30T18:52:23"
"2024-02-06T17:27:35"
"2024-02-06T17:21:24"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6632.diff", "html_url": "https://github.com/huggingface/datasets/pull/6632", "merged_at": "2024-02-06T17:21:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/6632.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6632" }
The cache used to only check for the latest cache directory with a given config_name, but it was wrong (e.g. `default-data_dir=data%2Ffortran-data_dir=data%2Ffortran` instead of `default-data_dir=data%2Ffortran`). I fixed this by not passing the `config_kwargs` to the parent Builder `__init__`, and passing the `config_id` forged from the `config_kwargs` directly. Close https://github.com/huggingface/datasets/issues/6609
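A hypothetical reproduction of the scenario this fixes (the script path and data directory are placeholders): the second call should reuse the cache directory created by the first one instead of rebuilding the dataset.

```python
from datasets import load_dataset

# Placeholder paths; the point is that both calls forge the same config_id
# (and thus the same cache directory) for an identical data_dir.
ds = load_dataset("path/to/my_script.py", data_dir="data/fortran")
ds = load_dataset("path/to/my_script.py", data_dir="data/fortran")  # should be a cache hit
```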
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6632/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6632/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6631
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6631/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6631/comments
https://api.github.com/repos/huggingface/datasets/issues/6631/events
https://github.com/huggingface/datasets/pull/6631
2,107,802,473
PR_kwDODunzps5lcu9A
6,631
Fix filelock: use current umask for filelock >= 3.10
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-01-30T12:56:01"
"2024-01-30T15:34:49"
"2024-01-30T15:28:37"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6631.diff", "html_url": "https://github.com/huggingface/datasets/pull/6631", "merged_at": "2024-01-30T15:28:37Z", "patch_url": "https://github.com/huggingface/datasets/pull/6631.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6631" }
reported in https://github.com/huggingface/evaluate/issues/542 cc @stas00 @williamberrios close https://github.com/huggingface/datasets/issues/6589
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6631/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6631/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6630
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6630/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6630/comments
https://api.github.com/repos/huggingface/datasets/issues/6630/events
https://github.com/huggingface/datasets/pull/6630
2,106,478,275
PR_kwDODunzps5lYPi3
6,630
Bump max range of dill to 0.3.8
{ "avatar_url": "https://avatars.githubusercontent.com/u/27844407?v=4", "events_url": "https://api.github.com/users/ringohoffman/events{/privacy}", "followers_url": "https://api.github.com/users/ringohoffman/followers", "following_url": "https://api.github.com/users/ringohoffman/following{/other_user}", "gists_url": "https://api.github.com/users/ringohoffman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ringohoffman", "id": 27844407, "login": "ringohoffman", "node_id": "MDQ6VXNlcjI3ODQ0NDA3", "organizations_url": "https://api.github.com/users/ringohoffman/orgs", "received_events_url": "https://api.github.com/users/ringohoffman/received_events", "repos_url": "https://api.github.com/users/ringohoffman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ringohoffman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ringohoffman/subscriptions", "type": "User", "url": "https://api.github.com/users/ringohoffman" }
[]
closed
false
null
[]
null
4
"2024-01-29T21:35:55"
"2024-01-30T16:19:45"
"2024-01-30T15:12:25"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6630.diff", "html_url": "https://github.com/huggingface/datasets/pull/6630", "merged_at": "2024-01-30T15:12:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/6630.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6630" }
Release on Jan 27, 2024: https://pypi.org/project/dill/0.3.8/#history
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6630/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6630/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6629
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6629/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6629/comments
https://api.github.com/repos/huggingface/datasets/issues/6629/events
https://github.com/huggingface/datasets/pull/6629
2,105,774,482
PR_kwDODunzps5lV0aF
6,629
Support push_to_hub without org/user to default to logged-in user
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
3
"2024-01-29T15:36:52"
"2024-02-05T12:35:43"
"2024-02-05T12:29:36"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6629.diff", "html_url": "https://github.com/huggingface/datasets/pull/6629", "merged_at": "2024-02-05T12:29:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/6629.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6629" }
This behavior is aligned with: - the behavior of `datasets` before merging #6519 - the behavior described in the corresponding docstring - the behavior of `huggingface_hub.create_repo` Revert "Support push_to_hub canonical datasets (#6519)" - This reverts commit a887ee78835573f5d80f9e414e8443b4caff3541. Fix #6597.
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6629/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6629/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6628
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6628/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6628/comments
https://api.github.com/repos/huggingface/datasets/issues/6628/events
https://github.com/huggingface/datasets/pull/6628
2,105,760,502
PR_kwDODunzps5lVxXU
6,628
Make CLI test support multi-processing
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
3
"2024-01-29T15:30:09"
"2024-02-05T10:29:20"
"2024-02-05T10:23:13"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6628.diff", "html_url": "https://github.com/huggingface/datasets/pull/6628", "merged_at": "2024-02-05T10:23:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/6628.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6628" }
Support passing `--num_proc` to CLI test. This was really useful recently to run the command on `pubmed`: https://huggingface.co/datasets/pubmed/discussions/11
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6628/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6628/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6627
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6627/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6627/comments
https://api.github.com/repos/huggingface/datasets/issues/6627/events
https://github.com/huggingface/datasets/pull/6627
2,105,735,816
PR_kwDODunzps5lVr-t
6,627
Disable `tqdm` bars in non-interactive environments
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
2
"2024-01-29T15:18:21"
"2024-01-29T15:47:34"
"2024-01-29T15:41:32"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6627.diff", "html_url": "https://github.com/huggingface/datasets/pull/6627", "merged_at": "2024-01-29T15:41:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/6627.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6627" }
Replace `disable=False` with `disable=None` in the `tqdm` bars to disable them in non-interactive environments (by default). For more info, see a [similar PR](https://github.com/huggingface/huggingface_hub/pull/2000) in `huggingface_hub`.
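For context, a small illustration of the `tqdm` semantics this relies on (not code from the PR): with `disable=None`, the bar is only rendered when the output stream is attached to a terminal, so CI logs and piped output stay clean.

```python
from tqdm.auto import tqdm

# disable=False always renders the bar; disable=None lets tqdm decide based on
# whether the stream is a TTY, so non-interactive runs show no bar by default.
for _ in tqdm(range(1_000), disable=None):
    pass
```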
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6627/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6627/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6626
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6626/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6626/comments
https://api.github.com/repos/huggingface/datasets/issues/6626/events
https://github.com/huggingface/datasets/pull/6626
2,105,482,522
PR_kwDODunzps5lU0I2
6,626
Raise error on bad split name
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-01-29T13:17:41"
"2024-01-29T15:18:25"
"2024-01-29T15:12:18"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6626.diff", "html_url": "https://github.com/huggingface/datasets/pull/6626", "merged_at": "2024-01-29T15:12:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/6626.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6626" }
E.g. dashes '-' are not allowed in split names. This should add an error message on datasets with unsupported split names, like https://huggingface.co/datasets/open-source-metrics/test. cc @AndreaFrancis
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6626/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6626/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6624
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6624/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6624/comments
https://api.github.com/repos/huggingface/datasets/issues/6624/events
https://github.com/huggingface/datasets/issues/6624
2,103,950,718
I_kwDODunzps59Z71-
6,624
How to download the laion-coco dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/15981416?v=4", "events_url": "https://api.github.com/users/vanpersie32/events{/privacy}", "followers_url": "https://api.github.com/users/vanpersie32/followers", "following_url": "https://api.github.com/users/vanpersie32/following{/other_user}", "gists_url": "https://api.github.com/users/vanpersie32/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/vanpersie32", "id": 15981416, "login": "vanpersie32", "node_id": "MDQ6VXNlcjE1OTgxNDE2", "organizations_url": "https://api.github.com/users/vanpersie32/orgs", "received_events_url": "https://api.github.com/users/vanpersie32/received_events", "repos_url": "https://api.github.com/users/vanpersie32/repos", "site_admin": false, "starred_url": "https://api.github.com/users/vanpersie32/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vanpersie32/subscriptions", "type": "User", "url": "https://api.github.com/users/vanpersie32" }
[]
closed
false
null
[]
null
1
"2024-01-28T03:56:05"
"2024-02-06T09:43:31"
"2024-02-06T09:43:31"
NONE
null
null
null
The laion-coco dataset is not available now. How can I download it? https://huggingface.co/datasets/laion/laion-coco
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6624/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6624/timeline
null
not_planned
false
https://api.github.com/repos/huggingface/datasets/issues/6623
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6623/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6623/comments
https://api.github.com/repos/huggingface/datasets/issues/6623/events
https://github.com/huggingface/datasets/issues/6623
2,103,870,123
I_kwDODunzps59ZoKr
6,623
streaming datasets doesn't work properly with multi-node
{ "avatar_url": "https://avatars.githubusercontent.com/u/30778939?v=4", "events_url": "https://api.github.com/users/rohitgr7/events{/privacy}", "followers_url": "https://api.github.com/users/rohitgr7/followers", "following_url": "https://api.github.com/users/rohitgr7/following{/other_user}", "gists_url": "https://api.github.com/users/rohitgr7/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rohitgr7", "id": 30778939, "login": "rohitgr7", "node_id": "MDQ6VXNlcjMwNzc4OTM5", "organizations_url": "https://api.github.com/users/rohitgr7/orgs", "received_events_url": "https://api.github.com/users/rohitgr7/received_events", "repos_url": "https://api.github.com/users/rohitgr7/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rohitgr7/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rohitgr7/subscriptions", "type": "User", "url": "https://api.github.com/users/rohitgr7" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
6
"2024-01-27T23:46:13"
"2024-02-02T09:42:10"
null
NONE
null
null
null
### Feature request Let's say I have a dataset with 5 samples with values [1, 2, 3, 4, 5], with 2 GPUs (for DDP) and a batch size of 2. This dataset is an `IterableDataset` since I am streaming it. Now I split the dataset using `split_dataset_by_node` to ensure it doesn't get repeated. And since it's already split, I don't have to use `DistributedSampler` (they don't work with iterable datasets anyway)? But in this case I noticed the following. First iteration: first GPU will get → [1, 2], second GPU will get → [3, 4]. Second iteration: first GPU will get → [5], second GPU will get → nothing. This actually creates an issue, since with `DistributedSampler` the samples are repeated internally to ensure none of the GPUs is missing data for gradient sync at any iteration. So my questions are: 1. Since the splitting happens beforehand, how do I make sure each GPU gets a batch at each iteration to avoid gradient sync issues? 2. Do we need to use `DistributedSampler`? If yes, how? 3. In the docstring of `split_dataset_by_node`, this is mentioned: *"If the dataset has a number of shards that is a factor of `world_size` (i.e. if `dataset.n_shards % world_size == 0`), then the shards are evenly assigned across the nodes, which is the most optimized. Otherwise, each node keeps 1 example out of `world_size`, skipping the other examples."* Can you explain the last part here? 4. If `dataset.n_shards % world_size != 0`, is it possible to shard the streaming dataset on the fly to avoid the case where data is missing? ### Motivation Streaming datasets should work with DDP, since big LLMs require a lot of data, DDP/multi-node is mostly used to train such models, and streaming can actually help solve the data part of it. ### Your contribution Yes, I can help in submitting the PR once we reach a mutual understanding on how it should behave.
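For reference, a minimal sketch of the setup being described (the dataset name, rank and world size are placeholders; in practice rank/world_size come from the distributed launcher):

```python
from datasets import load_dataset
from datasets.distributed import split_dataset_by_node

# Placeholder values: rank/world_size would normally be read from the
# torchrun/accelerate environment rather than hard-coded.
ds = load_dataset("c4", "en", split="train", streaming=True)
ds = split_dataset_by_node(ds, rank=0, world_size=2)

for example in ds:
    ...  # this process only sees its own share of the stream
```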
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6623/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6623/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6622
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6622/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6622/comments
https://api.github.com/repos/huggingface/datasets/issues/6622/events
https://github.com/huggingface/datasets/issues/6622
2,103,780,697
I_kwDODunzps59ZSVZ
6,622
multi-GPU map does not work
{ "avatar_url": "https://avatars.githubusercontent.com/u/17604849?v=4", "events_url": "https://api.github.com/users/kopyl/events{/privacy}", "followers_url": "https://api.github.com/users/kopyl/followers", "following_url": "https://api.github.com/users/kopyl/following{/other_user}", "gists_url": "https://api.github.com/users/kopyl/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kopyl", "id": 17604849, "login": "kopyl", "node_id": "MDQ6VXNlcjE3NjA0ODQ5", "organizations_url": "https://api.github.com/users/kopyl/orgs", "received_events_url": "https://api.github.com/users/kopyl/received_events", "repos_url": "https://api.github.com/users/kopyl/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kopyl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kopyl/subscriptions", "type": "User", "url": "https://api.github.com/users/kopyl" }
[]
closed
false
null
[]
null
1
"2024-01-27T20:06:08"
"2024-02-08T11:18:21"
"2024-02-08T11:18:21"
NONE
null
null
null
### Describe the bug Here is the code for single-GPU processing: https://pastebin.com/bfmEeK2y Here is the code for multi-GPU processing: https://pastebin.com/gQ7i5AQy Here is the video showing that the multi-GPU mapping does not work as expected (there are so many things wrong here, it's better to watch the 3-minute video than explain here): https://youtu.be/RNbdPkSppc4 ### Steps to reproduce the bug - ### Expected behavior - ### Environment info x2 RTX A4000
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6622/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6622/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6621
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6621/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6621/comments
https://api.github.com/repos/huggingface/datasets/issues/6621/events
https://github.com/huggingface/datasets/issues/6621
2,103,675,294
I_kwDODunzps59Y4me
6,621
deleted
{ "avatar_url": "https://avatars.githubusercontent.com/u/17604849?v=4", "events_url": "https://api.github.com/users/kopyl/events{/privacy}", "followers_url": "https://api.github.com/users/kopyl/followers", "following_url": "https://api.github.com/users/kopyl/following{/other_user}", "gists_url": "https://api.github.com/users/kopyl/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kopyl", "id": 17604849, "login": "kopyl", "node_id": "MDQ6VXNlcjE3NjA0ODQ5", "organizations_url": "https://api.github.com/users/kopyl/orgs", "received_events_url": "https://api.github.com/users/kopyl/received_events", "repos_url": "https://api.github.com/users/kopyl/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kopyl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kopyl/subscriptions", "type": "User", "url": "https://api.github.com/users/kopyl" }
[]
closed
false
null
[]
null
0
"2024-01-27T16:59:58"
"2024-01-27T17:14:43"
"2024-01-27T17:14:43"
NONE
null
null
null
...
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6621/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6621/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6620
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6620/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6620/comments
https://api.github.com/repos/huggingface/datasets/issues/6620/events
https://github.com/huggingface/datasets/issues/6620
2,103,110,536
I_kwDODunzps59WuuI
6,620
wiki_dpr.py error (ID mismatch between lines {id} and vector {vec_id}
{ "avatar_url": "https://avatars.githubusercontent.com/u/101498700?v=4", "events_url": "https://api.github.com/users/kiehls90/events{/privacy}", "followers_url": "https://api.github.com/users/kiehls90/followers", "following_url": "https://api.github.com/users/kiehls90/following{/other_user}", "gists_url": "https://api.github.com/users/kiehls90/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kiehls90", "id": 101498700, "login": "kiehls90", "node_id": "U_kgDOBgy_TA", "organizations_url": "https://api.github.com/users/kiehls90/orgs", "received_events_url": "https://api.github.com/users/kiehls90/received_events", "repos_url": "https://api.github.com/users/kiehls90/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kiehls90/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kiehls90/subscriptions", "type": "User", "url": "https://api.github.com/users/kiehls90" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
1
"2024-01-27T01:00:09"
"2024-02-06T09:40:19"
"2024-02-06T09:40:19"
NONE
null
null
null
### Describe the bug I'm trying to run a RAG example, and the dataset is wiki_dpr. The wiki_dpr download and extraction completed successfully. However, at the "generating train split" stage, an error from wiki_dpr.py keeps popping up, specifically in `_generate_examples`: 1. The following error occurs in the line **id, text, title = line.strip().split("\t")**: ValueError: not enough values to unpack (expected 3, got 2) -> This part handles exceptions so that even if an error occurs, it passes. 2. **ID mismatch between lines {id} and vector {vec_id}** This error seems to occur at the line `assert int(id) == int(vec_id)`. After I handled the exception for the split error, generating the train split progressed to 80%, but an id mismatch error occurred at about the 16200000th vector id. Debugging is even more difficult because it takes a long time to download and split wiki_dpr. I need help. Thank you in advance!! ### Steps to reproduce the bug Occurs in the "generating train split" step when running the RAG example in the transformers repository. Specifically, it is an error in wiki_dpr.py. ### Expected behavior . ### Environment info python 3.8
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6620/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6620/timeline
null
not_planned
false
https://api.github.com/repos/huggingface/datasets/issues/6619
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6619/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6619/comments
https://api.github.com/repos/huggingface/datasets/issues/6619/events
https://github.com/huggingface/datasets/pull/6619
2,102,407,478
PR_kwDODunzps5lK2VY
6,619
Migrate from `setup.cfg` to `pyproject.toml`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
2
"2024-01-26T15:27:10"
"2024-01-26T15:53:40"
"2024-01-26T15:47:32"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6619.diff", "html_url": "https://github.com/huggingface/datasets/pull/6619", "merged_at": "2024-01-26T15:47:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/6619.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6619" }
Based on https://github.com/huggingface/huggingface_hub/pull/1971 in `hfh`
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6619/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6619/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6618
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6618/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6618/comments
https://api.github.com/repos/huggingface/datasets/issues/6618/events
https://github.com/huggingface/datasets/issues/6618
2,101,868,198
I_kwDODunzps59R_am
6,618
While importing load_dataset from datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/77973415?v=4", "events_url": "https://api.github.com/users/Era-cell/events{/privacy}", "followers_url": "https://api.github.com/users/Era-cell/followers", "following_url": "https://api.github.com/users/Era-cell/following{/other_user}", "gists_url": "https://api.github.com/users/Era-cell/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Era-cell", "id": 77973415, "login": "Era-cell", "node_id": "MDQ6VXNlcjc3OTczNDE1", "organizations_url": "https://api.github.com/users/Era-cell/orgs", "received_events_url": "https://api.github.com/users/Era-cell/received_events", "repos_url": "https://api.github.com/users/Era-cell/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Era-cell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Era-cell/subscriptions", "type": "User", "url": "https://api.github.com/users/Era-cell" }
[]
closed
false
null
[]
null
3
"2024-01-26T09:21:57"
"2024-02-06T10:57:01"
"2024-02-06T09:25:54"
NONE
null
null
null
### Describe the bug `cannot import name 'DEFAULT_CIPHERS' from 'urllib3.util.ssl_'` is the error I received. ### Steps to reproduce the bug `from datasets import load_dataset` ### Expected behavior No errors ### Environment info python 3.11.5
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6618/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6618/timeline
null
not_planned
false
https://api.github.com/repos/huggingface/datasets/issues/6617
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6617/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6617/comments
https://api.github.com/repos/huggingface/datasets/issues/6617/events
https://github.com/huggingface/datasets/pull/6617
2,100,459,449
PR_kwDODunzps5lEagV
6,617
Fix CI: pyarrow 15, pandas 2.2 and sqlachemy
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-01-25T13:57:41"
"2024-01-26T14:56:46"
"2024-01-26T14:50:44"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6617.diff", "html_url": "https://github.com/huggingface/datasets/pull/6617", "merged_at": "2024-01-26T14:50:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/6617.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6617" }
this should fix the CI failures on `main` close https://github.com/huggingface/datasets/issues/5477
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6617/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6617/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6616
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6616/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6616/comments
https://api.github.com/repos/huggingface/datasets/issues/6616/events
https://github.com/huggingface/datasets/pull/6616
2,100,125,709
PR_kwDODunzps5lDSEL
6,616
Use schema metadata only if it matches features
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
2
"2024-01-25T11:01:14"
"2024-01-26T16:25:24"
"2024-01-26T16:19:12"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6616.diff", "html_url": "https://github.com/huggingface/datasets/pull/6616", "merged_at": "2024-01-26T16:19:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/6616.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6616" }
e.g. if we use `map` in arrow format and transform the table, the returned table might have new columns but the metadata might be wrong
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6616/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6616/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6615
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6615/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6615/comments
https://api.github.com/repos/huggingface/datasets/issues/6615/events
https://github.com/huggingface/datasets/issues/6615
2,098,951,409
I_kwDODunzps59G3Tx
6,615
...
{ "avatar_url": "https://avatars.githubusercontent.com/u/22179777?v=4", "events_url": "https://api.github.com/users/ftkeys/events{/privacy}", "followers_url": "https://api.github.com/users/ftkeys/followers", "following_url": "https://api.github.com/users/ftkeys/following{/other_user}", "gists_url": "https://api.github.com/users/ftkeys/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ftkeys", "id": 22179777, "login": "ftkeys", "node_id": "MDQ6VXNlcjIyMTc5Nzc3", "organizations_url": "https://api.github.com/users/ftkeys/orgs", "received_events_url": "https://api.github.com/users/ftkeys/received_events", "repos_url": "https://api.github.com/users/ftkeys/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ftkeys/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ftkeys/subscriptions", "type": "User", "url": "https://api.github.com/users/ftkeys" }
[]
closed
false
null
[]
null
1
"2024-01-24T19:37:03"
"2024-01-24T19:42:30"
"2024-01-24T19:40:11"
NONE
null
null
null
...
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6615/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6615/timeline
null
not_planned
false
https://api.github.com/repos/huggingface/datasets/issues/6614
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6614/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6614/comments
https://api.github.com/repos/huggingface/datasets/issues/6614/events
https://github.com/huggingface/datasets/issues/6614
2,098,884,520
I_kwDODunzps59Gm-o
6,614
`datasets/downloads` cleanup tool
{ "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/stas00", "id": 10676103, "login": "stas00", "node_id": "MDQ6VXNlcjEwNjc2MTAz", "organizations_url": "https://api.github.com/users/stas00/orgs", "received_events_url": "https://api.github.com/users/stas00/received_events", "repos_url": "https://api.github.com/users/stas00/repos", "site_admin": false, "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "type": "User", "url": "https://api.github.com/users/stas00" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
0
"2024-01-24T18:52:10"
"2024-01-24T18:55:09"
null
CONTRIBUTOR
null
null
null
### Feature request Splitting off https://github.com/huggingface/huggingface_hub/issues/1997 - currently `huggingface-cli delete-cache` doesn't take care of cleaning `datasets` temp files e.g. I discovered having millions of files under `datasets/downloads` cache, I had to do: ``` sudo find /data/huggingface/datasets/downloads -type f -mtime +3 -exec rm {} \+ sudo find /data/huggingface/datasets/downloads -type d -empty -delete ``` could the cleanup be integrated into `huggingface-cli` or a different tool provided to keep the folders tidy and not consume inodes and space e.g. there were tens of thousands of `.lock` files - I don't know why they never get removed - lock files should be temporary for the duration of the operation requiring the lock and not remain after the operation finished, IMHO. Also I think one should be able to nuke `datasets/downloads` w/o hurting the cache, but I think there are some datasets that rely on files extracted under this dir - or at least they did in the past - which is very difficult to manage since one has no idea what is safe to delete and what not. Thank you @Wauplin (requested to be tagged)
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/6614/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6614/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6612
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6612/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6612/comments
https://api.github.com/repos/huggingface/datasets/issues/6612/events
https://github.com/huggingface/datasets/issues/6612
2,098,078,210
I_kwDODunzps59DiIC
6,612
cnn_dailymail repeats itself
{ "avatar_url": "https://avatars.githubusercontent.com/u/8274752?v=4", "events_url": "https://api.github.com/users/KeremZaman/events{/privacy}", "followers_url": "https://api.github.com/users/KeremZaman/followers", "following_url": "https://api.github.com/users/KeremZaman/following{/other_user}", "gists_url": "https://api.github.com/users/KeremZaman/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/KeremZaman", "id": 8274752, "login": "KeremZaman", "node_id": "MDQ6VXNlcjgyNzQ3NTI=", "organizations_url": "https://api.github.com/users/KeremZaman/orgs", "received_events_url": "https://api.github.com/users/KeremZaman/received_events", "repos_url": "https://api.github.com/users/KeremZaman/repos", "site_admin": false, "starred_url": "https://api.github.com/users/KeremZaman/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/KeremZaman/subscriptions", "type": "User", "url": "https://api.github.com/users/KeremZaman" }
[]
closed
false
null
[]
null
1
"2024-01-24T11:38:25"
"2024-02-01T08:14:50"
"2024-02-01T08:14:50"
NONE
null
null
null
### Describe the bug When I try to load the `cnn_dailymail` dataset, it takes longer than usual, and when I checked the dataset it's 3x bigger than it's supposed to be. Check https://huggingface.co/datasets/cnn_dailymail: it says 287k rows for train, but when I check the length of the train split it says 861339. I also checked the data: ``` >>> ds['train']['highlights'][0] "Harry Potter star Daniel Radcliffe gets £20M fortune as he turns 18 Monday . Young actor says he has no plans to fritter his cash away . Radcliffe's earnings from first five Potter films have been held in trust fund ." >>> ds['train']['highlights'][287113] "Harry Potter star Daniel Radcliffe gets £20M fortune as he turns 18 Monday .\nYoung actor says he has no plans to fritter his cash away .\nRadcliffe's earnings from first five Potter films have been held in trust fund ." >>> ds['train']['highlights'][574226] "Harry Potter star Daniel Radcliffe gets £20M fortune as he turns 18 Monday .\nYoung actor says he has no plans to fritter his cash away .\nRadcliffe's earnings from first five Potter films have been held in trust fund ." ``` The dataset seems to have been updated 6 days ago to convert it to Parquet. Probably, there is some issue with backward compatibility. ### Steps to reproduce the bug 1. ``` from datasets import load_dataset ds = load_dataset('cnn_dailymail', '3.0.0') len(ds['train']) ``` ### Expected behavior It should not repeat itself. ### Environment info datasets==2.13.2 Python==3.7.13
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6612/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6612/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6611
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6611/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6611/comments
https://api.github.com/repos/huggingface/datasets/issues/6611/events
https://github.com/huggingface/datasets/issues/6611
2,096,004,858
I_kwDODunzps587n76
6,611
`load_from_disk` with large dataset from S3 runs into `botocore.exceptions.ClientError`
{ "avatar_url": "https://avatars.githubusercontent.com/u/15320635?v=4", "events_url": "https://api.github.com/users/zotroneneis/events{/privacy}", "followers_url": "https://api.github.com/users/zotroneneis/followers", "following_url": "https://api.github.com/users/zotroneneis/following{/other_user}", "gists_url": "https://api.github.com/users/zotroneneis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/zotroneneis", "id": 15320635, "login": "zotroneneis", "node_id": "MDQ6VXNlcjE1MzIwNjM1", "organizations_url": "https://api.github.com/users/zotroneneis/orgs", "received_events_url": "https://api.github.com/users/zotroneneis/received_events", "repos_url": "https://api.github.com/users/zotroneneis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/zotroneneis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zotroneneis/subscriptions", "type": "User", "url": "https://api.github.com/users/zotroneneis" }
[]
open
false
null
[]
null
0
"2024-01-23T12:37:57"
"2024-01-23T12:37:57"
null
NONE
null
null
null
### Describe the bug When loading a large dataset (>1000GB) from S3 I run into the following error: ``` Traceback (most recent call last): File "/home/alp/.local/lib/python3.10/site-packages/s3fs/core.py", line 113, in _error_wrapper return await func(*args, **kwargs) File "/home/alp/.local/lib/python3.10/site-packages/aiobotocore/client.py", line 383, in _make_api_call raise error_class(parsed_response, operation_name) botocore.exceptions.ClientError: An error occurred (RequestTimeTooSkewed) when calling the GetObject operation: The difference between the request time and the current time is too large. The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/home/alp/phoneme-classification.monorepo/aws_sagemaker/data_processing/inspect_final_dataset.py", line 13, in <module> dataset = load_from_disk("s3://speech-recognition-processed-data/whisper/de/train_data/", storage_options=storage_options) File "/home/alp/.local/lib/python3.10/site-packages/datasets/load.py", line 1902, in load_from_disk return Dataset.load_from_disk(dataset_path, keep_in_memory=keep_in_memory, storage_options=storage_options) File "/home/alp/.local/lib/python3.10/site-packages/datasets/arrow_dataset.py", line 1686, in load_from_disk fs.download(src_dataset_path, dest_dataset_path.as_posix(), recursive=True) File "/home/alp/.local/lib/python3.10/site-packages/fsspec/spec.py", line 1480, in download return self.get(rpath, lpath, recursive=recursive, **kwargs) File "/home/alp/.local/lib/python3.10/site-packages/fsspec/asyn.py", line 121, in wrapper return sync(self.loop, func, *args, **kwargs) File "/home/alp/.local/lib/python3.10/site-packages/fsspec/asyn.py", line 106, in sync raise return_result File "/home/alp/.local/lib/python3.10/site-packages/fsspec/asyn.py", line 61, in _runner result[0] = await coro File "/home/alp/.local/lib/python3.10/site-packages/fsspec/asyn.py", line 604, in _get return await _run_coros_in_chunks( File "/home/alp/.local/lib/python3.10/site-packages/fsspec/asyn.py", line 257, in _run_coros_in_chunks await asyncio.gather(*chunk, return_exceptions=return_exceptions), File "/usr/lib/python3.10/asyncio/tasks.py", line 408, in wait_for return await fut File "/home/alp/.local/lib/python3.10/site-packages/s3fs/core.py", line 1193, in _get_file body, content_length = await _open_file(range=0) File "/home/alp/.local/lib/python3.10/site-packages/s3fs/core.py", line 1184, in _open_file resp = await self._call_s3( File "/home/alp/.local/lib/python3.10/site-packages/s3fs/core.py", line 348, in _call_s3 return await _error_wrapper( File "/home/alp/.local/lib/python3.10/site-packages/s3fs/core.py", line 140, in _error_wrapper raise err PermissionError: The difference between the request time and the current time is too large. ``` The usual cause of this error is that the time on my local machine is out of sync with the current time. However, this is not the case here. I checked the time and even reset it with no success. See resources here: - https://stackoverflow.com/questions/4770635/s3-error-the-difference-between-the-request-time-and-the-current-time-is-too-la - https://stackoverflow.com/questions/25964491/aws-s3-upload-fails-requesttimetooskewed The error does not appear when loading a smaller dataset (e.g. our test set) from the same s3 path. ### Steps to reproduce the bug 1. Create large dataset 2. 
Try loading it from s3 using: ``` dataset = load_from_disk("s3://...", storage_options=storage_options) ``` ### Expected behavior Load dataset without running into this error. ### Environment info - `datasets` version: 2.13.1 - Platform: Linux-5.15.0-91-generic-x86_64-with-glibc2.35 - Python version: 3.10.12 - Huggingface_hub version: 0.19.3 - PyArrow version: 12.0.1 - Pandas version: 2.0.3
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6611/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6611/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6610
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6610/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6610/comments
https://api.github.com/repos/huggingface/datasets/issues/6610/events
https://github.com/huggingface/datasets/issues/6610
2,095,643,711
I_kwDODunzps586Pw_
6,610
cast_column to Sequence(subfeatures_dict) has err
{ "avatar_url": "https://avatars.githubusercontent.com/u/16574677?v=4", "events_url": "https://api.github.com/users/neiblegy/events{/privacy}", "followers_url": "https://api.github.com/users/neiblegy/followers", "following_url": "https://api.github.com/users/neiblegy/following{/other_user}", "gists_url": "https://api.github.com/users/neiblegy/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/neiblegy", "id": 16574677, "login": "neiblegy", "node_id": "MDQ6VXNlcjE2NTc0Njc3", "organizations_url": "https://api.github.com/users/neiblegy/orgs", "received_events_url": "https://api.github.com/users/neiblegy/received_events", "repos_url": "https://api.github.com/users/neiblegy/repos", "site_admin": false, "starred_url": "https://api.github.com/users/neiblegy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/neiblegy/subscriptions", "type": "User", "url": "https://api.github.com/users/neiblegy" }
[]
closed
false
null
[]
null
2
"2024-01-23T09:32:32"
"2024-01-25T02:15:23"
"2024-01-25T02:15:23"
NONE
null
null
null
### Describe the bug I am working with the following demo code: ``` from datasets import load_dataset from datasets.features import Sequence, Value, ClassLabel, Features ais_dataset = load_dataset("/data/ryan.gao/ais_dataset_cache/raw/1978/") ais_dataset = ais_dataset["train"] def add_class(example): example["my_labeled_bbox"] = {"bbox": [100,100,200,200], "label": "cat"} return example ais_dataset = ais_dataset.map(add_class, batched=False, num_proc=32) ais_dataset = ais_dataset.cast_column("my_labeled_bbox", Sequence( { "bbox": Sequence(Value(dtype="int64")), "label": ClassLabel(names=["cat", "dog"]) })) print(ais_dataset[0]) ``` However, executing this code results in an error: ``` File "/home/protoss.gao/.local/lib/python3.9/site-packages/datasets/table.py", line 2111, in cast_array_to_feature raise TypeError(f"Couldn't cast array of type\n{array.type}\nto\n{feature}") TypeError: Couldn't cast array of type int64 to Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None) ``` Upon examining the source code in datasets/table.py at line 2035: ``` if isinstance(feature, Sequence) and isinstance(feature.feature, dict): feature = { name: Sequence(subfeature, length=feature.length) for name, subfeature in feature.feature.items() } ``` I noticed that if subfeature is of type Sequence, the code results in Sequence(Sequence(...), ...) and Sequence(ClassLabel(...), ...), which appears to be the source of the error. ### Steps to reproduce the bug run my demo code ### Expected behavior no exception ### Environment info python 3.9 datasets: 2.16.1
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6610/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6610/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6609
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6609/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6609/comments
https://api.github.com/repos/huggingface/datasets/issues/6609/events
https://github.com/huggingface/datasets/issues/6609
2,095,085,650
I_kwDODunzps584HhS
6,609
Wrong path for cache directory in offline mode
{ "avatar_url": "https://avatars.githubusercontent.com/u/42117435?v=4", "events_url": "https://api.github.com/users/je-santos/events{/privacy}", "followers_url": "https://api.github.com/users/je-santos/followers", "following_url": "https://api.github.com/users/je-santos/following{/other_user}", "gists_url": "https://api.github.com/users/je-santos/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/je-santos", "id": 42117435, "login": "je-santos", "node_id": "MDQ6VXNlcjQyMTE3NDM1", "organizations_url": "https://api.github.com/users/je-santos/orgs", "received_events_url": "https://api.github.com/users/je-santos/received_events", "repos_url": "https://api.github.com/users/je-santos/repos", "site_admin": false, "starred_url": "https://api.github.com/users/je-santos/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/je-santos/subscriptions", "type": "User", "url": "https://api.github.com/users/je-santos" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
5
"2024-01-23T01:47:19"
"2024-02-06T17:21:25"
"2024-02-06T17:21:25"
NONE
null
null
null
### Describe the bug Dear huggingfacers, I'm trying to use a subset of the-stack dataset. When I run the command the first time ``` dataset = load_dataset( path='bigcode/the-stack', data_dir='data/fortran', split='train' ) ``` It downloads the files and caches them normally. Nevertheless, since my compute nodes are not online (`HF_DATASETS_OFFLINE=1`), whenever I try to run the command again the library passes the wrong cache path: `Cache directory for the-stack doesn't exist at /Users/user/.cache/huggingface/datasets/bigcode___the-stack/default-data_dir=data%2Ffortran-data_dir=data%2Ffortran` when the right path is: `'/Users/user/.cache/huggingface/datasets/bigcode___the-stack/default-data_dir=data\%2Ffortran` Not sure why those redundancies are included in the path. If I try adding the correct path through the cache_dir argument it throws an error: ConnectionError: Couldn't reach the Hugging Face Hub for dataset 'bigcode/the-stack': Offline mode is enabled. Your help with this issue is greatly appreciated. Thanks a lot for the great work. ### Steps to reproduce the bug 1: `dataset = load_dataset( path='bigcode/the-stack', data_dir='data/fortran', split='train' )` 2: `HF_DATASETS_OFFLINE=1` 3: `dataset = load_dataset( path='bigcode/the-stack', data_dir='data/fortran', split='train' )` ### Expected behavior being able to use the cached data ### Environment info several different systems
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6609/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6609/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6608
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6608/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6608/comments
https://api.github.com/repos/huggingface/datasets/issues/6608/events
https://github.com/huggingface/datasets/pull/6608
2,094,153,292
PR_kwDODunzps5ku_lN
6,608
Add `with_rank` param to `Dataset.filter`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
2
"2024-01-22T15:19:16"
"2024-01-29T16:43:11"
"2024-01-29T16:36:53"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6608.diff", "html_url": "https://github.com/huggingface/datasets/pull/6608", "merged_at": "2024-01-29T16:36:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/6608.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6608" }
Fix #6564
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6608/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6608/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6607
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6607/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6607/comments
https://api.github.com/repos/huggingface/datasets/issues/6607/events
https://github.com/huggingface/datasets/pull/6607
2,091,766,063
PR_kwDODunzps5knGse
6,607
Update features.py to avoid bfloat16 unsupported error
{ "avatar_url": "https://avatars.githubusercontent.com/u/75697181?v=4", "events_url": "https://api.github.com/users/skaulintel/events{/privacy}", "followers_url": "https://api.github.com/users/skaulintel/followers", "following_url": "https://api.github.com/users/skaulintel/following{/other_user}", "gists_url": "https://api.github.com/users/skaulintel/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/skaulintel", "id": 75697181, "login": "skaulintel", "node_id": "MDQ6VXNlcjc1Njk3MTgx", "organizations_url": "https://api.github.com/users/skaulintel/orgs", "received_events_url": "https://api.github.com/users/skaulintel/received_events", "repos_url": "https://api.github.com/users/skaulintel/repos", "site_admin": false, "starred_url": "https://api.github.com/users/skaulintel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/skaulintel/subscriptions", "type": "User", "url": "https://api.github.com/users/skaulintel" }
[]
open
false
null
[]
null
0
"2024-01-20T00:39:44"
"2024-01-20T00:39:44"
null
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6607.diff", "html_url": "https://github.com/huggingface/datasets/pull/6607", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6607.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6607" }
Fixes https://github.com/huggingface/datasets/issues/6566 Let me know if there's any tests I need to clear.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6607/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6607/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6606
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6606/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6606/comments
https://api.github.com/repos/huggingface/datasets/issues/6606/events
https://github.com/huggingface/datasets/pull/6606
2,091,088,785
PR_kwDODunzps5kk3KB
6,606
Dedicated RNG object for fingerprinting
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
2
"2024-01-19T18:34:47"
"2024-01-26T15:11:38"
"2024-01-26T15:05:34"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6606.diff", "html_url": "https://github.com/huggingface/datasets/pull/6606", "merged_at": "2024-01-26T15:05:34Z", "patch_url": "https://github.com/huggingface/datasets/pull/6606.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6606" }
Closes https://github.com/huggingface/datasets/issues/6604, closes https://github.com/huggingface/datasets/issues/2775
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6606/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6606/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6605
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6605/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6605/comments
https://api.github.com/repos/huggingface/datasets/issues/6605/events
https://github.com/huggingface/datasets/issues/6605
2,090,188,376
I_kwDODunzps58lb5Y
6,605
ELI5 no longer available, but referenced in example code
{ "avatar_url": "https://avatars.githubusercontent.com/u/81480344?v=4", "events_url": "https://api.github.com/users/drdsgvo/events{/privacy}", "followers_url": "https://api.github.com/users/drdsgvo/followers", "following_url": "https://api.github.com/users/drdsgvo/following{/other_user}", "gists_url": "https://api.github.com/users/drdsgvo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/drdsgvo", "id": 81480344, "login": "drdsgvo", "node_id": "MDQ6VXNlcjgxNDgwMzQ0", "organizations_url": "https://api.github.com/users/drdsgvo/orgs", "received_events_url": "https://api.github.com/users/drdsgvo/received_events", "repos_url": "https://api.github.com/users/drdsgvo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/drdsgvo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/drdsgvo/subscriptions", "type": "User", "url": "https://api.github.com/users/drdsgvo" }
[]
closed
false
null
[]
null
1
"2024-01-19T10:21:52"
"2024-02-01T17:58:23"
"2024-02-01T17:58:22"
NONE
null
null
null
Here, example code is given: https://huggingface.co/docs/transformers/tasks/language_modeling This code and the accompanying article reference the ELI5 dataset. ELI5 is no longer available, as the ELI5 dataset page states: https://huggingface.co/datasets/eli5 "Defunct: Dataset "eli5" is defunct and no longer accessible due to unavailability of the source data. Reddit recently [changed the terms of access](https://www.reddit.com/r/reddit/comments/12qwagm/an_update_regarding_reddits_api/) to its API, making the source data for this dataset unavailable." Please change the example code to use a different dataset.
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/6605/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6605/timeline
null
completed
false
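For illustration of the fix requested above, any dataset with a plain text column can stand in for ELI5 in that tutorial. The choice below is only an assumed example, not the official replacement in the Transformers docs:

```python
from datasets import load_dataset

# wikitext is one freely available option for a causal language modeling demo;
# any dataset exposing a plain "text" column would work equally well here.
dataset = load_dataset("wikitext", "wikitext-2-raw-v1", split="train[:1000]")
print(dataset[0]["text"])
```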
https://api.github.com/repos/huggingface/datasets/issues/6604
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6604/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6604/comments
https://api.github.com/repos/huggingface/datasets/issues/6604/events
https://github.com/huggingface/datasets/issues/6604
2,089,713,945
I_kwDODunzps58joEZ
6,604
Transform fingerprint collisions due to setting fixed random seed
{ "avatar_url": "https://avatars.githubusercontent.com/u/6687910?v=4", "events_url": "https://api.github.com/users/normster/events{/privacy}", "followers_url": "https://api.github.com/users/normster/followers", "following_url": "https://api.github.com/users/normster/following{/other_user}", "gists_url": "https://api.github.com/users/normster/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/normster", "id": 6687910, "login": "normster", "node_id": "MDQ6VXNlcjY2ODc5MTA=", "organizations_url": "https://api.github.com/users/normster/orgs", "received_events_url": "https://api.github.com/users/normster/received_events", "repos_url": "https://api.github.com/users/normster/repos", "site_admin": false, "starred_url": "https://api.github.com/users/normster/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/normster/subscriptions", "type": "User", "url": "https://api.github.com/users/normster" }
[]
closed
false
null
[]
null
2
"2024-01-19T06:32:25"
"2024-01-26T15:05:35"
"2024-01-26T15:05:35"
NONE
null
null
null
### Describe the bug The transform fingerprinting logic relies on the `random` library for random bits when the function is not hashable (e.g. bound methods as used in `trl`: https://github.com/huggingface/trl/blob/main/trl/trainer/dpo_trainer.py#L356). This causes collisions when the training code sets a fixed random seed, which is common practice: https://github.com/huggingface/alignment-handbook/blob/main/recipes/zephyr-7b-beta/sft/config_full.yaml#L45. This results in fingerprint collisions which leads to silently loading incorrect cache files corresponding to completely different datasets. ### Steps to reproduce the bug n/a ### Expected behavior Use `uuid` v4 instead of `random.getrandbits()` ### Environment info `datasets` main branch
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6604/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6604/timeline
null
completed
false
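To make the collision mechanism above concrete, here is a minimal sketch (not the actual `datasets` fingerprinting code; the variable names are illustrative) of why drawing randomness from the global `random` module breaks under a fixed seed, and why a dedicated RNG object or `uuid4` does not:

```python
import random
import uuid

# With a fixed global seed (common in training scripts), two "random"
# fingerprint suffixes drawn from the module-level generator are identical.
random.seed(42)
fp_a = format(random.getrandbits(64), "x")
random.seed(42)
fp_b = format(random.getrandbits(64), "x")
assert fp_a == fp_b  # collision: two different transforms would share a cache file

# A dedicated generator (or uuid4) is not reset by the user's call to random.seed().
dedicated_rng = random.Random()  # seeded from OS entropy at construction
fp_c = format(dedicated_rng.getrandbits(64), "x")
fp_d = uuid.uuid4().hex
print(fp_c, fp_d)
```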
https://api.github.com/repos/huggingface/datasets/issues/6603
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6603/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6603/comments
https://api.github.com/repos/huggingface/datasets/issues/6603/events
https://github.com/huggingface/datasets/issues/6603
2,089,230,766
I_kwDODunzps58hyGu
6,603
datasets map `cache_file_name` does not work
{ "avatar_url": "https://avatars.githubusercontent.com/u/35147961?v=4", "events_url": "https://api.github.com/users/ChenchaoZhao/events{/privacy}", "followers_url": "https://api.github.com/users/ChenchaoZhao/followers", "following_url": "https://api.github.com/users/ChenchaoZhao/following{/other_user}", "gists_url": "https://api.github.com/users/ChenchaoZhao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ChenchaoZhao", "id": 35147961, "login": "ChenchaoZhao", "node_id": "MDQ6VXNlcjM1MTQ3OTYx", "organizations_url": "https://api.github.com/users/ChenchaoZhao/orgs", "received_events_url": "https://api.github.com/users/ChenchaoZhao/received_events", "repos_url": "https://api.github.com/users/ChenchaoZhao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ChenchaoZhao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ChenchaoZhao/subscriptions", "type": "User", "url": "https://api.github.com/users/ChenchaoZhao" }
[]
open
false
null
[]
null
2
"2024-01-18T23:08:30"
"2024-01-28T04:01:15"
null
NONE
null
null
null
### Describe the bug In the documentation, the `datasets.Dataset.map` argument `cache_file_name` is said to be a string, but passing one doesn't work. ### Steps to reproduce the bug 1. pick a dataset 2. write a map function 3. do `ds.map(..., cache_file_name='some_filename')` 4. it crashes ### Expected behavior It should either tell you that the filename you specified does not exist, or generate a new file at that path. ### Environment info - `datasets` version: 2.16.0 - Platform: Linux-5.10.201-168.748.amzn2int.x86_64-x86_64-with-glibc2.26 - Python version: 3.10.13 - `huggingface_hub` version: 0.20.2 - PyArrow version: 14.0.2 - Pandas version: 2.1.4 - `fsspec` version: 2023.12.2
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6603/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6603/timeline
null
null
false
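For context on the report above, here is a small sketch of how `cache_file_name` is typically passed to `Dataset.map`. The dataset and file path are hypothetical, and whether the call succeeds for a given path and `datasets` version is exactly what the report disputes:

```python
from datasets import Dataset

ds = Dataset.from_dict({"x": [1, 2, 3]})

# cache_file_name is documented as the path of the resulting Arrow cache file.
# A full path inside an existing directory is the safest form; bare or relative
# filenames are where failures like the one above tend to show up.
ds_mapped = ds.map(
    lambda ex: {"y": ex["x"] * 2},
    cache_file_name="/tmp/map_cache_example.arrow",  # hypothetical location
)
print(ds_mapped[0])
```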
https://api.github.com/repos/huggingface/datasets/issues/6602
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6602/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6602/comments
https://api.github.com/repos/huggingface/datasets/issues/6602/events
https://github.com/huggingface/datasets/issues/6602
2,089,217,483
I_kwDODunzps58hu3L
6,602
Index error when data is large
{ "avatar_url": "https://avatars.githubusercontent.com/u/35147961?v=4", "events_url": "https://api.github.com/users/ChenchaoZhao/events{/privacy}", "followers_url": "https://api.github.com/users/ChenchaoZhao/followers", "following_url": "https://api.github.com/users/ChenchaoZhao/following{/other_user}", "gists_url": "https://api.github.com/users/ChenchaoZhao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ChenchaoZhao", "id": 35147961, "login": "ChenchaoZhao", "node_id": "MDQ6VXNlcjM1MTQ3OTYx", "organizations_url": "https://api.github.com/users/ChenchaoZhao/orgs", "received_events_url": "https://api.github.com/users/ChenchaoZhao/received_events", "repos_url": "https://api.github.com/users/ChenchaoZhao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ChenchaoZhao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ChenchaoZhao/subscriptions", "type": "User", "url": "https://api.github.com/users/ChenchaoZhao" }
[]
open
false
null
[]
null
0
"2024-01-18T23:00:47"
"2024-01-18T23:00:47"
null
NONE
null
null
null
### Describe the bug At the `save_to_disk` step, the default `max_shard_size` is `500MB`. However, one row of the dataset might be larger than `500MB`, in which case saving throws an index error. Without looking at the source code, the bug appears to be a wrong calculation of the number of shards, which I think is `total_size / min(max_shard_size, row_size)` but should be `total_size / max(max_shard_size, row_size)`. The workaround is setting a larger `max_shard_size`. ### Steps to reproduce the bug 1. create a dataset with large dense tensors per row 2. set a small `max_shard_size`, say 1MB 3. `save_to_disk` ### Expected behavior ``` raise IndexError(f"Index {index} out of range for dataset of size {size}.") IndexError: Index 10 out of range for dataset of size 10. ``` ### Environment info - `datasets` version: 2.16.0 - Platform: Linux-5.10.201-168.748.amzn2int.x86_64-x86_64-with-glibc2.26 - Python version: 3.10.13 - `huggingface_hub` version: 0.20.2 - PyArrow version: 14.0.2 - Pandas version: 2.1.4 - `fsspec` version: 2023.12.2
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6602/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6602/timeline
null
null
false
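A rough numeric sketch of the arithmetic described above (illustrative only, not the actual `save_to_disk` internals): if the shard count is derived purely from byte size, rows larger than `max_shard_size` request more shards than there are rows, and raising the limit avoids it:

```python
# Hypothetical illustration of the reported failure mode.
num_rows = 10
row_size = 5 * 1024**2           # ~5 MB of dense tensor data per row
total_size = num_rows * row_size
max_shard_size = 1 * 1024**2     # 1 MB, smaller than a single row

num_shards = max(int(total_size / max_shard_size), 1)  # -> 50 shards for 10 rows
assert num_shards > num_rows  # more shards than rows: index-error territory

# Workaround from the report: make sure one shard can hold the largest row.
safe_shards = max(int(total_size / max(max_shard_size, row_size)), 1)  # -> 10
print(num_shards, safe_shards)
```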
https://api.github.com/repos/huggingface/datasets/issues/6601
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6601/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6601/comments
https://api.github.com/repos/huggingface/datasets/issues/6601/events
https://github.com/huggingface/datasets/pull/6601
2,088,624,054
PR_kwDODunzps5kcWN0
6,601
add safety checks when using only part of dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/63422923?v=4", "events_url": "https://api.github.com/users/benseddikismail/events{/privacy}", "followers_url": "https://api.github.com/users/benseddikismail/followers", "following_url": "https://api.github.com/users/benseddikismail/following{/other_user}", "gists_url": "https://api.github.com/users/benseddikismail/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/benseddikismail", "id": 63422923, "login": "benseddikismail", "node_id": "MDQ6VXNlcjYzNDIyOTIz", "organizations_url": "https://api.github.com/users/benseddikismail/orgs", "received_events_url": "https://api.github.com/users/benseddikismail/received_events", "repos_url": "https://api.github.com/users/benseddikismail/repos", "site_admin": false, "starred_url": "https://api.github.com/users/benseddikismail/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/benseddikismail/subscriptions", "type": "User", "url": "https://api.github.com/users/benseddikismail" }
[]
open
false
null
[]
null
0
"2024-01-18T16:16:59"
"2024-01-18T16:16:59"
null
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6601.diff", "html_url": "https://github.com/huggingface/datasets/pull/6601", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/6601.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6601" }
Added some checks to prevent errors that arise when using evaluate.py on only a portion of the SQuAD 2.0 dataset.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6601/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6601/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6600
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6600/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6600/comments
https://api.github.com/repos/huggingface/datasets/issues/6600/events
https://github.com/huggingface/datasets/issues/6600
2,088,446,385
I_kwDODunzps58eymx
6,600
Loading CSV exported dataset has unexpected format
{ "avatar_url": "https://avatars.githubusercontent.com/u/59572247?v=4", "events_url": "https://api.github.com/users/OrianeN/events{/privacy}", "followers_url": "https://api.github.com/users/OrianeN/followers", "following_url": "https://api.github.com/users/OrianeN/following{/other_user}", "gists_url": "https://api.github.com/users/OrianeN/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/OrianeN", "id": 59572247, "login": "OrianeN", "node_id": "MDQ6VXNlcjU5NTcyMjQ3", "organizations_url": "https://api.github.com/users/OrianeN/orgs", "received_events_url": "https://api.github.com/users/OrianeN/received_events", "repos_url": "https://api.github.com/users/OrianeN/repos", "site_admin": false, "starred_url": "https://api.github.com/users/OrianeN/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/OrianeN/subscriptions", "type": "User", "url": "https://api.github.com/users/OrianeN" }
[]
open
false
null
[]
null
2
"2024-01-18T14:48:27"
"2024-01-23T14:42:32"
null
NONE
null
null
null
### Describe the bug I wanted to be able to save a HF dataset for translations and load it again in another script, but I'm a bit confused with the documentation and the result I've got so I'm opening this issue to ask if this behavior is as expected. ### Steps to reproduce the bug The documentation I've mainly consulted is https://huggingface.co/docs/datasets/v2.16.1/en/package_reference/loading_methods#datasets.load_dataset and https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset (where I've found `.to_csv()`) ```python # Load a dataset of translations test_dataset = load_dataset("opus100", name="en-fr", split="test") # Save with .to_csv() test_csv_path = "try_testset_save.csv" test_dataset.to_csv(test_csv_path) # Load dataset from the CSV loaded_dataset = load_dataset("csv", data_files=test_csv_path) print(test_dataset_fromfile[0]["translation"]) print(test_dataset_fromfile[0]["translation"]["en"]) ``` ``` Creating CSV from Arrow format: 100% 2/2 [00:00<00:00, 47.99ba/s] Downloading data files: 100% 1/1 [00:00<00:00, 65.33it/s] Extracting data files: 100% 1/1 [00:00<00:00, 42.10it/s] Generating train split: 2000/0 [00:00<00:00, 47486.09 examples/s] {'en': "She wasn't going to vaccinate her kid against polio, no way.", 'fr': 'Elle ne vaccinerait pas son enfant contre la polio. Pas question.'} --------------------------------------------------------------------------- TypeError Traceback (most recent call last) Cell In[29], line 11 9 loaded_dataset = load_dataset("csv", data_files=test_csv_path) 10 print(test_dataset_fromfile[0]["translation"]) ---> 11 print(test_dataset_fromfile[0]["translation"]["en"]) TypeError: string indices must be integers, not 'str' ``` ### Expected behavior Each translation was saved as a stringified dict like `"{'en': ""She wasn't going to vaccinate her kid against polio, no way."", 'fr': 'Elle ne vaccinerait pas son enfant contre la polio. Pas question.'}"` where I would have expected 2 columns (1st with English segments, and 2nd with French segments), and I was expecting `load_dataset` to infer the type of feature automatically as I haven't seen anything about it in the documentation. Do you have an example of how to effectively save and load datasets of translations ? ### Environment info - `datasets` version: 2.15.0 - Platform: Linux-3.10.0-1160.36.2.el7.x86_64-x86_64-with-glibc2.17 - Python version: 3.11.5 - `huggingface_hub` version: 0.16.4 - PyArrow version: 14.0.2 - Pandas version: 2.1.4 - `fsspec` version: 2023.10.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6600/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6600/timeline
null
null
false
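As a possible workaround for the round-trip problem above (a sketch, not an official recommendation): CSV cannot hold nested features, so the `translation` dict is stringified on export. Flattening the columns first, or exporting to Parquet instead, keeps the data recoverable:

```python
from datasets import load_dataset

test_dataset = load_dataset("opus100", name="en-fr", split="test")

# Option 1: flatten the nested dict into plain "en" / "fr" columns before CSV export.
flat = test_dataset.map(
    lambda ex: {"en": ex["translation"]["en"], "fr": ex["translation"]["fr"]},
    remove_columns=["translation"],
)
flat.to_csv("opus100_test_flat.csv")

# Option 2: Parquet preserves nested features across save/load.
test_dataset.to_parquet("opus100_test.parquet")
reloaded = load_dataset("parquet", data_files="opus100_test.parquet")
```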
https://api.github.com/repos/huggingface/datasets/issues/6599
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6599/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6599/comments
https://api.github.com/repos/huggingface/datasets/issues/6599/events
https://github.com/huggingface/datasets/issues/6599
2,086,684,664
I_kwDODunzps58YEf4
6,599
Easy way to segment into 30s snippets given an m4a file and a vtt file
{ "avatar_url": "https://avatars.githubusercontent.com/u/78278410?v=4", "events_url": "https://api.github.com/users/RonanKMcGovern/events{/privacy}", "followers_url": "https://api.github.com/users/RonanKMcGovern/followers", "following_url": "https://api.github.com/users/RonanKMcGovern/following{/other_user}", "gists_url": "https://api.github.com/users/RonanKMcGovern/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RonanKMcGovern", "id": 78278410, "login": "RonanKMcGovern", "node_id": "MDQ6VXNlcjc4Mjc4NDEw", "organizations_url": "https://api.github.com/users/RonanKMcGovern/orgs", "received_events_url": "https://api.github.com/users/RonanKMcGovern/received_events", "repos_url": "https://api.github.com/users/RonanKMcGovern/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RonanKMcGovern/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RonanKMcGovern/subscriptions", "type": "User", "url": "https://api.github.com/users/RonanKMcGovern" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
2
"2024-01-17T17:51:40"
"2024-01-23T10:42:17"
"2024-01-22T15:35:49"
NONE
null
null
null
### Feature request Uploading datasets is straightforward thanks to the ability to push Audio to the Hub. However, it would be nice if the data (text and audio) could be segmented while being pushed (if that is not possible already). ### Motivation It's easy to create a vtt file from an audio file. If there were auto-segmenting, this would make the creation of datasets much faster. ### Your contribution I have made a custom script to do this, but it's not all that clean; it uses librosa and pydub.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6599/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6599/timeline
null
not_planned
false
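Since the request above mentions a pydub-based script, a rough sketch of the 30-second chunking step is shown below. The input file name is a placeholder, ffmpeg is assumed to be installed for m4a decoding, and the harder part, aligning chunks with the .vtt captions, is left out:

```python
from pydub import AudioSegment

audio = AudioSegment.from_file("episode.m4a")  # placeholder input; needs ffmpeg
chunk_ms = 30_000  # 30-second snippets

for i, start in enumerate(range(0, len(audio), chunk_ms)):
    chunk = audio[start:start + chunk_ms]  # pydub slices are in milliseconds
    chunk.export(f"chunk_{i:04d}.wav", format="wav")
```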
https://api.github.com/repos/huggingface/datasets/issues/6598
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6598/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6598/comments
https://api.github.com/repos/huggingface/datasets/issues/6598/events
https://github.com/huggingface/datasets/issues/6598
2,084,236,605
I_kwDODunzps58Ou09
6,598
Unexpected keyword argument 'hf' when downloading CSV dataset from S3
{ "avatar_url": "https://avatars.githubusercontent.com/u/5592111?v=4", "events_url": "https://api.github.com/users/dguenms/events{/privacy}", "followers_url": "https://api.github.com/users/dguenms/followers", "following_url": "https://api.github.com/users/dguenms/following{/other_user}", "gists_url": "https://api.github.com/users/dguenms/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dguenms", "id": 5592111, "login": "dguenms", "node_id": "MDQ6VXNlcjU1OTIxMTE=", "organizations_url": "https://api.github.com/users/dguenms/orgs", "received_events_url": "https://api.github.com/users/dguenms/received_events", "repos_url": "https://api.github.com/users/dguenms/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dguenms/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dguenms/subscriptions", "type": "User", "url": "https://api.github.com/users/dguenms" }
[]
open
false
null
[]
null
4
"2024-01-16T15:16:01"
"2024-02-05T05:44:44"
null
NONE
null
null
null
### Describe the bug I receive this error message when using `load_dataset` with "csv" path and `dataset_files=s3://...`: ``` TypeError: Session.__init__() got an unexpected keyword argument 'hf' ``` I found a similar issue here: https://stackoverflow.com/questions/77596258/aws-issue-load-dataset-from-s3-fails-with-unexpected-keyword-argument-error-in Full stacktrace: ``` .../site-packages/datasets/load.py:2549: in load_dataset builder_instance.download_and_prepare( .../site-packages/datasets/builder.py:1005: in download_and_prepare self._download_and_prepare( .../site-packages/datasets/builder.py:1078: in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) .../site-packages/datasets/packaged_modules/csv/csv.py:147: in _split_generators data_files = dl_manager.download_and_extract(self.config.data_files) .../site-packages/datasets/download/download_manager.py:562: in download_and_extract return self.extract(self.download(url_or_urls)) .../site-packages/datasets/download/download_manager.py:426: in download downloaded_path_or_paths = map_nested( .../site-packages/datasets/utils/py_utils.py:466: in map_nested mapped = [ .../site-packages/datasets/utils/py_utils.py:467: in <listcomp> _single_map_nested((function, obj, types, None, True, None)) .../site-packages/datasets/utils/py_utils.py:387: in _single_map_nested mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] .../site-packages/datasets/utils/py_utils.py:387: in <listcomp> mapped = [_single_map_nested((function, v, types, None, True, None)) for v in pbar] .../site-packages/datasets/utils/py_utils.py:370: in _single_map_nested return function(data_struct) .../site-packages/datasets/download/download_manager.py:451: in _download out = cached_path(url_or_filename, download_config=download_config) .../site-packages/datasets/utils/file_utils.py:188: in cached_path output_path = get_from_cache( ...1/site-packages/datasets/utils/file_utils.py:511: in get_from_cache response = fsspec_head(url, storage_options=storage_options) .../site-packages/datasets/utils/file_utils.py:316: in fsspec_head fs, _, paths = fsspec.get_fs_token_paths(url, storage_options=storage_options) .../site-packages/fsspec/core.py:622: in get_fs_token_paths fs = filesystem(protocol, **inkwargs) .../site-packages/fsspec/registry.py:290: in filesystem return cls(**storage_options) .../site-packages/fsspec/spec.py:79: in __call__ obj = super().__call__(*args, **kwargs) .../site-packages/s3fs/core.py:187: in __init__ self.s3 = self.connect() _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = <s3fs.core.S3FileSystem object at 0x1500a1310>, refresh = True def connect(self, refresh=True): """ Establish S3 connection object. Parameters ---------- refresh : bool Whether to create new session/client, even if a previous one with the same parameters already exists. If False (default), an existing one will be used if possible """ if refresh is False: # back compat: we store whole FS instance now return self.s3 anon, key, secret, kwargs, ckwargs, token, ssl = ( self.anon, self.key, self.secret, self.kwargs, self.client_kwargs, self.token, self.use_ssl) if not self.passed_in_session: > self.session = botocore.session.Session(**self.kwargs) E TypeError: Session.__init__() got an unexpected keyword argument 'hf' ``` ### Steps to reproduce the bug 1. Assuming a valid CSV file located at `s3://bucket/data.csv` 2. 
Run the below code: ``` storage_options = { "key": "...", "secret": "...", "client_kwargs": { "endpoint_url": "...", } } load_dataset("csv", data_files="s3://bucket/data.csv", storage_options=storage_options) ``` Encountered in version `2.16.1` but also reproduced in `2.16.0` and `2.15.0`. Note: I encountered this in a unit test using a `moto` mock for S3, however since the error occurs before the session is instantiated, it should not be the issue. ### Expected behavior No exception is raised, the boto3 session is created successfully, and the CSV file is downloaded successfully and returned as a dataset. === After some research I found that `DownloadConfig` has a `__post_init__` method that always forces this value to be set in its `storage_options`, even though in case of an S3 location the storage options get passed on to the S3 Session which does not expect this parameter. I assume this parameter is needed when reading from the huggingface hub and should not be set in this context. Unfortunately there is nothing the user can do to work around it. Even if you manually do something like: ``` download_config = DownloadConfig() del download_config.storage_options["hf"] load_dataset("csv", data_files="s3://bucket/data.csv", download_config=download_config) ``` the library will still reinsert this parameter when `download_config = self.download_config.copy()` in line 418 of `download_manager.py` (`DownloadManager.download`). Therefore `load_dataset` currently cannot be used to read a dataset in CSV format from an S3 location. ### Environment info - `datasets` version: 2.16.1 - Platform: macOS-14.2.1-arm64-arm-64bit - Python version: 3.11.7 - `huggingface_hub` version: 0.20.2 - PyArrow version: 14.0.2 - Pandas version: 2.1.4 - `fsspec` version: 2023.10.0
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/6598/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6598/timeline
null
null
false
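Given that the report above finds no user-side way to strip the injected `hf` storage option, one assumed workaround is to bypass `load_dataset` for the S3 read entirely (sketch below; the bucket path and credentials are placeholders):

```python
import pandas as pd
from datasets import Dataset

storage_options = {
    "key": "...",
    "secret": "...",
    "client_kwargs": {"endpoint_url": "..."},
}

# pandas + s3fs fetch the CSV without going through datasets' DownloadConfig,
# so the extra "hf" keyword never reaches the botocore Session.
df = pd.read_csv("s3://bucket/data.csv", storage_options=storage_options)
ds = Dataset.from_pandas(df)
```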
https://api.github.com/repos/huggingface/datasets/issues/6597
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6597/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6597/comments
https://api.github.com/repos/huggingface/datasets/issues/6597/events
https://github.com/huggingface/datasets/issues/6597
2,083,708,521
I_kwDODunzps58Mt5p
6,597
Dataset.push_to_hub of a canonical dataset creates an additional dataset under the user namespace
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
6
"2024-01-16T11:27:07"
"2024-02-05T12:29:37"
"2024-02-05T12:29:37"
MEMBER
null
null
null
While using `Dataset.push_to_hub` on a canonical dataset, an additional dataset was created under my user namespace. ## Steps to reproduce the bug The command: ```python commit_info = ds.push_to_hub( "caner", config_name="default", commit_message="Convert dataset to Parquet", commit_description="Convert dataset to Parquet.", create_pr=True, token=token, ) ``` creates the additional dataset `albertvillanova/caner`.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6597/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6597/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/6596
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6596/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6596/comments
https://api.github.com/repos/huggingface/datasets/issues/6596/events
https://github.com/huggingface/datasets/pull/6596
2,083,108,156
PR_kwDODunzps5kJceH
6,596
Drop redundant None guard.
{ "avatar_url": "https://avatars.githubusercontent.com/u/5203025?v=4", "events_url": "https://api.github.com/users/xkszltl/events{/privacy}", "followers_url": "https://api.github.com/users/xkszltl/followers", "following_url": "https://api.github.com/users/xkszltl/following{/other_user}", "gists_url": "https://api.github.com/users/xkszltl/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/xkszltl", "id": 5203025, "login": "xkszltl", "node_id": "MDQ6VXNlcjUyMDMwMjU=", "organizations_url": "https://api.github.com/users/xkszltl/orgs", "received_events_url": "https://api.github.com/users/xkszltl/received_events", "repos_url": "https://api.github.com/users/xkszltl/repos", "site_admin": false, "starred_url": "https://api.github.com/users/xkszltl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xkszltl/subscriptions", "type": "User", "url": "https://api.github.com/users/xkszltl" }
[]
closed
false
null
[]
null
2
"2024-01-16T06:31:54"
"2024-01-16T17:16:16"
"2024-01-16T17:05:52"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6596.diff", "html_url": "https://github.com/huggingface/datasets/pull/6596", "merged_at": "2024-01-16T17:05:52Z", "patch_url": "https://github.com/huggingface/datasets/pull/6596.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6596" }
`xxx if xxx is not None else None` is no-op.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6596/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6596/timeline
null
null
true