url | repository_url | labels_url | comments_url | events_url | html_url | id | node_id | number | title | user | labels | state | locked | assignee | assignees | milestone | comments | created_at | updated_at | closed_at | author_association | active_lock_reason | draft | pull_request | body | reactions | timeline_url | performed_via_github_app |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
https://api.github.com/repos/huggingface/datasets/issues/371 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/371/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/371/comments | https://api.github.com/repos/huggingface/datasets/issues/371/events | https://github.com/huggingface/datasets/pull/371 | 654,668,242 | MDExOlB1bGxSZXF1ZXN0NDQ3MzQ4NDgw | 371 | Fix cached file path for metrics with different config names | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-10T10:02:24 | 2020-07-10T13:45:22 | 2020-07-10T13:45:20 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/371",
"html_url": "https://github.com/huggingface/datasets/pull/371",
"diff_url": "https://github.com/huggingface/datasets/pull/371.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/371.patch",
"merged_at": "2020-07-10T13:45:20"
} | The config name was not taken into account to build the cached file path.
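For illustration only, a hypothetical sketch (not the PR's actual code; all names below are made up) of what including the config name in the cached file name looks like, so that different configs no longer collide:
```python
import os

# Hypothetical sketch: build the metric cache file name from the config name too.
def metric_cache_path(cache_dir, metric_name, config_name, experiment_id=0, process_id=0):
    file_name = f"{experiment_id}-{metric_name}-{config_name}-{process_id}.arrow"
    return os.path.join(cache_dir, file_name)

print(metric_cache_path("/tmp/metrics", "glue", "mrpc"))
```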
It should fix #368 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/371/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/371/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/370 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/370/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/370/comments | https://api.github.com/repos/huggingface/datasets/issues/370/events | https://github.com/huggingface/datasets/pull/370 | 654,304,193 | MDExOlB1bGxSZXF1ZXN0NDQ3MDU3NTIw | 370 | Allow indexing Dataset via np.ndarray | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-09T19:43:15 | 2020-07-10T14:05:44 | 2020-07-10T14:05:43 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/370",
"html_url": "https://github.com/huggingface/datasets/pull/370",
"diff_url": "https://github.com/huggingface/datasets/pull/370.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/370.patch",
"merged_at": "2020-07-10T14:05:43"
} | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/370/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/370/timeline | null |
|
https://api.github.com/repos/huggingface/datasets/issues/369 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/369/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/369/comments | https://api.github.com/repos/huggingface/datasets/issues/369/events | https://github.com/huggingface/datasets/issues/369 | 654,186,890 | MDU6SXNzdWU2NTQxODY4OTA= | 369 | can't load local dataset: pyarrow.lib.ArrowInvalid: straddling object straddles two block boundaries | {
"login": "vegarab",
"id": 24683907,
"node_id": "MDQ6VXNlcjI0NjgzOTA3",
"avatar_url": "https://avatars.githubusercontent.com/u/24683907?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/vegarab",
"html_url": "https://github.com/vegarab",
"followers_url": "https://api.github.com/users/vegarab/followers",
"following_url": "https://api.github.com/users/vegarab/following{/other_user}",
"gists_url": "https://api.github.com/users/vegarab/gists{/gist_id}",
"starred_url": "https://api.github.com/users/vegarab/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/vegarab/subscriptions",
"organizations_url": "https://api.github.com/users/vegarab/orgs",
"repos_url": "https://api.github.com/users/vegarab/repos",
"events_url": "https://api.github.com/users/vegarab/events{/privacy}",
"received_events_url": "https://api.github.com/users/vegarab/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067388877,
"node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug",
"name": "dataset bug",
"color": "2edb81",
"default": false,
"description": "A bug in a dataset script provided in the library"
}
] | closed | false | null | [] | null | 2 | 2020-07-09T16:16:53 | 2020-12-15T23:07:22 | 2020-07-10T14:52:06 | CONTRIBUTOR | null | null | null | Trying to load a local SQuAD-formatted dataset (from a JSON file, about 60MB):
```
dataset = nlp.load_dataset(path='json', data_files={nlp.Split.TRAIN: ["./path/to/file.json"]})
```
causes
```
Traceback (most recent call last):
File "dataloader.py", line 9, in <module>
["./path/to/file.json"]})
File "/home/XXX/.conda/envs/torch/lib/python3.7/site-packages/nlp/load.py", line 524, in load_dataset
save_infos=save_infos,
File "/home/XXX/.conda/envs/torch/lib/python3.7/site-packages/nlp/builder.py", line 432, in download_and_prepare
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
File "/home/XXX/.conda/envs/torch/lib/python3.7/site-packages/nlp/builder.py", line 483, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/home/XXX/.conda/envs/torch/lib/python3.7/site-packages/nlp/builder.py", line 719, in _prepare_split
for key, table in utils.tqdm(generator, unit=" tables", leave=False):
File "/home/XXX/.conda/envs/torch/lib/python3.7/site-packages/tqdm/std.py", line 1129, in __iter__
for obj in iterable:
File "/home/XXX/.conda/envs/torch/lib/python3.7/site-packages/nlp/datasets/json/88c1bc5c68489f7eda549ed05a5a738527c613b3e7a4ee3524d9d233353a949b/json.py", line 53, in _generate_tables
file, read_options=self.config.pa_read_options, parse_options=self.config.pa_parse_options,
File "pyarrow/_json.pyx", line 191, in pyarrow._json.read_json
File "pyarrow/error.pxi", line 85, in pyarrow.lib.check_status
pyarrow.lib.ArrowInvalid: straddling object straddles two block boundaries (try to increase block size?)
```
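As a side note for anyone hitting the same message: the "increase block size" hint maps to pyarrow's JSON `ReadOptions`. A minimal standalone sketch to test that hint outside of `nlp` (the path is the same placeholder as above, and pyarrow's JSON reader expects newline-delimited JSON):
```python
import pyarrow.json as paj

# Read the file directly with a larger parse block (64 MiB here) to check whether
# the "straddling object" error is purely a block-size issue.
table = paj.read_json(
    "./path/to/file.json",
    read_options=paj.ReadOptions(block_size=64 << 20),  # bytes per parsed block
)
print(table.num_rows)
```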
I haven't been able to find any reports of this specific pyarrow error here or elsewhere. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/369/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/369/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/368 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/368/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/368/comments | https://api.github.com/repos/huggingface/datasets/issues/368/events | https://github.com/huggingface/datasets/issues/368 | 654,087,251 | MDU6SXNzdWU2NTQwODcyNTE= | 368 | load_metric can't acquire lock anymore | {
"login": "ydshieh",
"id": 2521628,
"node_id": "MDQ6VXNlcjI1MjE2Mjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/2521628?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ydshieh",
"html_url": "https://github.com/ydshieh",
"followers_url": "https://api.github.com/users/ydshieh/followers",
"following_url": "https://api.github.com/users/ydshieh/following{/other_user}",
"gists_url": "https://api.github.com/users/ydshieh/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ydshieh/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ydshieh/subscriptions",
"organizations_url": "https://api.github.com/users/ydshieh/orgs",
"repos_url": "https://api.github.com/users/ydshieh/repos",
"events_url": "https://api.github.com/users/ydshieh/events{/privacy}",
"received_events_url": "https://api.github.com/users/ydshieh/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-09T14:04:09 | 2020-07-10T13:45:20 | 2020-07-10T13:45:20 | NONE | null | null | null | I can't load metric (glue) anymore after an error in a previous run. I even removed the whole cache folder `/home/XXX/.cache/huggingface/`, and the issue persisted. What are the steps to fix this?
```
Traceback (most recent call last):
File "/home/XXX/miniconda3/envs/ML-DL-py-3.7/lib/python3.7/site-packages/nlp/metric.py", line 101, in __init__
self.filelock.acquire(timeout=1)
File "/home/XXX/miniconda3/envs/ML-DL-py-3.7/lib/python3.7/site-packages/filelock.py", line 278, in acquire
raise Timeout(self._lock_file)
filelock.Timeout: The file lock '/home/XXX/.cache/huggingface/metrics/glue/1.0.0/1-glue-0.arrow.lock' could not be acquired.
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "examples_huggingface_nlp.py", line 268, in <module>
main()
File "examples_huggingface_nlp.py", line 242, in main
dataset, metric = get_dataset_metric(glue_task)
File "examples_huggingface_nlp.py", line 77, in get_dataset_metric
metric = nlp.load_metric('glue', glue_config, experiment_id=1)
File "/home/XXX/miniconda3/envs/ML-DL-py-3.7/lib/python3.7/site-packages/nlp/load.py", line 440, in load_metric
**metric_init_kwargs,
File "/home/XXX/miniconda3/envs/ML-DL-py-3.7/lib/python3.7/site-packages/nlp/metric.py", line 104, in __init__
"Cannot acquire lock, caching file might be used by another process, "
ValueError: Cannot acquire lock, caching file might be used by another process, you should setup a unique 'experiment_id' for this run.
I0709 15:54:41.008838 139854118430464 filelock.py:318] Lock 139852058030936 released on /home/XXX/.cache/huggingface/metrics/glue/1.0.0/1-glue-0.arrow.lock
```
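For reference, the immediate workaround named in the error message is to pass a unique `experiment_id` to `load_metric`, as already done with `experiment_id=1` in the traceback above. A minimal sketch (the GLUE config name here is just an example):
```python
import uuid

import nlp

# Give each run its own experiment_id so stale or concurrent lock files don't collide.
metric = nlp.load_metric("glue", "mrpc", experiment_id=str(uuid.uuid4()))
```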
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/368/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/368/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/367 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/367/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/367/comments | https://api.github.com/repos/huggingface/datasets/issues/367/events | https://github.com/huggingface/datasets/pull/367 | 654,012,984 | MDExOlB1bGxSZXF1ZXN0NDQ2ODIxNTAz | 367 | Update Xtreme to add PAWS-X es | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-09T12:14:37 | 2020-07-09T12:37:11 | 2020-07-09T12:37:10 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/367",
"html_url": "https://github.com/huggingface/datasets/pull/367",
"diff_url": "https://github.com/huggingface/datasets/pull/367.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/367.patch",
"merged_at": "2020-07-09T12:37:10"
} | This PR adds the `PAWS-X.es` subset to the Xtreme dataset #362 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/367/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/367/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/366 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/366/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/366/comments | https://api.github.com/repos/huggingface/datasets/issues/366/events | https://github.com/huggingface/datasets/pull/366 | 653,954,896 | MDExOlB1bGxSZXF1ZXN0NDQ2NzcyODE2 | 366 | Add quora dataset | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-09T10:34:22 | 2020-07-13T17:35:21 | 2020-07-13T17:35:21 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/366",
"html_url": "https://github.com/huggingface/datasets/pull/366",
"diff_url": "https://github.com/huggingface/datasets/pull/366.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/366.patch",
"merged_at": "2020-07-13T17:35:21"
} | Added the [Quora question pairs dataset](https://www.quora.com/q/quoradata/First-Quora-Dataset-Release-Question-Pairs).
Implementation Notes:
- I used the original version provided on the quora website. There's also a [Kaggle competition](https://www.kaggle.com/c/quora-question-pairs) which has a nice train/test split but I can't find an easy way to download it.
- I've made the questions into a list:
```python
{
"questions": [
{"id":0, "text": "Is this an example question?"},
{"id":1, "text": "Is this a sample question?"},
],
...
}
```
rather than:
```python
{
"question1": "Is this an example question?",
"question2": "Is this a sample question?"
"qid0": 0
"qid1": 1
...
}
```
Not sure if this was the right call.
- Can't find a good citation for this dataset | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/366/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/366/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/365 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/365/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/365/comments | https://api.github.com/repos/huggingface/datasets/issues/365/events | https://github.com/huggingface/datasets/issues/365 | 653,845,964 | MDU6SXNzdWU2NTM4NDU5NjQ= | 365 | How to augment data ? | {
"login": "astariul",
"id": 43774355,
"node_id": "MDQ6VXNlcjQzNzc0MzU1",
"avatar_url": "https://avatars.githubusercontent.com/u/43774355?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/astariul",
"html_url": "https://github.com/astariul",
"followers_url": "https://api.github.com/users/astariul/followers",
"following_url": "https://api.github.com/users/astariul/following{/other_user}",
"gists_url": "https://api.github.com/users/astariul/gists{/gist_id}",
"starred_url": "https://api.github.com/users/astariul/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/astariul/subscriptions",
"organizations_url": "https://api.github.com/users/astariul/orgs",
"repos_url": "https://api.github.com/users/astariul/repos",
"events_url": "https://api.github.com/users/astariul/events{/privacy}",
"received_events_url": "https://api.github.com/users/astariul/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 6 | 2020-07-09T07:52:37 | 2020-07-10T09:12:07 | 2020-07-10T08:22:15 | NONE | null | null | null | Is there any clean way to augment data ?
For now my workaround is to use batched map, like this:
```python
def aug(samples):
# Simply copy the existing data to have x2 amount of data
for k, v in samples.items():
samples[k].extend(v)
return samples
dataset = dataset.map(aug, batched=True)
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/365/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/365/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/364 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/364/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/364/comments | https://api.github.com/repos/huggingface/datasets/issues/364/events | https://github.com/huggingface/datasets/pull/364 | 653,821,597 | MDExOlB1bGxSZXF1ZXN0NDQ2NjY0NzM5 | 364 | add MS MARCO dataset | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 7 | 2020-07-09T07:11:19 | 2020-08-06T06:15:49 | 2020-08-06T06:15:48 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/364",
"html_url": "https://github.com/huggingface/datasets/pull/364",
"diff_url": "https://github.com/huggingface/datasets/pull/364.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/364.patch",
"merged_at": "2020-08-06T06:15:48"
} | This PR adds the MS MARCO dataset as requested in issue #336. MS MARCO has multiple tasks, including:
- Passage and Document Retrieval
- Keyphrase Extraction
- QA and NLG
This PR only adds the 2 versions of the QA and NLG task dataset, which were released with the original paper: https://arxiv.org/pdf/1611.09268.pdf
Tests are failing because of the dummy data. I tried to fix it without success. Can you please have a look at it? @patrickvonplaten , @lhoestq | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/364/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/364/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/363 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/363/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/363/comments | https://api.github.com/repos/huggingface/datasets/issues/363/events | https://github.com/huggingface/datasets/pull/363 | 653,821,172 | MDExOlB1bGxSZXF1ZXN0NDQ2NjY0NDIy | 363 | Adding support for generic multi dimensional tensors and auxillary image data for multimodal datasets | {
"login": "eltoto1219",
"id": 14030663,
"node_id": "MDQ6VXNlcjE0MDMwNjYz",
"avatar_url": "https://avatars.githubusercontent.com/u/14030663?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/eltoto1219",
"html_url": "https://github.com/eltoto1219",
"followers_url": "https://api.github.com/users/eltoto1219/followers",
"following_url": "https://api.github.com/users/eltoto1219/following{/other_user}",
"gists_url": "https://api.github.com/users/eltoto1219/gists{/gist_id}",
"starred_url": "https://api.github.com/users/eltoto1219/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/eltoto1219/subscriptions",
"organizations_url": "https://api.github.com/users/eltoto1219/orgs",
"repos_url": "https://api.github.com/users/eltoto1219/repos",
"events_url": "https://api.github.com/users/eltoto1219/events{/privacy}",
"received_events_url": "https://api.github.com/users/eltoto1219/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 23 | 2020-07-09T07:10:30 | 2020-08-24T09:59:35 | 2020-08-24T09:59:35 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/363",
"html_url": "https://github.com/huggingface/datasets/pull/363",
"diff_url": "https://github.com/huggingface/datasets/pull/363.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/363.patch",
"merged_at": "2020-08-24T09:59:35"
} | nlp/features.py:
The main factory class is MultiArray, every single time this class is called, a corresponding pyarrow extension array and type class is generated (and added to the list of globals for future use) for a given root data type and set of dimensions/shape. I provide examples on working with this in datasets/lxmert_pretraining_beta/test_multi_array.py
src/nlp/arrow_writer.py
I had to add a method for writing batches that include extension array types, because despite having a unique class for each multidimensional array shape, pyarrow is unable to write any other "array-like" data class to a batch object unless it is of type pyarrow.ExtensionType. The problem with this is that when writing multiple batches, the order of the schema and the data to be written gets mixed up (the pyarrow datatype in the schema only refers to a generic ExtensionArray, but each ExtensionArray subclass has a different shape)... possibly I am missing something here and would be grateful if anyone else could take a look!
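For readers unfamiliar with the mechanism discussed above, here is a rough, self-contained sketch of a fixed-shape pyarrow extension type. This is not the PR's code; the class name and registration name are made up, and it assumes the pyarrow >= 0.15 extension-type API:
```python
import json

import pyarrow as pa

class FixedShapeArrayType(pa.ExtensionType):
    """Toy extension type: a list storage column tagged with a fixed shape."""

    def __init__(self, shape, value_type=pa.float32()):
        self.shape = tuple(shape)
        super().__init__(pa.list_(value_type), "sketch.fixed_shape_array")

    def __arrow_ext_serialize__(self):
        # Persist the shape in the field metadata.
        return json.dumps(self.shape).encode()

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        return cls(json.loads(serialized.decode()), storage_type.value_type)

# Registering the type is what lets pyarrow round-trip it through record batches.
pa.register_extension_type(FixedShapeArrayType((2, 3)))
```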
datasets/lxmert_pretraining_beta/lxmert_pretraining_beta.py & datasets/lxmert_pretraining_beta/to_arrow_data.py:
I have begun adding the data from the original LXMERT paper (https://arxiv.org/abs/1908.07490) hosted here: (https://github.com/airsplay/lxmert). The reason I am not pulling from the source of truth for each individual dataset is because it seems that there will also need to be functionality to aggregate multimodal datasets to create a pre-training corpus (:sleepy: ).
For now, this is just being used to test and run edge cases for the MultiArray feature, so I've labeled it as "beta_pretraining"!
(still working on the pretraining, just wanted to push out the new functionality sooner than later) | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/363/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/363/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/362 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/362/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/362/comments | https://api.github.com/repos/huggingface/datasets/issues/362/events | https://github.com/huggingface/datasets/issues/362 | 653,766,245 | MDU6SXNzdWU2NTM3NjYyNDU= | 362 | [dateset subset missing] xtreme paws-x | {
"login": "jerryIsHere",
"id": 50871412,
"node_id": "MDQ6VXNlcjUwODcxNDEy",
"avatar_url": "https://avatars.githubusercontent.com/u/50871412?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jerryIsHere",
"html_url": "https://github.com/jerryIsHere",
"followers_url": "https://api.github.com/users/jerryIsHere/followers",
"following_url": "https://api.github.com/users/jerryIsHere/following{/other_user}",
"gists_url": "https://api.github.com/users/jerryIsHere/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jerryIsHere/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jerryIsHere/subscriptions",
"organizations_url": "https://api.github.com/users/jerryIsHere/orgs",
"repos_url": "https://api.github.com/users/jerryIsHere/repos",
"events_url": "https://api.github.com/users/jerryIsHere/events{/privacy}",
"received_events_url": "https://api.github.com/users/jerryIsHere/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-09T05:04:54 | 2020-07-09T12:38:42 | 2020-07-09T12:38:42 | CONTRIBUTOR | null | null | null | I tried `nlp.load_dataset('xtreme', 'PAWS-X.es')` but got a ValueError.
It turns out that the subset for Spanish is missing:
https://github.com/google-research-datasets/paws/tree/master/pawsx | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/362/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/362/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/361 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/361/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/361/comments | https://api.github.com/repos/huggingface/datasets/issues/361/events | https://github.com/huggingface/datasets/issues/361 | 653,757,376 | MDU6SXNzdWU2NTM3NTczNzY= | 361 | 🐛 [Metrics] ROUGE is non-deterministic | {
"login": "astariul",
"id": 43774355,
"node_id": "MDQ6VXNlcjQzNzc0MzU1",
"avatar_url": "https://avatars.githubusercontent.com/u/43774355?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/astariul",
"html_url": "https://github.com/astariul",
"followers_url": "https://api.github.com/users/astariul/followers",
"following_url": "https://api.github.com/users/astariul/following{/other_user}",
"gists_url": "https://api.github.com/users/astariul/gists{/gist_id}",
"starred_url": "https://api.github.com/users/astariul/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/astariul/subscriptions",
"organizations_url": "https://api.github.com/users/astariul/orgs",
"repos_url": "https://api.github.com/users/astariul/repos",
"events_url": "https://api.github.com/users/astariul/events{/privacy}",
"received_events_url": "https://api.github.com/users/astariul/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 6 | 2020-07-09T04:39:37 | 2020-07-20T23:48:37 | 2020-07-20T23:48:37 | NONE | null | null | null | If I run the ROUGE metric 2 times, with same predictions / references, the scores are slightly different.
Refer to [this Colab notebook](https://colab.research.google.com/drive/1wRssNXgb9ldcp4ulwj-hMJn0ywhDOiDy?usp=sharing) for reproducing the problem.
Example of F-scores for ROUGE-1, ROUGE-2, ROUGE-L in 2 different runs:
> ['0.3350', '0.1470', '0.2329']
> ['0.3358', '0.1451', '0.2332']
---
Why is ROUGE not deterministic? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/361/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/361/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/360 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/360/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/360/comments | https://api.github.com/repos/huggingface/datasets/issues/360/events | https://github.com/huggingface/datasets/issues/360 | 653,687,176 | MDU6SXNzdWU2NTM2ODcxNzY= | 360 | [Feature request] Add dataset.ragged_map() function for many-to-many transformations | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-09T01:04:43 | 2020-07-09T19:31:51 | 2020-07-09T19:31:51 | CONTRIBUTOR | null | null | null | `dataset.map()` enables one-to-one transformations. Input one example and output one example. This is helpful for tokenizing and cleaning individual lines.
`dataset.filter()` enables one-to-(one-or-none) transformations. Input one example and output either zero/one example. This is helpful for removing portions from the dataset.
However, some dataset transformations are many-to-many. Consider constructing BERT training examples from a dataset of sentences, where you map `["a", "b", "c"] -> ["a[SEP]b", "a[SEP]c", "b[SEP]c", "c[SEP]b", ...]`
I propose a more general `ragged_map()` method that takes in a batch of examples of length `N` and returns a batch of examples of length `M`. This is different from the `map(batched=True)` method, which takes examples of length `N` and returns a batch of length `N`, processing individual examples in parallel. I don't have a clear vision of how this would be implemented efficiently and lazily, but would love to hear the community's feedback on this.
My specific use case is creating an end-to-end ELECTRA data pipeline. I would like to take the raw WikiText data and generate training examples from this using the `ragged_map()` method, then export to TFRecords and train quickly. This would be a reproducible pipeline with no bash scripts. Currently I'm relying on scripts like https://github.com/google-research/electra/blob/master/build_pretraining_dataset.py, which are less general.
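As a plain-Python illustration of the expansion in the example above (this is not the proposed `ragged_map()` API, and the column name is made up):
```python
from itertools import permutations

def expand_to_pairs(batch):
    # {"text": ["a", "b", "c"]} -> {"text": ["a[SEP]b", "a[SEP]c", "b[SEP]a", ...]}
    pairs = [f"{x}[SEP]{y}" for x, y in permutations(batch["text"], 2)]
    return {"text": pairs}

print(expand_to_pairs({"text": ["a", "b", "c"]}))
```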
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/360/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/360/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/359 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/359/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/359/comments | https://api.github.com/repos/huggingface/datasets/issues/359/events | https://github.com/huggingface/datasets/issues/359 | 653,656,279 | MDU6SXNzdWU2NTM2NTYyNzk= | 359 | ArrowBasedBuilder _prepare_split parse_schema breaks on nested structures | {
"login": "timothyjlaurent",
"id": 2000204,
"node_id": "MDQ6VXNlcjIwMDAyMDQ=",
"avatar_url": "https://avatars.githubusercontent.com/u/2000204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/timothyjlaurent",
"html_url": "https://github.com/timothyjlaurent",
"followers_url": "https://api.github.com/users/timothyjlaurent/followers",
"following_url": "https://api.github.com/users/timothyjlaurent/following{/other_user}",
"gists_url": "https://api.github.com/users/timothyjlaurent/gists{/gist_id}",
"starred_url": "https://api.github.com/users/timothyjlaurent/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/timothyjlaurent/subscriptions",
"organizations_url": "https://api.github.com/users/timothyjlaurent/orgs",
"repos_url": "https://api.github.com/users/timothyjlaurent/repos",
"events_url": "https://api.github.com/users/timothyjlaurent/events{/privacy}",
"received_events_url": "https://api.github.com/users/timothyjlaurent/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 4 | 2020-07-08T23:24:05 | 2020-07-10T14:52:06 | 2020-07-10T14:52:06 | NONE | null | null | null | I tried using the Json dataloader to load some JSON lines files, but I get an exception in the parse_schema function.
```
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-23-9aecfbee53bd> in <module>
55 from nlp import load_dataset
56
---> 57 ds = load_dataset("../text2struct/model/dataset_builder.py", data_files=rel_datafiles)
58
59
~/.virtualenvs/inv-text2struct/lib/python3.6/site-packages/nlp/load.py in load_dataset(path, name, version, data_dir, data_files, split, cache_dir, download_config, download_mode, ignore_verifications, save_infos, **config_kwargs)
522 download_mode=download_mode,
523 ignore_verifications=ignore_verifications,
--> 524 save_infos=save_infos,
525 )
526
~/.virtualenvs/inv-text2struct/lib/python3.6/site-packages/nlp/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, save_infos, try_from_hf_gcs, dl_manager, **download_and_prepare_kwargs)
430 verify_infos = not save_infos and not ignore_verifications
431 self._download_and_prepare(
--> 432 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
433 )
434 # Sync info
~/.virtualenvs/inv-text2struct/lib/python3.6/site-packages/nlp/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
481 try:
482 # Prepare split will record examples associated to the split
--> 483 self._prepare_split(split_generator, **prepare_split_kwargs)
484 except OSError:
485 raise OSError("Cannot find data file. " + (self.manual_download_instructions or ""))
~/.virtualenvs/inv-text2struct/lib/python3.6/site-packages/nlp/builder.py in _prepare_split(self, split_generator)
736 schema_dict[field.name] = Value(str(field.type))
737
--> 738 parse_schema(writer.schema, features)
739 self.info.features = Features(features)
740
~/.virtualenvs/inv-text2struct/lib/python3.6/site-packages/nlp/builder.py in parse_schema(schema, schema_dict)
734 parse_schema(field.type.value_type, schema_dict[field.name])
735 else:
--> 736 schema_dict[field.name] = Value(str(field.type))
737
738 parse_schema(writer.schema, features)
<string> in __init__(self, dtype, id, _type)
~/.virtualenvs/inv-text2struct/lib/python3.6/site-packages/nlp/features.py in __post_init__(self)
55
56 def __post_init__(self):
---> 57 self.pa_type = string_to_arrow(self.dtype)
58
59 def __call__(self):
~/.virtualenvs/inv-text2struct/lib/python3.6/site-packages/nlp/features.py in string_to_arrow(type_str)
32 if str(type_str + "_") not in pa.__dict__:
33 raise ValueError(
---> 34 f"Neither {type_str} nor {type_str + '_'} seems to be a pyarrow data type. "
35 f"Please make sure to use a correct data type, see: "
36 f"https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions"
ValueError: Neither list<item: string> nor list<item: string>_ seems to be a pyarrow data type. Please make sure to use a correct data type, see: https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions
```
If I create the dataset imperatively, using a pyarrow table, the dataset is created correctly. If I override the `_prepare_split` method to avoid calling the validate schema, the dataset can load as well. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/359/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/359/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/358 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/358/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/358/comments | https://api.github.com/repos/huggingface/datasets/issues/358/events | https://github.com/huggingface/datasets/pull/358 | 653,645,121 | MDExOlB1bGxSZXF1ZXN0NDQ2NTI0NjQ5 | 358 | Starting to add some real doc | {
"login": "thomwolf",
"id": 7353373,
"node_id": "MDQ6VXNlcjczNTMzNzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomwolf",
"html_url": "https://github.com/thomwolf",
"followers_url": "https://api.github.com/users/thomwolf/followers",
"following_url": "https://api.github.com/users/thomwolf/following{/other_user}",
"gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions",
"organizations_url": "https://api.github.com/users/thomwolf/orgs",
"repos_url": "https://api.github.com/users/thomwolf/repos",
"events_url": "https://api.github.com/users/thomwolf/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomwolf/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-08T22:53:03 | 2020-07-14T09:58:17 | 2020-07-14T09:58:15 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/358",
"html_url": "https://github.com/huggingface/datasets/pull/358",
"diff_url": "https://github.com/huggingface/datasets/pull/358.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/358.patch",
"merged_at": "2020-07-14T09:58:15"
} | Adding a lot of documentation for:
- load a dataset
- explore the dataset object
- process data with the dataset
- add a new dataset script
- share a dataset script
- full package reference
This version of the doc can be explored here: https://2219-250213286-gh.circle-artifacts.com/0/docs/_build/html/index.html
Also:
- fix a bug in `train_test_split`
- update the `csv` script
- add a verbose argument to the dataset processing methods
Still missing:
- doc for the metrics
- how to directly upload a community provided dataset with the CLI
- clean up more docstrings
- add the `features` argument to `load_dataset` (should be another PR) | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/358/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/358/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/357 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/357/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/357/comments | https://api.github.com/repos/huggingface/datasets/issues/357/events | https://github.com/huggingface/datasets/pull/357 | 653,642,292 | MDExOlB1bGxSZXF1ZXN0NDQ2NTIyMzU2 | 357 | Add hashes to cnn_dailymail | {
"login": "jbragg",
"id": 2238344,
"node_id": "MDQ6VXNlcjIyMzgzNDQ=",
"avatar_url": "https://avatars.githubusercontent.com/u/2238344?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jbragg",
"html_url": "https://github.com/jbragg",
"followers_url": "https://api.github.com/users/jbragg/followers",
"following_url": "https://api.github.com/users/jbragg/following{/other_user}",
"gists_url": "https://api.github.com/users/jbragg/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jbragg/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jbragg/subscriptions",
"organizations_url": "https://api.github.com/users/jbragg/orgs",
"repos_url": "https://api.github.com/users/jbragg/repos",
"events_url": "https://api.github.com/users/jbragg/events{/privacy}",
"received_events_url": "https://api.github.com/users/jbragg/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-08T22:45:21 | 2020-07-13T14:16:38 | 2020-07-13T14:16:38 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/357",
"html_url": "https://github.com/huggingface/datasets/pull/357",
"diff_url": "https://github.com/huggingface/datasets/pull/357.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/357.patch",
"merged_at": "2020-07-13T14:16:38"
} | The URL hashes are helpful for comparing results from other sources. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/357/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/357/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/356 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/356/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/356/comments | https://api.github.com/repos/huggingface/datasets/issues/356/events | https://github.com/huggingface/datasets/pull/356 | 653,537,388 | MDExOlB1bGxSZXF1ZXN0NDQ2NDM3MDQ5 | 356 | Add text dataset | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-08T19:21:53 | 2020-07-10T14:19:03 | 2020-07-10T14:19:03 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/356",
"html_url": "https://github.com/huggingface/datasets/pull/356",
"diff_url": "https://github.com/huggingface/datasets/pull/356.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/356.patch",
"merged_at": "2020-07-10T14:19:03"
} | Usage:
```python
from nlp import load_dataset
dset = load_dataset("text", data_files="/path/to/file.txt")["train"]
```
I created a dummy_data.zip which contains three files: `train.txt`, `test.txt`, `dev.txt`. Each of these contains two lines. It passes
```bash
RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_all_configs_text
```
but I would like a second set of eyes to ensure I did it right.
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/356/reactions",
"total_count": 6,
"+1": 2,
"-1": 0,
"laugh": 0,
"hooray": 3,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/356/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/355 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/355/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/355/comments | https://api.github.com/repos/huggingface/datasets/issues/355/events | https://github.com/huggingface/datasets/issues/355 | 653,451,013 | MDU6SXNzdWU2NTM0NTEwMTM= | 355 | can't load SNLI dataset | {
"login": "jxmorris12",
"id": 13238952,
"node_id": "MDQ6VXNlcjEzMjM4OTUy",
"avatar_url": "https://avatars.githubusercontent.com/u/13238952?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jxmorris12",
"html_url": "https://github.com/jxmorris12",
"followers_url": "https://api.github.com/users/jxmorris12/followers",
"following_url": "https://api.github.com/users/jxmorris12/following{/other_user}",
"gists_url": "https://api.github.com/users/jxmorris12/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jxmorris12/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jxmorris12/subscriptions",
"organizations_url": "https://api.github.com/users/jxmorris12/orgs",
"repos_url": "https://api.github.com/users/jxmorris12/repos",
"events_url": "https://api.github.com/users/jxmorris12/events{/privacy}",
"received_events_url": "https://api.github.com/users/jxmorris12/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2020-07-08T16:54:14 | 2020-07-18T05:15:57 | 2020-07-15T07:59:01 | CONTRIBUTOR | null | null | null | `nlp` seems to load `snli` from some URL based on nlp.stanford.edu. This subdomain is frequently down -- including right now, when I'd like to load `snli` in a Colab notebook, but can't.
Is there a plan to move these datasets to huggingface servers for a more stable solution?
Btw, here's the stack trace:
```
File "/content/nlp/src/nlp/builder.py", line 432, in download_and_prepare
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
File "/content/nlp/src/nlp/builder.py", line 466, in _download_and_prepare
split_generators = self._split_generators(dl_manager, **split_generators_kwargs)
File "/content/nlp/src/nlp/datasets/snli/e417f6f2e16254938d977a17ed32f3998f5b23e4fcab0f6eb1d28784f23ea60d/snli.py", line 76, in _split_generators
dl_dir = dl_manager.download_and_extract(_DATA_URL)
File "/content/nlp/src/nlp/utils/download_manager.py", line 217, in download_and_extract
return self.extract(self.download(url_or_urls))
File "/content/nlp/src/nlp/utils/download_manager.py", line 156, in download
lambda url: cached_path(url, download_config=self._download_config,), url_or_urls,
File "/content/nlp/src/nlp/utils/py_utils.py", line 190, in map_nested
return function(data_struct)
File "/content/nlp/src/nlp/utils/download_manager.py", line 156, in <lambda>
lambda url: cached_path(url, download_config=self._download_config,), url_or_urls,
File "/content/nlp/src/nlp/utils/file_utils.py", line 198, in cached_path
local_files_only=download_config.local_files_only,
File "/content/nlp/src/nlp/utils/file_utils.py", line 356, in get_from_cache
raise ConnectionError("Couldn't reach {}".format(url))
ConnectionError: Couldn't reach https://nlp.stanford.edu/projects/snli/snli_1.0.zip
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/355/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/355/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/354 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/354/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/354/comments | https://api.github.com/repos/huggingface/datasets/issues/354/events | https://github.com/huggingface/datasets/pull/354 | 653,357,617 | MDExOlB1bGxSZXF1ZXN0NDQ2MjkyMTc4 | 354 | More faiss control | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-08T14:45:20 | 2020-07-09T09:54:54 | 2020-07-09T09:54:51 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/354",
"html_url": "https://github.com/huggingface/datasets/pull/354",
"diff_url": "https://github.com/huggingface/datasets/pull/354.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/354.patch",
"merged_at": "2020-07-09T09:54:51"
} | Allow users to specify a faiss index they created themselves, as indexes can for example be composite | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/354/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/354/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/353 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/353/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/353/comments | https://api.github.com/repos/huggingface/datasets/issues/353/events | https://github.com/huggingface/datasets/issues/353 | 653,250,611 | MDU6SXNzdWU2NTMyNTA2MTE= | 353 | [Dataset requests] New datasets for Text Classification | {
"login": "thomwolf",
"id": 7353373,
"node_id": "MDQ6VXNlcjczNTMzNzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomwolf",
"html_url": "https://github.com/thomwolf",
"followers_url": "https://api.github.com/users/thomwolf/followers",
"following_url": "https://api.github.com/users/thomwolf/following{/other_user}",
"gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions",
"organizations_url": "https://api.github.com/users/thomwolf/orgs",
"repos_url": "https://api.github.com/users/thomwolf/repos",
"events_url": "https://api.github.com/users/thomwolf/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomwolf/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892884,
"node_id": "MDU6TGFiZWwxOTM1ODkyODg0",
"url": "https://api.github.com/repos/huggingface/datasets/labels/help%20wanted",
"name": "help wanted",
"color": "008672",
"default": true,
"description": "Extra attention is needed"
},
{
"id": 2067376369,
"node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request",
"name": "dataset request",
"color": "e99695",
"default": false,
"description": "Requesting to add a new dataset"
}
] | open | false | null | [] | null | 8 | 2020-07-08T12:17:58 | 2020-10-20T03:41:23 | null | MEMBER | null | null | null | We are missing a few datasets for Text Classification which is an important field.
Namely, it would be really nice to add:
- TREC-6 dataset (see here for instance: https://pytorchnlp.readthedocs.io/en/latest/source/torchnlp.datasets.html#torchnlp.datasets.trec_dataset) **[done]**
- Yelp-5
- Movie review (Movie Review (MR) dataset [156]) **[done (same as rotten_tomatoes)]**
- SST (Stanford Sentiment Treebank) **[include in glue]**
- Multi-Perspective Question Answering (MPQA) dataset **[require authentication (indeed manual download)]**
- Amazon. This is a popular corpus of product reviews collected from the Amazon website [159]. It contains labels for both binary classification and multi-class (5-class) classification
- 20 Newsgroups. The 20 Newsgroups dataset **[done]**
- Sogou News dataset **[done]**
- Reuters news. The Reuters-21578 dataset [165] **[done]**
- DBpedia. The DBpedia dataset [170]
- Ohsumed. The Ohsumed collection [171] is a subset of the MEDLINE database
- EUR-Lex. The EUR-Lex dataset
- WOS. The Web Of Science (WOS) dataset **[done]**
- PubMed. PubMed [173]
- TREC-QA. TREC-QA
- Quora. The Quora dataset [180]
All these datasets are cited in https://arxiv.org/abs/2004.03705 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/353/reactions",
"total_count": 6,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 3,
"rocket": 3,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/353/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/352 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/352/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/352/comments | https://api.github.com/repos/huggingface/datasets/issues/352/events | https://github.com/huggingface/datasets/pull/352 | 653,128,883 | MDExOlB1bGxSZXF1ZXN0NDQ2MTA1Mjky | 352 | 🐛[BugFix]fix seqeval | {
"login": "AlongWY",
"id": 20281571,
"node_id": "MDQ6VXNlcjIwMjgxNTcx",
"avatar_url": "https://avatars.githubusercontent.com/u/20281571?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/AlongWY",
"html_url": "https://github.com/AlongWY",
"followers_url": "https://api.github.com/users/AlongWY/followers",
"following_url": "https://api.github.com/users/AlongWY/following{/other_user}",
"gists_url": "https://api.github.com/users/AlongWY/gists{/gist_id}",
"starred_url": "https://api.github.com/users/AlongWY/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/AlongWY/subscriptions",
"organizations_url": "https://api.github.com/users/AlongWY/orgs",
"repos_url": "https://api.github.com/users/AlongWY/repos",
"events_url": "https://api.github.com/users/AlongWY/events{/privacy}",
"received_events_url": "https://api.github.com/users/AlongWY/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 7 | 2020-07-08T09:12:12 | 2020-07-16T08:26:46 | 2020-07-16T08:26:46 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/352",
"html_url": "https://github.com/huggingface/datasets/pull/352",
"diff_url": "https://github.com/huggingface/datasets/pull/352.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/352.patch",
"merged_at": "2020-07-16T08:26:46"
} | Fix seqeval so that it correctly processes labels such as 'B' and 'B-ARGM-LOC' | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/352/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/352/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/351 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/351/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/351/comments | https://api.github.com/repos/huggingface/datasets/issues/351/events | https://github.com/huggingface/datasets/pull/351 | 652,424,048 | MDExOlB1bGxSZXF1ZXN0NDQ1NDk0NTE4 | 351 | add pandas dataset | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-07T15:38:07 | 2020-07-08T14:15:16 | 2020-07-08T14:15:15 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/351",
"html_url": "https://github.com/huggingface/datasets/pull/351",
"diff_url": "https://github.com/huggingface/datasets/pull/351.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/351.patch",
"merged_at": "2020-07-08T14:15:15"
} | Create a dataset from serialized pandas dataframes (an end-to-end sketch follows below).
Usage:
```python
from nlp import load_dataset
dset = load_dataset("pandas", data_files="df.pkl")["train"]
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/351/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 1,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/351/timeline | null |
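A minimal end-to-end sketch of the workflow described in the record above. It assumes the generic "pandas" loading script simply unpickles a standard `pandas.DataFrame`; the file name and column names are illustrative, not taken from the PR.

```python
import pandas as pd
from nlp import load_dataset

# Serialize a DataFrame with pickle so the generic "pandas" loading script can read it back.
df = pd.DataFrame({"text": ["first example", "second example"], "label": [0, 1]})
df.to_pickle("df.pkl")

# Load it as an nlp Dataset, mirroring the usage shown in the PR description.
dset = load_dataset("pandas", data_files="df.pkl")["train"]
print(dset[0])  # e.g. {'text': 'first example', 'label': 0}
```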
https://api.github.com/repos/huggingface/datasets/issues/350 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/350/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/350/comments | https://api.github.com/repos/huggingface/datasets/issues/350/events | https://github.com/huggingface/datasets/pull/350 | 652,398,691 | MDExOlB1bGxSZXF1ZXN0NDQ1NDczODYz | 350 | add from_pandas and from_dict | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-07T15:03:53 | 2020-07-08T14:14:33 | 2020-07-08T14:14:32 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/350",
"html_url": "https://github.com/huggingface/datasets/pull/350",
"diff_url": "https://github.com/huggingface/datasets/pull/350.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/350.patch",
"merged_at": "2020-07-08T14:14:32"
} | I added two new methods to the `Dataset` class:
- `from_pandas()` to create a dataset from a pandas dataframe
- `from_dict()` to create a dataset from a dictionary (keys = columns)
It uses the `pa.Table.from_pandas` and `pa.Table.from_pydict` functions to do so (a short usage sketch follows below).
It is also possible to specify the feature types via `features=...` if there are ambiguities (null/nan values); otherwise the arrow schema is inferred from the data automatically by pyarrow.
One question that I have right now:
+ Should we also add a `save()` method that would write the dataset to disk? Right now if we create a `Dataset` using those two new methods, the data is kept in RAM. Then to reload it we can call the `from_file()` method. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/350/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/350/timeline | null |
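A short usage sketch of the two constructors described in the record above; the column names and values are illustrative assumptions, not taken from the PR.

```python
import pandas as pd
from nlp import Dataset

# Build a dataset from an in-memory pandas DataFrame.
df = pd.DataFrame({"text": ["hello", "world"], "label": [0, 1]})
dset_from_df = Dataset.from_pandas(df)

# Build a dataset from a plain dict of columns.
dset_from_dict = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})

print(dset_from_df.column_names)  # ['text', 'label']
print(dset_from_dict.num_rows)    # 2
```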
https://api.github.com/repos/huggingface/datasets/issues/349 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/349/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/349/comments | https://api.github.com/repos/huggingface/datasets/issues/349/events | https://github.com/huggingface/datasets/pull/349 | 652,231,571 | MDExOlB1bGxSZXF1ZXN0NDQ1MzQwMTQ1 | 349 | Hyperpartisan news detection | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-07T11:06:37 | 2020-07-07T20:47:27 | 2020-07-07T14:57:11 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/349",
"html_url": "https://github.com/huggingface/datasets/pull/349",
"diff_url": "https://github.com/huggingface/datasets/pull/349.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/349.patch",
"merged_at": "2020-07-07T14:57:11"
} | Adding the hyperpartisan news detection dataset from PAN. This contains news article text, labelled with whether they're hyper-partisan and what kinds of biases they display.
Implementation notes:
- As with many PAN tasks, the data is hosted on [Zenodo](https://zenodo.org/record/1489920) and must be requested before use (a loading sketch follows below). I've used the manual download mechanism for this, although the dataset is provided under a Creative Commons Attribution 4.0 International License, so we could host a version if we wanted to?
- The 'bias' attribute doesn't exist for the 'byarticle' configuration. I've added an empty string to the class labels to deal with this. Is there a more standard value for empty data?
- Should we always subclass `nlp.BuilderConfig`?
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/349/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/349/timeline | null |
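A hedged sketch of how a manual-download dataset like this one is typically loaded once the files have been obtained from Zenodo. The dataset name, the `byarticle` config (mentioned in the PR) and the local path are assumptions, and `data_dir` is the usual manual-download argument rather than something confirmed by the PR.

```python
from nlp import load_dataset

# After requesting and downloading the PAN files from Zenodo, point the loader at them.
dset = load_dataset(
    "hyperpartisan_news_detection",  # assumed dataset name
    "byarticle",                     # config mentioned in the PR
    data_dir="/path/to/downloaded/pan_files",  # assumed local path
)
```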
https://api.github.com/repos/huggingface/datasets/issues/348 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/348/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/348/comments | https://api.github.com/repos/huggingface/datasets/issues/348/events | https://github.com/huggingface/datasets/pull/348 | 652,158,308 | MDExOlB1bGxSZXF1ZXN0NDQ1MjgwNjk3 | 348 | Add OSCAR dataset | {
"login": "pjox",
"id": 635220,
"node_id": "MDQ6VXNlcjYzNTIyMA==",
"avatar_url": "https://avatars.githubusercontent.com/u/635220?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/pjox",
"html_url": "https://github.com/pjox",
"followers_url": "https://api.github.com/users/pjox/followers",
"following_url": "https://api.github.com/users/pjox/following{/other_user}",
"gists_url": "https://api.github.com/users/pjox/gists{/gist_id}",
"starred_url": "https://api.github.com/users/pjox/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/pjox/subscriptions",
"organizations_url": "https://api.github.com/users/pjox/orgs",
"repos_url": "https://api.github.com/users/pjox/repos",
"events_url": "https://api.github.com/users/pjox/events{/privacy}",
"received_events_url": "https://api.github.com/users/pjox/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 20 | 2020-07-07T09:22:07 | 2021-05-03T22:07:08 | 2021-02-09T10:19:19 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/348",
"html_url": "https://github.com/huggingface/datasets/pull/348",
"diff_url": "https://github.com/huggingface/datasets/pull/348.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/348.patch",
"merged_at": null
} | I don't know if the tests pass: when I run them, they try to download the whole corpus, which is around 3.5TB compressed, and I don't have that kind of space. I'll really need some help with it 😅
Thanks! | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/348/reactions",
"total_count": 4,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 4,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/348/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/347 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/347/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/347/comments | https://api.github.com/repos/huggingface/datasets/issues/347/events | https://github.com/huggingface/datasets/issues/347 | 652,106,567 | MDU6SXNzdWU2NTIxMDY1Njc= | 347 | 'cp950' codec error from load_dataset('xtreme', 'tydiqa') | {
"login": "jerryIsHere",
"id": 50871412,
"node_id": "MDQ6VXNlcjUwODcxNDEy",
"avatar_url": "https://avatars.githubusercontent.com/u/50871412?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jerryIsHere",
"html_url": "https://github.com/jerryIsHere",
"followers_url": "https://api.github.com/users/jerryIsHere/followers",
"following_url": "https://api.github.com/users/jerryIsHere/following{/other_user}",
"gists_url": "https://api.github.com/users/jerryIsHere/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jerryIsHere/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jerryIsHere/subscriptions",
"organizations_url": "https://api.github.com/users/jerryIsHere/orgs",
"repos_url": "https://api.github.com/users/jerryIsHere/repos",
"events_url": "https://api.github.com/users/jerryIsHere/events{/privacy}",
"received_events_url": "https://api.github.com/users/jerryIsHere/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067388877,
"node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug",
"name": "dataset bug",
"color": "2edb81",
"default": false,
"description": "A bug in a dataset script provided in the library"
}
] | closed | false | null | [] | null | 10 | 2020-07-07T08:14:23 | 2020-09-07T14:51:45 | 2020-09-07T14:51:45 | CONTRIBUTOR | null | null | null | ![image](https://user-images.githubusercontent.com/50871412/86744744-67481680-c06c-11ea-8612-b77eba92a392.png)
I guess the error is related to a Python source-encoding issue: my PC seems to be decoding the source code with the wrong codec, perhaps:
https://www.python.org/dev/peps/pep-0263/
I guess the error was triggered by the code `module = importlib.import_module(module_path)` at line 57 of nlp/src/nlp/load.py (https://github.com/huggingface/nlp/blob/911d5596f9b500e39af8642fe3d1b891758999c7/src/nlp/load.py#L51)
Any ideas?
P.S. I tried the same code on Colab, and it runs perfectly.
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/347/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/347/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/346 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/346/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/346/comments | https://api.github.com/repos/huggingface/datasets/issues/346/events | https://github.com/huggingface/datasets/pull/346 | 652,044,151 | MDExOlB1bGxSZXF1ZXN0NDQ1MTg4MTUz | 346 | Add emotion dataset | {
"login": "lewtun",
"id": 26859204,
"node_id": "MDQ6VXNlcjI2ODU5MjA0",
"avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lewtun",
"html_url": "https://github.com/lewtun",
"followers_url": "https://api.github.com/users/lewtun/followers",
"following_url": "https://api.github.com/users/lewtun/following{/other_user}",
"gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lewtun/subscriptions",
"organizations_url": "https://api.github.com/users/lewtun/orgs",
"repos_url": "https://api.github.com/users/lewtun/repos",
"events_url": "https://api.github.com/users/lewtun/events{/privacy}",
"received_events_url": "https://api.github.com/users/lewtun/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 6 | 2020-07-07T06:35:41 | 2021-04-23T07:13:43 | 2020-07-13T14:39:38 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/346",
"html_url": "https://github.com/huggingface/datasets/pull/346",
"diff_url": "https://github.com/huggingface/datasets/pull/346.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/346.patch",
"merged_at": "2020-07-13T14:39:38"
} | Hello 🤗 team!
I am trying to add an emotion classification dataset ([link](https://github.com/dair-ai/emotion_dataset)) to `nlp`, but I am a bit stuck on what I should do when the URL for the dataset is not a ZIP file, but just a pickled `pandas.DataFrame` (see [here](https://www.dropbox.com/s/607ptdakxuh5i4s/merged_training.pkl)).
With the current implementation, running
```bash
python nlp-cli test datasets/emotion --save_infos --all_configs
```
throws a `_pickle.UnpicklingError: invalid load key, '<'.` error (full stack trace below). The strange thing is that the path to the file does not carry the `.pkl` extension and instead appears to be some md5 hash (see the `FILE PATH` print statement in the stack trace).
Note: I have checked that the `merged_training.pkl` file is not corrupted when I download it with `wget`.
Any pointers on what I'm doing wrong would be greatly appreciated!
**Stack trace**
```
INFO:nlp.load:Checking datasets/emotion/emotion.py for additional imports.
INFO:filelock:Lock 140330435928512 acquired on datasets/emotion/emotion.py.lock
INFO:nlp.load:Found main folder for dataset datasets/emotion/emotion.py at /Users/lewtun/git/nlp/src/nlp/datasets/emotion
INFO:nlp.load:Creating specific version folder for dataset datasets/emotion/emotion.py at /Users/lewtun/git/nlp/src/nlp/datasets/emotion/59666994754d1b369228a749b695e377643d141fa98c6972be00407659788c7b
INFO:nlp.load:Copying script file from datasets/emotion/emotion.py to /Users/lewtun/git/nlp/src/nlp/datasets/emotion/59666994754d1b369228a749b695e377643d141fa98c6972be00407659788c7b/emotion.py
INFO:nlp.load:Couldn't find dataset infos file at datasets/emotion/dataset_infos.json
INFO:nlp.load:Creating metadata file for dataset datasets/emotion/emotion.py at /Users/lewtun/git/nlp/src/nlp/datasets/emotion/59666994754d1b369228a749b695e377643d141fa98c6972be00407659788c7b/emotion.json
INFO:filelock:Lock 140330435928512 released on datasets/emotion/emotion.py.lock
INFO:nlp.builder:Generating dataset emotion (/Users/lewtun/.cache/huggingface/datasets/emotion/emotion/1.0.0)
INFO:nlp.builder:Dataset not on Hf google storage. Downloading and preparing it from source
Downloading and preparing dataset emotion/emotion (download: Unknown size, generated: Unknown size, total: Unknown size) to /Users/lewtun/.cache/huggingface/datasets/emotion/emotion/1.0.0...
INFO:nlp.builder:Generating split train
0 examples [00:00, ? examples/s]FILE PATH /Users/lewtun/.cache/huggingface/datasets/3615dcb52b7ba052ef63e1571894c4b67e8e12a6ab1ef2f756ec3c380bf48490
Traceback (most recent call last):
File "nlp-cli", line 37, in <module>
service.run()
File "/Users/lewtun/git/nlp/src/nlp/commands/test.py", line 83, in run
builder.download_and_prepare(
File "/Users/lewtun/git/nlp/src/nlp/builder.py", line 431, in download_and_prepare
self._download_and_prepare(
File "/Users/lewtun/git/nlp/src/nlp/builder.py", line 483, in _download_and_prepare
self._prepare_split(split_generator, **prepare_split_kwargs)
File "/Users/lewtun/git/nlp/src/nlp/builder.py", line 664, in _prepare_split
for key, record in utils.tqdm(generator, unit=" examples", total=split_info.num_examples, leave=False):
File "/Users/lewtun/miniconda3/envs/nlp/lib/python3.8/site-packages/tqdm/std.py", line 1129, in __iter__
for obj in iterable:
File "/Users/lewtun/git/nlp/src/nlp/datasets/emotion/59666994754d1b369228a749b695e377643d141fa98c6972be00407659788c7b/emotion.py", line 87, in _generate_examples
data = pickle.load(f)
_pickle.UnpicklingError: invalid load key, '<'.
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/346/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/346/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/345 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/345/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/345/comments | https://api.github.com/repos/huggingface/datasets/issues/345/events | https://github.com/huggingface/datasets/issues/345 | 651,761,201 | MDU6SXNzdWU2NTE3NjEyMDE= | 345 | Supporting documents in ELI5 | {
"login": "saverymax",
"id": 29262273,
"node_id": "MDQ6VXNlcjI5MjYyMjcz",
"avatar_url": "https://avatars.githubusercontent.com/u/29262273?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/saverymax",
"html_url": "https://github.com/saverymax",
"followers_url": "https://api.github.com/users/saverymax/followers",
"following_url": "https://api.github.com/users/saverymax/following{/other_user}",
"gists_url": "https://api.github.com/users/saverymax/gists{/gist_id}",
"starred_url": "https://api.github.com/users/saverymax/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/saverymax/subscriptions",
"organizations_url": "https://api.github.com/users/saverymax/orgs",
"repos_url": "https://api.github.com/users/saverymax/repos",
"events_url": "https://api.github.com/users/saverymax/events{/privacy}",
"received_events_url": "https://api.github.com/users/saverymax/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-06T19:14:13 | 2020-10-27T15:38:45 | 2020-10-27T15:38:45 | NONE | null | null | null | I was attempting to use the ELI5 dataset, when I realized that huggingface does not provide the supporting documents (the source documents from the common crawl). Without the supporting documents, this makes the dataset about as useful for my project as a block of cheese, or some other more apt metaphor. According to facebook, the entire document collection is quite large. However, it would still be helpful to at least include a subset of the supporting documents i.e., having some data is better than having a block of cheese, in my case at least.
If you choose not to include them, it would be helpful to have documentation mentioning this specifically. It is especially confusing because the hf nlp ELI5 dataset has the key `'document'` but there are no documents to be found :( | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/345/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/345/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/344 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/344/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/344/comments | https://api.github.com/repos/huggingface/datasets/issues/344/events | https://github.com/huggingface/datasets/pull/344 | 651,495,246 | MDExOlB1bGxSZXF1ZXN0NDQ0NzQwMTIw | 344 | Search qa | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-06T12:23:16 | 2020-07-16T08:58:16 | 2020-07-16T08:58:16 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/344",
"html_url": "https://github.com/huggingface/datasets/pull/344",
"diff_url": "https://github.com/huggingface/datasets/pull/344.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/344.patch",
"merged_at": null
} | This PR adds the Search QA dataset used in **SearchQA: A New Q&A Dataset Augmented with Context from a Search Engine**. The dataset has the following config names:
- raw_jeopardy: the raw data
- train_test_val: the split version
#336 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/344/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/344/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/343 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/343/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/343/comments | https://api.github.com/repos/huggingface/datasets/issues/343/events | https://github.com/huggingface/datasets/pull/343 | 651,419,630 | MDExOlB1bGxSZXF1ZXN0NDQ0Njc4NDEw | 343 | Fix nested tensorflow format | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-06T10:13:45 | 2020-07-06T13:11:52 | 2020-07-06T13:11:51 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/343",
"html_url": "https://github.com/huggingface/datasets/pull/343",
"diff_url": "https://github.com/huggingface/datasets/pull/343.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/343.patch",
"merged_at": "2020-07-06T13:11:51"
} | In #339 and #337 we are thinking about adding a way to export datasets to tfrecords.
However, I noticed that it was not possible to do `dset.set_format("tensorflow")` on datasets with nested features like `squad`. I fixed that using nested map operations to convert features to `tf.ragged.constant` (a tiny illustration follows below).
I also added tests on the `set_format` function. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/343/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/343/timeline | null |
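A tiny illustration (not the library's internal code) of the conversion mentioned in the record above: nested, variable-length Python lists are turned into `tf.ragged.constant` tensors. The example values are made up.

```python
import tensorflow as tf

# A nested feature whose inner lists have different lengths, like answer spans in squad.
answers_start = [[0, 12], [7], [3, 9, 21]]

ragged = tf.ragged.constant(answers_start)
print(ragged.shape)       # (3, None) -- the inner dimension stays variable
print(ragged[2].numpy())  # [ 3  9 21]
```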
https://api.github.com/repos/huggingface/datasets/issues/342 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/342/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/342/comments | https://api.github.com/repos/huggingface/datasets/issues/342/events | https://github.com/huggingface/datasets/issues/342 | 651,333,194 | MDU6SXNzdWU2NTEzMzMxOTQ= | 342 | Features should be updated when `map()` changes schema | {
"login": "thomwolf",
"id": 7353373,
"node_id": "MDQ6VXNlcjczNTMzNzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomwolf",
"html_url": "https://github.com/thomwolf",
"followers_url": "https://api.github.com/users/thomwolf/followers",
"following_url": "https://api.github.com/users/thomwolf/following{/other_user}",
"gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions",
"organizations_url": "https://api.github.com/users/thomwolf/orgs",
"repos_url": "https://api.github.com/users/thomwolf/repos",
"events_url": "https://api.github.com/users/thomwolf/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomwolf/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-06T08:03:23 | 2020-07-23T10:15:16 | 2020-07-23T10:15:16 | MEMBER | null | null | null | `dataset.map()` can change the schema and column names.
We should update the features in this case (with what is possible to infer). | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/342/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/342/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/341 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/341/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/341/comments | https://api.github.com/repos/huggingface/datasets/issues/341/events | https://github.com/huggingface/datasets/pull/341 | 650,611,969 | MDExOlB1bGxSZXF1ZXN0NDQ0MDcwMjEx | 341 | add fever dataset | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-03T13:53:07 | 2020-07-06T13:03:48 | 2020-07-06T13:03:47 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/341",
"html_url": "https://github.com/huggingface/datasets/pull/341",
"diff_url": "https://github.com/huggingface/datasets/pull/341.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/341.patch",
"merged_at": "2020-07-06T13:03:47"
} | This PR adds the FEVER dataset (https://fever.ai/) introduced in the paper: FEVER: a large-scale dataset for Fact Extraction and VERification (https://arxiv.org/pdf/1803.05355.pdf).
#336 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/341/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/341/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/340 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/340/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/340/comments | https://api.github.com/repos/huggingface/datasets/issues/340/events | https://github.com/huggingface/datasets/pull/340 | 650,533,920 | MDExOlB1bGxSZXF1ZXN0NDQ0MDA2Nzcy | 340 | Update cfq.py | {
"login": "brainshawn",
"id": 4437290,
"node_id": "MDQ6VXNlcjQ0MzcyOTA=",
"avatar_url": "https://avatars.githubusercontent.com/u/4437290?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/brainshawn",
"html_url": "https://github.com/brainshawn",
"followers_url": "https://api.github.com/users/brainshawn/followers",
"following_url": "https://api.github.com/users/brainshawn/following{/other_user}",
"gists_url": "https://api.github.com/users/brainshawn/gists{/gist_id}",
"starred_url": "https://api.github.com/users/brainshawn/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/brainshawn/subscriptions",
"organizations_url": "https://api.github.com/users/brainshawn/orgs",
"repos_url": "https://api.github.com/users/brainshawn/repos",
"events_url": "https://api.github.com/users/brainshawn/events{/privacy}",
"received_events_url": "https://api.github.com/users/brainshawn/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-03T11:23:19 | 2020-07-03T12:33:50 | 2020-07-03T12:33:50 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/340",
"html_url": "https://github.com/huggingface/datasets/pull/340",
"diff_url": "https://github.com/huggingface/datasets/pull/340.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/340.patch",
"merged_at": "2020-07-03T12:33:50"
} | Make the dataset name consistent with the paper: Compositional Freebase Question => Compositional Freebase Questions. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/340/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/340/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/339 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/339/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/339/comments | https://api.github.com/repos/huggingface/datasets/issues/339/events | https://github.com/huggingface/datasets/pull/339 | 650,156,468 | MDExOlB1bGxSZXF1ZXN0NDQzNzAyNTcw | 339 | Add dataset.export() to TFRecords | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 18 | 2020-07-02T19:26:27 | 2020-07-22T09:16:12 | 2020-07-22T09:16:12 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/339",
"html_url": "https://github.com/huggingface/datasets/pull/339",
"diff_url": "https://github.com/huggingface/datasets/pull/339.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/339.patch",
"merged_at": "2020-07-22T09:16:11"
} | Fixes https://github.com/huggingface/nlp/issues/337
Some design decisions (a rough serialization sketch follows below):
- Simplified the function API to not handle sharding. It writes the entire dataset as a single TFRecord file. This simplifies the function logic and users can use other functions (`select`, `shard`, etc) to handle custom sharding or splitting.
- Use `from_generator()` instead of `from_tensor_slices()` to address the memory issues discussed in https://github.com/huggingface/nlp/issues/315 and https://github.com/huggingface/nlp/issues/193.
- Performs introspection using the values from `dataset.set_format()` to identify the TF datatypes. Currently it supports string, float, and int. If this should be extended for other datatypes, let me know.
- There are quite a few helper functions required within the `export()` method. If these are better placed in a utils file somewhere, let me know.
Also, I noticed that
```python
dataset = dataset.select(indices)
dataset.set_format("tensorflow")
# dataset._format_type is "tensorflow"
```
gives a different output than
```python
dataset.set_format("tensorflow")
dataset = dataset.select(indices)
# dataset._format_type is None
```
The latter loses the format of its parent dataset. Is there interest in making `set_format` a functional method that returns itself (can be chained), and that derived datasets maintain the format of their parent? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/339/reactions",
"total_count": 3,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 3,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/339/timeline | null |
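A rough sketch (not the PR's actual implementation) of the serialization step the design notes above describe: iterating over rows and writing them to a single TFRecord file with `tf.train.Example`. The feature name `input_ids` and the int64-only handling are simplifying assumptions.

```python
import tensorflow as tf

def write_tfrecord(rows, filename):
    """Write an iterable of dict-like rows to a single TFRecord file."""
    with tf.io.TFRecordWriter(filename) as writer:
        for row in rows:
            feature = {
                "input_ids": tf.train.Feature(
                    int64_list=tf.train.Int64List(value=row["input_ids"])
                )
            }
            example = tf.train.Example(features=tf.train.Features(feature=feature))
            writer.write(example.SerializeToString())

# write_tfrecord(dset, "/my/tfrecords/myrecord.tfrecord")
```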
https://api.github.com/repos/huggingface/datasets/issues/338 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/338/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/338/comments | https://api.github.com/repos/huggingface/datasets/issues/338/events | https://github.com/huggingface/datasets/pull/338 | 650,057,253 | MDExOlB1bGxSZXF1ZXN0NDQzNjIxMTEx | 338 | Run `make style` | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-02T16:19:47 | 2020-07-02T18:03:10 | 2020-07-02T18:03:10 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/338",
"html_url": "https://github.com/huggingface/datasets/pull/338",
"diff_url": "https://github.com/huggingface/datasets/pull/338.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/338.patch",
"merged_at": "2020-07-02T18:03:10"
} | These files get changed when I run `make style` on an unrelated PR. Upstreaming these changes so development on a different branch can be easier. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/338/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/338/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/337 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/337/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/337/comments | https://api.github.com/repos/huggingface/datasets/issues/337/events | https://github.com/huggingface/datasets/issues/337 | 650,035,887 | MDU6SXNzdWU2NTAwMzU4ODc= | 337 | [Feature request] Export Arrow dataset to TFRecords | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-07-02T15:47:12 | 2020-07-22T09:16:12 | 2020-07-22T09:16:12 | CONTRIBUTOR | null | null | null | The TFRecord generation process is error-prone and requires complex separate Python scripts to download and preprocess the data. I propose to combine the user-friendly features of `nlp` with the speed and efficiency of TFRecords. Sample API:
```python
# use these existing methods
ds = load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
ds = ds.map(lambda ex: tokenizer(ex))
ds.set_format("tensorflow", columns=["input_ids", "token_type_ids", "attention_mask"])
# then add this method
ds.export(folder="/my/tfrecords", prefix="myrecord", num_shards=8, format="tfrecord")
```
which would create files like so:
```bash
/my/tfrecords/myrecord_1.tfrecord
/my/tfrecords/myrecord_2.tfrecord
...
```
I would be happy to contribute this method (a sketch of reading the exported records back follows below). We could use a similar approach for PyTorch. Thoughts? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/337/reactions",
"total_count": 3,
"+1": 3,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/337/timeline | null |
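A hedged sketch of how the exported shards from the proposal above could be read back with `tf.data`; the file pattern and the feature spec are illustrative assumptions.

```python
import tensorflow as tf

# Match the shard naming scheme proposed above.
files = tf.data.Dataset.list_files("/my/tfrecords/myrecord_*.tfrecord")
raw = tf.data.TFRecordDataset(files)

# Parse variable-length int features such as token ids.
feature_spec = {"input_ids": tf.io.VarLenFeature(tf.int64)}
parsed = raw.map(lambda record: tf.io.parse_single_example(record, feature_spec))
```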
https://api.github.com/repos/huggingface/datasets/issues/336 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/336/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/336/comments | https://api.github.com/repos/huggingface/datasets/issues/336/events | https://github.com/huggingface/datasets/issues/336 | 649,914,203 | MDU6SXNzdWU2NDk5MTQyMDM= | 336 | [Dataset requests] New datasets for Open Question Answering | {
"login": "thomwolf",
"id": 7353373,
"node_id": "MDQ6VXNlcjczNTMzNzM=",
"avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/thomwolf",
"html_url": "https://github.com/thomwolf",
"followers_url": "https://api.github.com/users/thomwolf/followers",
"following_url": "https://api.github.com/users/thomwolf/following{/other_user}",
"gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions",
"organizations_url": "https://api.github.com/users/thomwolf/orgs",
"repos_url": "https://api.github.com/users/thomwolf/repos",
"events_url": "https://api.github.com/users/thomwolf/events{/privacy}",
"received_events_url": "https://api.github.com/users/thomwolf/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892884,
"node_id": "MDU6TGFiZWwxOTM1ODkyODg0",
"url": "https://api.github.com/repos/huggingface/datasets/labels/help%20wanted",
"name": "help wanted",
"color": "008672",
"default": true,
"description": "Extra attention is needed"
},
{
"id": 2067376369,
"node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request",
"name": "dataset request",
"color": "e99695",
"default": false,
"description": "Requesting to add a new dataset"
}
] | closed | false | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
}
] | null | 0 | 2020-07-02T13:03:03 | 2020-07-16T09:04:22 | 2020-07-16T09:04:22 | MEMBER | null | null | null | We are still missing a few datasets for Open Question Answering, which is currently a field in strong development.
Namely, it would be really nice to add:
- WebQuestions (Berant et al., 2013) [done]
- CuratedTrec (Baudis et al. 2015) [not open-source]
- MS-MARCO (NGuyen et al. 2016) [done]
- SearchQA (Dunn et al. 2017) [done]
- FEVER (Thorne et al. 2018) [done]
All these datasets are cited in http://arxiv.org/abs/2005.11401 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/336/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/336/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/335 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/335/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/335/comments | https://api.github.com/repos/huggingface/datasets/issues/335/events | https://github.com/huggingface/datasets/pull/335 | 649,765,179 | MDExOlB1bGxSZXF1ZXN0NDQzMzgwMjI1 | 335 | BioMRC Dataset presented in BioNLP 2020 ACL Workshop | {
"login": "PetrosStav",
"id": 15162021,
"node_id": "MDQ6VXNlcjE1MTYyMDIx",
"avatar_url": "https://avatars.githubusercontent.com/u/15162021?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/PetrosStav",
"html_url": "https://github.com/PetrosStav",
"followers_url": "https://api.github.com/users/PetrosStav/followers",
"following_url": "https://api.github.com/users/PetrosStav/following{/other_user}",
"gists_url": "https://api.github.com/users/PetrosStav/gists{/gist_id}",
"starred_url": "https://api.github.com/users/PetrosStav/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/PetrosStav/subscriptions",
"organizations_url": "https://api.github.com/users/PetrosStav/orgs",
"repos_url": "https://api.github.com/users/PetrosStav/repos",
"events_url": "https://api.github.com/users/PetrosStav/events{/privacy}",
"received_events_url": "https://api.github.com/users/PetrosStav/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-02T09:03:41 | 2020-07-15T08:02:07 | 2020-07-15T08:02:07 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/335",
"html_url": "https://github.com/huggingface/datasets/pull/335",
"diff_url": "https://github.com/huggingface/datasets/pull/335.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/335.patch",
"merged_at": "2020-07-15T08:02:07"
} | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/335/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/335/timeline | null |
|
https://api.github.com/repos/huggingface/datasets/issues/334 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/334/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/334/comments | https://api.github.com/repos/huggingface/datasets/issues/334/events | https://github.com/huggingface/datasets/pull/334 | 649,661,791 | MDExOlB1bGxSZXF1ZXN0NDQzMjk1NjQ0 | 334 | Add dataset.shard() method | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-07-02T06:05:19 | 2020-07-06T12:35:36 | 2020-07-06T12:35:36 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/334",
"html_url": "https://github.com/huggingface/datasets/pull/334",
"diff_url": "https://github.com/huggingface/datasets/pull/334.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/334.patch",
"merged_at": "2020-07-06T12:35:36"
} | Fixes https://github.com/huggingface/nlp/issues/312 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/334/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/334/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/333 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/333/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/333/comments | https://api.github.com/repos/huggingface/datasets/issues/333/events | https://github.com/huggingface/datasets/pull/333 | 649,236,516 | MDExOlB1bGxSZXF1ZXN0NDQyOTE1NDQ0 | 333 | fix variable name typo | {
"login": "stas00",
"id": 10676103,
"node_id": "MDQ6VXNlcjEwNjc2MTAz",
"avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/stas00",
"html_url": "https://github.com/stas00",
"followers_url": "https://api.github.com/users/stas00/followers",
"following_url": "https://api.github.com/users/stas00/following{/other_user}",
"gists_url": "https://api.github.com/users/stas00/gists{/gist_id}",
"starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/stas00/subscriptions",
"organizations_url": "https://api.github.com/users/stas00/orgs",
"repos_url": "https://api.github.com/users/stas00/repos",
"events_url": "https://api.github.com/users/stas00/events{/privacy}",
"received_events_url": "https://api.github.com/users/stas00/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-01T19:13:50 | 2020-07-24T15:43:31 | 2020-07-24T08:32:16 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/333",
"html_url": "https://github.com/huggingface/datasets/pull/333",
"diff_url": "https://github.com/huggingface/datasets/pull/333.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/333.patch",
"merged_at": null
} | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/333/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/333/timeline | null |
|
https://api.github.com/repos/huggingface/datasets/issues/332 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/332/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/332/comments | https://api.github.com/repos/huggingface/datasets/issues/332/events | https://github.com/huggingface/datasets/pull/332 | 649,140,135 | MDExOlB1bGxSZXF1ZXN0NDQyODMwMzMz | 332 | Add wiki_dpr | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-07-01T17:12:00 | 2020-07-06T12:21:17 | 2020-07-06T12:21:16 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/332",
"html_url": "https://github.com/huggingface/datasets/pull/332",
"diff_url": "https://github.com/huggingface/datasets/pull/332.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/332.patch",
"merged_at": "2020-07-06T12:21:16"
} | Presented in the [Dense Passage Retrieval paper](https://arxiv.org/pdf/2004.04906.pdf), this dataset consists of 21M passages from the English Wikipedia along with their 768-dim embeddings computed using DPR's context encoder.
Note on the implementation:
- There are two configs: with and without the embeddings (73GB vs 14GB)
- I used a non-fixed-size sequence of floats to describe the feature format of the embeddings. I wanted to use fixed-size sequences but I had issues with reading the arrow file afterwards (for example `dataset[0]` was crashing)
- I added handling for lists of URLs as input to the download_manager | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/332/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/332/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/331 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/331/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/331/comments | https://api.github.com/repos/huggingface/datasets/issues/331/events | https://github.com/huggingface/datasets/issues/331 | 648,533,199 | MDU6SXNzdWU2NDg1MzMxOTk= | 331 | Loading CNN/Daily Mail dataset produces `nlp.utils.info_utils.NonMatchingSplitsSizesError` | {
"login": "jxmorris12",
"id": 13238952,
"node_id": "MDQ6VXNlcjEzMjM4OTUy",
"avatar_url": "https://avatars.githubusercontent.com/u/13238952?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jxmorris12",
"html_url": "https://github.com/jxmorris12",
"followers_url": "https://api.github.com/users/jxmorris12/followers",
"following_url": "https://api.github.com/users/jxmorris12/following{/other_user}",
"gists_url": "https://api.github.com/users/jxmorris12/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jxmorris12/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jxmorris12/subscriptions",
"organizations_url": "https://api.github.com/users/jxmorris12/orgs",
"repos_url": "https://api.github.com/users/jxmorris12/repos",
"events_url": "https://api.github.com/users/jxmorris12/events{/privacy}",
"received_events_url": "https://api.github.com/users/jxmorris12/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067388877,
"node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug",
"name": "dataset bug",
"color": "2edb81",
"default": false,
"description": "A bug in a dataset script provided in the library"
}
] | closed | false | null | [] | null | 5 | 2020-06-30T22:21:33 | 2020-07-09T13:03:40 | 2020-07-09T13:03:40 | CONTRIBUTOR | null | null | null | ```
>>> import nlp
>>> nlp.load_dataset('cnn_dailymail', '3.0.0')
Downloading and preparing dataset cnn_dailymail/3.0.0 (download: 558.32 MiB, generated: 1.26 GiB, total: 1.81 GiB) to /u/jm8wx/.cache/huggingface/datasets/cnn_dailymail/3.0.0/3.0.0...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/p/qdata/jm8wx/datasets/nlp/src/nlp/load.py", line 520, in load_dataset
builder_instance.download_and_prepare(
File "/p/qdata/jm8wx/datasets/nlp/src/nlp/builder.py", line 431, in download_and_prepare
self._download_and_prepare(
File "/p/qdata/jm8wx/datasets/nlp/src/nlp/builder.py", line 488, in _download_and_prepare
verify_splits(self.info.splits, split_dict)
File "/p/qdata/jm8wx/datasets/nlp/src/nlp/utils/info_utils.py", line 70, in verify_splits
raise NonMatchingSplitsSizesError(str(bad_splits))
nlp.utils.info_utils.NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='test', num_bytes=49424491, num_examples=11490, dataset_name='cnn_dailymail'), 'recorded': SplitInfo(name='test', num_bytes=48931393, num_examples=11379, dataset_name='cnn_dailymail')}, {'expected': SplitInfo(name='train', num_bytes=1249178681, num_examples=287113, dataset_name='cnn_dailymail'), 'recorded': SplitInfo(name='train', num_bytes=1240618482, num_examples=285161, dataset_name='cnn_dailymail')}, {'expected': SplitInfo(name='validation', num_bytes=57149241, num_examples=13368, dataset_name='cnn_dailymail'), 'recorded': SplitInfo(name='validation', num_bytes=56637485, num_examples=13255, dataset_name='cnn_dailymail')}]
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/331/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/331/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/330 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/330/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/330/comments | https://api.github.com/repos/huggingface/datasets/issues/330/events | https://github.com/huggingface/datasets/pull/330 | 648,525,720 | MDExOlB1bGxSZXF1ZXN0NDQyMzIxMjEw | 330 | Doc red | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-30T22:05:31 | 2020-07-06T12:10:39 | 2020-07-05T12:27:29 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/330",
"html_url": "https://github.com/huggingface/datasets/pull/330",
"diff_url": "https://github.com/huggingface/datasets/pull/330.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/330.patch",
"merged_at": "2020-07-05T12:27:29"
} | Adding [DocRED](https://github.com/thunlp/DocRED) - a relation extraction dataset which tests document-level RE. A few implementation notes:
- There are 2 separate versions of the training set - *annotated* and *distant*. Instead of `nlp.Split.Train` I've used the splits `"train_annotated"` and `"train_distant"` to reflect this.
- As well as the relation id, the full relation name is mapped from `rel_info.json`
- I renamed the 'h', 'r', 't' keys to 'head', 'relation' and 'tail' to make them more readable.
- Used the fix from #319 to allow nested sequences of dicts. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/330/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/330/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/329 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/329/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/329/comments | https://api.github.com/repos/huggingface/datasets/issues/329/events | https://github.com/huggingface/datasets/issues/329 | 648,446,979 | MDU6SXNzdWU2NDg0NDY5Nzk= | 329 | [Bug] FileLock dependency incompatible with filesystem | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 4 | 2020-06-30T19:45:31 | 2020-07-01T06:55:58 | 2020-06-30T21:33:06 | CONTRIBUTOR | null | null | null | I'm downloading a dataset successfully with
`load_dataset("wikitext", "wikitext-2-raw-v1")`
But when I attempt to cache it on an external volume, it hangs indefinitely:
`load_dataset("wikitext", "wikitext-2-raw-v1", cache_dir="/fsx") # /fsx is an external volume mount`
The filesystem when hanging looks like this:
```bash
/fsx
----downloads
----94be...73.lock
----wikitext
----wikitext-2-raw
----wikitext-2-raw-1.0.0.incomplete
```
It appears that on this filesystem, the FileLock object is forever stuck in its "acquire" stage. I have verified that the issue lies specifically with the `filelock` dependency:
```python
open("/fsx/hello.txt").write("hello") # succeeds
from filelock import FileLock
with FileLock("/fsx/hello.lock"):
open("/fsx/hello.txt").write("hello") # hangs indefinitely
```
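If it helps narrow things down: my understanding (an assumption — I haven't read the `filelock` internals closely) is that on Unix it ultimately relies on `fcntl`-style locking, so a quick way to check whether the mount supports that at all might be:
```python
import fcntl

# Rough diagnostic sketch: try to take a non-blocking exclusive lock on the mount.
# If the filesystem doesn't support this kind of locking, this should raise an OSError.
with open("/fsx/hello.lock", "w") as f:
    fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    print("flock acquired")
    fcntl.flock(f, fcntl.LOCK_UN)
```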
Has anyone else run into this issue? I'd raise it directly on the FileLock repo, but that project appears abandoned with the last update over a year ago. Or if there's a solution that would remove the FileLock dependency from the project, I would appreciate that. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/329/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/329/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/328 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/328/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/328/comments | https://api.github.com/repos/huggingface/datasets/issues/328/events | https://github.com/huggingface/datasets/issues/328 | 648,326,841 | MDU6SXNzdWU2NDgzMjY4NDE= | 328 | Fork dataset | {
"login": "timothyjlaurent",
"id": 2000204,
"node_id": "MDQ6VXNlcjIwMDAyMDQ=",
"avatar_url": "https://avatars.githubusercontent.com/u/2000204?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/timothyjlaurent",
"html_url": "https://github.com/timothyjlaurent",
"followers_url": "https://api.github.com/users/timothyjlaurent/followers",
"following_url": "https://api.github.com/users/timothyjlaurent/following{/other_user}",
"gists_url": "https://api.github.com/users/timothyjlaurent/gists{/gist_id}",
"starred_url": "https://api.github.com/users/timothyjlaurent/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/timothyjlaurent/subscriptions",
"organizations_url": "https://api.github.com/users/timothyjlaurent/orgs",
"repos_url": "https://api.github.com/users/timothyjlaurent/repos",
"events_url": "https://api.github.com/users/timothyjlaurent/events{/privacy}",
"received_events_url": "https://api.github.com/users/timothyjlaurent/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 5 | 2020-06-30T16:42:53 | 2020-07-06T21:43:59 | 2020-07-06T21:43:59 | NONE | null | null | null | We have a multi-task learning model training I'm trying to convert to using the Arrow-based nlp dataset.
We're currently training a custom TensorFlow model but the nlp paradigm should be a bridge for us to be able to use the wealth of pre-trained models in Transformers.
Our preprocessing flow parses raw text and json with Entity and Relations annotations and creates 2 datasets for training a NER and Relations prediction heads.
Is there some good way to "fork" dataset-
EG
1. text + json -> Dataset1
1. Dataset1 -> DatasetNER
1. Dataset1 -> DatasetREL
or
1. text + json -> Dataset1
1. Dataset1 -> DatasetNER
1. Dataset1 + DatasetNER -> DatasetREL
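To make the second flow concrete, here is the kind of thing I'm imagining (purely illustrative — the column names and the way Dataset1 is built are made up, and it assumes `.map()` is the right tool for deriving new datasets):
```python
# Hypothetical sketch: dataset1 is an already-built nlp.Dataset with
# "text", "entities" and "relations" columns (names made up).

# DatasetNER: keep the text and turn the entity annotations into NER tags.
dataset_ner = dataset1.map(
    lambda ex: {"tokens": ex["text"].split(), "ner_tags": ex["entities"]}
)

# DatasetREL: reuse the NER-augmented dataset plus the relation annotations.
dataset_rel = dataset_ner.map(
    lambda ex: {"rel_labels": ex["relations"]}
)
```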
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/328/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/328/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/327 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/327/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/327/comments | https://api.github.com/repos/huggingface/datasets/issues/327/events | https://github.com/huggingface/datasets/pull/327 | 648,312,858 | MDExOlB1bGxSZXF1ZXN0NDQyMTQyOTQw | 327 | set seed for suffling tests | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-30T16:21:34 | 2020-07-02T08:34:05 | 2020-07-02T08:34:04 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/327",
"html_url": "https://github.com/huggingface/datasets/pull/327",
"diff_url": "https://github.com/huggingface/datasets/pull/327.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/327.patch",
"merged_at": "2020-07-02T08:34:04"
} | Some tests were randomly failing because of a missing seed in a test for `train_test_split(shuffle=True)` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/327/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/327/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/326 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/326/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/326/comments | https://api.github.com/repos/huggingface/datasets/issues/326/events | https://github.com/huggingface/datasets/issues/326 | 648,126,103 | MDU6SXNzdWU2NDgxMjYxMDM= | 326 | Large dataset in Squad2-format | {
"login": "flozi00",
"id": 47894090,
"node_id": "MDQ6VXNlcjQ3ODk0MDkw",
"avatar_url": "https://avatars.githubusercontent.com/u/47894090?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/flozi00",
"html_url": "https://github.com/flozi00",
"followers_url": "https://api.github.com/users/flozi00/followers",
"following_url": "https://api.github.com/users/flozi00/following{/other_user}",
"gists_url": "https://api.github.com/users/flozi00/gists{/gist_id}",
"starred_url": "https://api.github.com/users/flozi00/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/flozi00/subscriptions",
"organizations_url": "https://api.github.com/users/flozi00/orgs",
"repos_url": "https://api.github.com/users/flozi00/repos",
"events_url": "https://api.github.com/users/flozi00/events{/privacy}",
"received_events_url": "https://api.github.com/users/flozi00/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 8 | 2020-06-30T12:18:59 | 2020-07-09T09:01:50 | 2020-07-09T09:01:50 | NONE | null | null | null | At the moment we are building a large question answering dataset and are thinking about sharing it with the huggingface community.
Because of the computing power required, we split it into multiple tiles, but they are all in the same format.
Right now the most important facts about it are these:
- Contexts: 1.047.671
- questions: 1.677.732
- Answers: 6.742.406
- unanswerable: 377.398
It is already cleaned
```python
train_data = [
{
'context': "this is the context",
'qas': [
{
'id': "00002",
'is_impossible': False,
'question': "whats is this",
'answers': [
{
'text': "answer",
'answer_start': 0
}
]
},
{
'id': "00003",
'is_impossible': False,
'question': "question2",
'answers': [
{
'text': "answer2",
'answer_start': 1
}
]
}
]
}
]
```
Because it is growing every day, we are thinking about a structure like this:
We host a JSON file containing all the download links, and the script can load it dynamically.
At the moment it is around 20GB.
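As a very rough sketch (every name and URL below is a placeholder, not the real one), the loading script could fetch the hosted index of links and hand them to the download manager:
```python
import json

import nlp

_INDEX_URL = "https://example.com/qa_dataset_index.json"  # placeholder URL


class OurQADataset(nlp.GeneratorBasedBuilder):
    """Sketch of a loader whose tile list lives in a remote JSON index."""

    def _info(self):
        ...

    def _split_generators(self, dl_manager):
        # The index lists the URLs of all current tiles, so new tiles can be
        # added later without touching this script.
        index_path = dl_manager.download_and_extract(_INDEX_URL)
        with open(index_path) as f:
            tile_urls = json.load(f)
        tile_paths = dl_manager.download_and_extract(tile_urls)
        return [
            nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepaths": tile_paths})
        ]

    def _generate_examples(self, filepaths):
        ...
```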
Any advice on how to handle this, or a ready-to-use template? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/326/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/326/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/325 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/325/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/325/comments | https://api.github.com/repos/huggingface/datasets/issues/325/events | https://github.com/huggingface/datasets/pull/325 | 647,601,592 | MDExOlB1bGxSZXF1ZXN0NDQxNTk3NTgw | 325 | Add SQuADShifts dataset | {
"login": "millerjohnp",
"id": 8953195,
"node_id": "MDQ6VXNlcjg5NTMxOTU=",
"avatar_url": "https://avatars.githubusercontent.com/u/8953195?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/millerjohnp",
"html_url": "https://github.com/millerjohnp",
"followers_url": "https://api.github.com/users/millerjohnp/followers",
"following_url": "https://api.github.com/users/millerjohnp/following{/other_user}",
"gists_url": "https://api.github.com/users/millerjohnp/gists{/gist_id}",
"starred_url": "https://api.github.com/users/millerjohnp/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/millerjohnp/subscriptions",
"organizations_url": "https://api.github.com/users/millerjohnp/orgs",
"repos_url": "https://api.github.com/users/millerjohnp/repos",
"events_url": "https://api.github.com/users/millerjohnp/events{/privacy}",
"received_events_url": "https://api.github.com/users/millerjohnp/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-29T19:11:16 | 2020-06-30T17:07:31 | 2020-06-30T17:07:31 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/325",
"html_url": "https://github.com/huggingface/datasets/pull/325",
"diff_url": "https://github.com/huggingface/datasets/pull/325.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/325.patch",
"merged_at": "2020-06-30T17:07:31"
} | This PR adds the four new variants of the SQuAD dataset used in [The Effect of Natural Distribution Shift on Question Answering Models](https://arxiv.org/abs/2004.14444) to facilitate evaluating model robustness to distribution shift. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/325/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/325/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/324 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/324/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/324/comments | https://api.github.com/repos/huggingface/datasets/issues/324/events | https://github.com/huggingface/datasets/issues/324 | 647,525,725 | MDU6SXNzdWU2NDc1MjU3MjU= | 324 | Error when calculating glue score | {
"login": "D-i-l-r-u-k-s-h-i",
"id": 47185867,
"node_id": "MDQ6VXNlcjQ3MTg1ODY3",
"avatar_url": "https://avatars.githubusercontent.com/u/47185867?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i",
"html_url": "https://github.com/D-i-l-r-u-k-s-h-i",
"followers_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/followers",
"following_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/following{/other_user}",
"gists_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/gists{/gist_id}",
"starred_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/subscriptions",
"organizations_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/orgs",
"repos_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/repos",
"events_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/events{/privacy}",
"received_events_url": "https://api.github.com/users/D-i-l-r-u-k-s-h-i/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 4 | 2020-06-29T16:53:48 | 2020-07-09T09:13:34 | 2020-07-09T09:13:34 | NONE | null | null | null | I was trying the glue score along with other metrics here, but glue gives me this error:
```
import nlp
glue_metric = nlp.load_metric('glue',name="cola")
glue_score = glue_metric.compute(predictions, references)
```
```
---------------------------------------------------------------------------
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-8-b9210a524504> in <module>()
----> 1 glue_score = glue_metric.compute(predictions, references)
6 frames
/usr/local/lib/python3.6/dist-packages/nlp/metric.py in compute(self, predictions, references, timeout, **metrics_kwargs)
191 """
192 if predictions is not None:
--> 193 self.add_batch(predictions=predictions, references=references)
194 self.finalize(timeout=timeout)
195
/usr/local/lib/python3.6/dist-packages/nlp/metric.py in add_batch(self, predictions, references, **kwargs)
207 if self.writer is None:
208 self._init_writer()
--> 209 self.writer.write_batch(batch)
210
211 def add(self, prediction=None, reference=None, **kwargs):
/usr/local/lib/python3.6/dist-packages/nlp/arrow_writer.py in write_batch(self, batch_examples, writer_batch_size)
155 if self.pa_writer is None:
156 self._build_writer(pa_table=pa.Table.from_pydict(batch_examples))
--> 157 pa_table: pa.Table = pa.Table.from_pydict(batch_examples, schema=self._schema)
158 if writer_batch_size is None:
159 writer_batch_size = self.writer_batch_size
/usr/local/lib/python3.6/dist-packages/pyarrow/types.pxi in __iter__()
/usr/local/lib/python3.6/dist-packages/pyarrow/array.pxi in pyarrow.lib.asarray()
/usr/local/lib/python3.6/dist-packages/pyarrow/array.pxi in pyarrow.lib.array()
/usr/local/lib/python3.6/dist-packages/pyarrow/array.pxi in pyarrow.lib._sequence_to_array()
TypeError: an integer is required (got type str)
```
I'm not sure whether I'm doing this wrong or whether it's an issue. I would like to know a workaround. Thank you. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/324/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/324/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/323 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/323/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/323/comments | https://api.github.com/repos/huggingface/datasets/issues/323/events | https://github.com/huggingface/datasets/pull/323 | 647,521,308 | MDExOlB1bGxSZXF1ZXN0NDQxNTMxOTY3 | 323 | Add package path to sys when downloading package as github archive | {
"login": "yjernite",
"id": 10469459,
"node_id": "MDQ6VXNlcjEwNDY5NDU5",
"avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/yjernite",
"html_url": "https://github.com/yjernite",
"followers_url": "https://api.github.com/users/yjernite/followers",
"following_url": "https://api.github.com/users/yjernite/following{/other_user}",
"gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}",
"starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/yjernite/subscriptions",
"organizations_url": "https://api.github.com/users/yjernite/orgs",
"repos_url": "https://api.github.com/users/yjernite/repos",
"events_url": "https://api.github.com/users/yjernite/events{/privacy}",
"received_events_url": "https://api.github.com/users/yjernite/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-06-29T16:46:01 | 2020-07-30T14:00:23 | 2020-07-30T14:00:23 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/323",
"html_url": "https://github.com/huggingface/datasets/pull/323",
"diff_url": "https://github.com/huggingface/datasets/pull/323.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/323.patch",
"merged_at": null
} | This fixes the `coval.py` metric so that imports within the downloaded module work correctly. We can use a similar trick to add the BLEURT metric (@ankparikh)
@thomwolf not sure how you feel about adding to the `PYTHONPATH` from the script. This is the only way I could make it work with my understanding of `importlib` but there might be a more elegant method.
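For reference, the trick itself boils down to something like this (simplified sketch — the variable name is illustrative, not the exact one used in the diff):
```python
import sys

# After the github archive has been downloaded and extracted, add the package
# directory to sys.path so that intra-package imports in the metric resolve.
if extracted_archive_dir not in sys.path:
    sys.path.append(extracted_archive_dir)
```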
This PR fixes https://github.com/huggingface/nlp/issues/305 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/323/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/323/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/322 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/322/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/322/comments | https://api.github.com/repos/huggingface/datasets/issues/322/events | https://github.com/huggingface/datasets/pull/322 | 647,483,850 | MDExOlB1bGxSZXF1ZXN0NDQxNTAyMjc2 | 322 | output nested dict in get_nearest_examples | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-29T15:47:47 | 2020-07-02T08:33:33 | 2020-07-02T08:33:32 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/322",
"html_url": "https://github.com/huggingface/datasets/pull/322",
"diff_url": "https://github.com/huggingface/datasets/pull/322.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/322.patch",
"merged_at": "2020-07-02T08:33:32"
} | As we are using a columnar format like Arrow as the backend for datasets, we expect to get a dictionary of columns when we slice a dataset, as in this example:
```python
my_examples = dataset[0:10]
print(type(my_examples))
# >>> dict
print(my_examples["my_column"][0]
# >>> this is the first element of the column 'my_column'
```
Therefore I wanted to keep this logic when calling `get_nearest_examples` that returns the top 10 nearest examples:
```python
dataset.add_faiss_index(column="embeddings")
scores, examples = dataset.get_nearest_examples("embeddings", query=my_numpy_embedding)
print(type(examples))
# >>> dict
```
Previously it was returning a list[dict]. It was the only place that was using this output format.
To make it work I had to implement `__getitem__(key)` where `key` is a list.
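Concretely, indexing with a list of indices now gives the same dict-of-columns output (small sketch):
```python
my_examples = dataset[[1, 8, 42]]
print(type(my_examples))
# >>> dict
print(len(my_examples["my_column"]))
# >>> 3
```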
This is different from `.select` because `.select` is a dataset transform (it returns a new dataset object) while `__getitem__` is an extraction method (it returns python dictionaries). | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/322/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/322/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/321 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/321/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/321/comments | https://api.github.com/repos/huggingface/datasets/issues/321/events | https://github.com/huggingface/datasets/issues/321 | 647,271,526 | MDU6SXNzdWU2NDcyNzE1MjY= | 321 | ERROR:root:mwparserfromhell | {
"login": "Shiro-LK",
"id": 26505641,
"node_id": "MDQ6VXNlcjI2NTA1NjQx",
"avatar_url": "https://avatars.githubusercontent.com/u/26505641?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Shiro-LK",
"html_url": "https://github.com/Shiro-LK",
"followers_url": "https://api.github.com/users/Shiro-LK/followers",
"following_url": "https://api.github.com/users/Shiro-LK/following{/other_user}",
"gists_url": "https://api.github.com/users/Shiro-LK/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Shiro-LK/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Shiro-LK/subscriptions",
"organizations_url": "https://api.github.com/users/Shiro-LK/orgs",
"repos_url": "https://api.github.com/users/Shiro-LK/repos",
"events_url": "https://api.github.com/users/Shiro-LK/events{/privacy}",
"received_events_url": "https://api.github.com/users/Shiro-LK/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067388877,
"node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug",
"name": "dataset bug",
"color": "2edb81",
"default": false,
"description": "A bug in a dataset script provided in the library"
}
] | open | false | null | [] | null | 9 | 2020-06-29T11:10:43 | 2020-07-23T16:28:34 | null | NONE | null | null | null | Hi,
I am trying to download some Wikipedia data but I got this error for Spanish, "es" (there may be other languages with the same error; I haven't tried all of them).
`ERROR:root:mwparserfromhell ParseError: This is a bug and should be reported. Info: C tokenizer exited with non-empty token stack.`
The code I used was:
`dataset = load_dataset('wikipedia', '20200501.es', beam_runner='DirectRunner')`
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/321/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/321/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/320 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/320/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/320/comments | https://api.github.com/repos/huggingface/datasets/issues/320/events | https://github.com/huggingface/datasets/issues/320 | 647,188,167 | MDU6SXNzdWU2NDcxODgxNjc= | 320 | Blog Authorship Corpus, Non Matching Splits Sizes Error, nlp viewer | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2107841032,
"node_id": "MDU6TGFiZWwyMTA3ODQxMDMy",
"url": "https://api.github.com/repos/huggingface/datasets/labels/nlp-viewer",
"name": "nlp-viewer",
"color": "94203D",
"default": false,
"description": ""
}
] | closed | false | null | [] | null | 2 | 2020-06-29T07:36:35 | 2020-06-29T14:44:42 | 2020-06-29T14:44:42 | CONTRIBUTOR | null | null | null | Selecting `blog_authorship_corpus` in the nlp viewer throws the following error:
```
NonMatchingSplitsSizesError: [{'expected': SplitInfo(name='train', num_bytes=610252351, num_examples=532812, dataset_name='blog_authorship_corpus'), 'recorded': SplitInfo(name='train', num_bytes=614706451, num_examples=535568, dataset_name='blog_authorship_corpus')}, {'expected': SplitInfo(name='validation', num_bytes=37500394, num_examples=31277, dataset_name='blog_authorship_corpus'), 'recorded': SplitInfo(name='validation', num_bytes=32553710, num_examples=28521, dataset_name='blog_authorship_corpus')}]
Traceback:
File "/home/sasha/streamlit/lib/streamlit/ScriptRunner.py", line 322, in _run_script
exec(code, module.__dict__)
File "/home/sasha/nlp-viewer/run.py", line 172, in <module>
dts, fail = get(str(option.id), str(conf_option.name) if conf_option else None)
File "/home/sasha/streamlit/lib/streamlit/caching.py", line 591, in wrapped_func
return get_or_create_cached_value()
File "/home/sasha/streamlit/lib/streamlit/caching.py", line 575, in get_or_create_cached_value
return_value = func(*args, **kwargs)
File "/home/sasha/nlp-viewer/run.py", line 132, in get
builder_instance.download_and_prepare()
File "/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/nlp/builder.py", line 432, in download_and_prepare
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
File "/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/nlp/builder.py", line 488, in _download_and_prepare
verify_splits(self.info.splits, split_dict)
File "/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/nlp/utils/info_utils.py", line 70, in verify_splits
raise NonMatchingSplitsSizesError(str(bad_splits))
```
@srush @lhoestq | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/320/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/320/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/319 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/319/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/319/comments | https://api.github.com/repos/huggingface/datasets/issues/319/events | https://github.com/huggingface/datasets/issues/319 | 646,792,487 | MDU6SXNzdWU2NDY3OTI0ODc= | 319 | Nested sequences with dicts | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-27T23:45:17 | 2020-07-03T10:22:00 | 2020-07-03T10:22:00 | CONTRIBUTOR | null | null | null | I'm pretty much finished [adding a dataset](https://github.com/ghomasHudson/nlp/blob/DocRED/datasets/docred/docred.py) for [DocRED](https://github.com/thunlp/DocRED), but am getting an error when trying to add a nested `nlp.features.Sequence(nlp.features.Sequence({key: value, ...}))`.
The original data is in this format:
```python
{
'title': "Title of wiki page",
'vertexSet': [
[
{ 'name': "mention_name",
'sent_id': "mention in which sentence",
'pos': ["postion of mention in a sentence"],
'type': "NER_type"},
{another mention}
],
[another entity]
]
...
}
```
So to represent this I've attempted to write:
```
...
features=nlp.Features({
"title": nlp.Value("string"),
"vertexSet": nlp.features.Sequence(nlp.features.Sequence({
"name": nlp.Value("string"),
"sent_id": nlp.Value("int32"),
"pos": nlp.features.Sequence(nlp.Value("int32")),
"type": nlp.Value("string"),
})),
...
}),
...
```
This is giving me the error:
```
pyarrow.lib.ArrowTypeError: Could not convert [{'pos': [[0,2], [2,4], [3,5]], "type": ["ORG", "ORG", "ORG"], "name": ["Lark Force", "Lark Force", "Lark Force", "sent_id": [0, 3, 4]}..... with type list: was not a dict, tuple, or recognized null value for conversion to struct type
```
Do we expect the pyarrow stuff to break when doing this deeper nesting? I've checked that it still works when you do `nlp.features.Sequence(nlp.features.Sequence(nlp.Value("string")))` or `nlp.features.Sequence({key: value, ...})`, just not nested sequences with a dict.
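For reference, the flatter encoding I have in mind would be roughly this (just a sketch — a single Sequence of a dict, with an extra field to keep track of which entity a mention belongs to, since that grouping is what the outer sequence was expressing):
```python
import nlp

features = nlp.Features({
    "title": nlp.Value("string"),
    "vertexSet": nlp.features.Sequence({
        "name": nlp.Value("string"),
        "sent_id": nlp.Value("int32"),
        "pos": nlp.features.Sequence(nlp.Value("int32")),
        "type": nlp.Value("string"),
        # added field (not in the original DocRED structure) recording the entity index
        "entity_id": nlp.Value("int32"),
    }),
})
```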
If deeper nesting isn't possible, I can always convert it to a shallower structure along those lines. I'd rather not change the DocRED authors' structure if I don't have to, though. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/319/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/319/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/318 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/318/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/318/comments | https://api.github.com/repos/huggingface/datasets/issues/318/events | https://github.com/huggingface/datasets/pull/318 | 646,682,840 | MDExOlB1bGxSZXF1ZXN0NDQwOTExOTYy | 318 | Multitask | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | open | false | null | [] | null | 18 | 2020-06-27T13:27:29 | 2021-12-02T23:24:49 | null | CONTRIBUTOR | null | true | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/318",
"html_url": "https://github.com/huggingface/datasets/pull/318",
"diff_url": "https://github.com/huggingface/datasets/pull/318.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/318.patch",
"merged_at": null
} | Following our discussion in #217, I've implemented a first working version of `MultiDataset`.
There's a function `build_multitask()` which takes either individual `nlp.Dataset`s or `dicts` of splits and constructs `MultiDataset`(s). I've added a notebook with example usage.
I've implemented many of the `nlp.Dataset` methods (cache_files, columns, nbytes, num_columns, num_rows, column_names, schema, shape). Some of the other methods are complicated as they change the number of examples. These raise `NotImplementedError`s at the moment.
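To give a flavour of the intended usage (heavily simplified — see the notebook for the real thing; the call below is only a sketch and the exact signature is assumed):
```python
import nlp

squad = nlp.load_dataset("squad")
cnn_dm = nlp.load_dataset("cnn_dailymail", "3.0.0")

# Sketch: combine the train splits of two tasks into a single MultiDataset.
multitask_train = build_multitask(squad["train"], cnn_dm["train"])
print(multitask_train.num_rows)
```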
This will need some tests which I haven't written yet.
There's definitely room for improvements but I think the general approach is sound. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/318/reactions",
"total_count": 1,
"+1": 1,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/318/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/317 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/317/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/317/comments | https://api.github.com/repos/huggingface/datasets/issues/317/events | https://github.com/huggingface/datasets/issues/317 | 646,555,384 | MDU6SXNzdWU2NDY1NTUzODQ= | 317 | Adding a dataset with multiple subtasks | {
"login": "erickrf",
"id": 294483,
"node_id": "MDQ6VXNlcjI5NDQ4Mw==",
"avatar_url": "https://avatars.githubusercontent.com/u/294483?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/erickrf",
"html_url": "https://github.com/erickrf",
"followers_url": "https://api.github.com/users/erickrf/followers",
"following_url": "https://api.github.com/users/erickrf/following{/other_user}",
"gists_url": "https://api.github.com/users/erickrf/gists{/gist_id}",
"starred_url": "https://api.github.com/users/erickrf/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/erickrf/subscriptions",
"organizations_url": "https://api.github.com/users/erickrf/orgs",
"repos_url": "https://api.github.com/users/erickrf/repos",
"events_url": "https://api.github.com/users/erickrf/events{/privacy}",
"received_events_url": "https://api.github.com/users/erickrf/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-26T23:14:19 | 2020-10-27T15:36:52 | 2020-10-27T15:36:52 | NONE | null | null | null | I intend to add the datasets of the MT Quality Estimation shared tasks to `nlp`. However, they have different subtasks -- such as word-level, sentence-level and document-level quality estimation -- each of which has different language pairs, and some of the data is reused across subtasks.
For example, in [QE 2019,](http://www.statmt.org/wmt19/qe-task.html) we had the same English-Russian and English-German data for word-level and sentence-level QE.
I suppose these datasets could have both their word and sentence-level labels inside `nlp.Features`; but what about other subtasks? Should they be considered a different dataset altogether?
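For the word + sentence-level case, I'm imagining features along these lines (a sketch only; the field names are made up):
```python
import nlp

features = nlp.Features({
    "source": nlp.Value("string"),
    "translation": nlp.Value("string"),
    # sentence-level quality score
    "sentence_score": nlp.Value("float32"),
    # word-level labels, one tag per translated token
    "word_tags": nlp.features.Sequence(nlp.ClassLabel(names=["OK", "BAD"])),
})
```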
I read the discussion on #217 but the case of QE seems a lot simpler. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/317/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/317/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/316 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/316/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/316/comments | https://api.github.com/repos/huggingface/datasets/issues/316/events | https://github.com/huggingface/datasets/pull/316 | 646,366,450 | MDExOlB1bGxSZXF1ZXN0NDQwNjY5NzY5 | 316 | add AG News dataset | {
"login": "jxmorris12",
"id": 13238952,
"node_id": "MDQ6VXNlcjEzMjM4OTUy",
"avatar_url": "https://avatars.githubusercontent.com/u/13238952?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jxmorris12",
"html_url": "https://github.com/jxmorris12",
"followers_url": "https://api.github.com/users/jxmorris12/followers",
"following_url": "https://api.github.com/users/jxmorris12/following{/other_user}",
"gists_url": "https://api.github.com/users/jxmorris12/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jxmorris12/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jxmorris12/subscriptions",
"organizations_url": "https://api.github.com/users/jxmorris12/orgs",
"repos_url": "https://api.github.com/users/jxmorris12/repos",
"events_url": "https://api.github.com/users/jxmorris12/events{/privacy}",
"received_events_url": "https://api.github.com/users/jxmorris12/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-26T16:11:58 | 2020-06-30T09:58:08 | 2020-06-30T08:31:55 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/316",
"html_url": "https://github.com/huggingface/datasets/pull/316",
"diff_url": "https://github.com/huggingface/datasets/pull/316.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/316.patch",
"merged_at": "2020-06-30T08:31:55"
} | adds support for the AG-News topic classification dataset | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/316/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/316/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/315 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/315/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/315/comments | https://api.github.com/repos/huggingface/datasets/issues/315/events | https://github.com/huggingface/datasets/issues/315 | 645,888,943 | MDU6SXNzdWU2NDU4ODg5NDM= | 315 | [Question] Best way to batch a large dataset? | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067400324,
"node_id": "MDU6TGFiZWwyMDY3NDAwMzI0",
"url": "https://api.github.com/repos/huggingface/datasets/labels/generic%20discussion",
"name": "generic discussion",
"color": "c5def5",
"default": false,
"description": "Generic discussion on the library"
}
] | open | false | null | [] | null | 11 | 2020-06-25T22:30:20 | 2020-10-27T15:38:17 | null | CONTRIBUTOR | null | null | null | I'm training on large datasets such as Wikipedia and BookCorpus. Following the instructions in [the tutorial notebook](https://colab.research.google.com/github/huggingface/nlp/blob/master/notebooks/Overview.ipynb), I see the following recommended for TensorFlow:
```python
train_tf_dataset = train_tf_dataset.filter(remove_none_values, load_from_cache_file=False)
columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions']
train_tf_dataset.set_format(type='tensorflow', columns=columns)
features = {x: train_tf_dataset[x].to_tensor(default_value=0, shape=[None, tokenizer.max_len]) for x in columns[:3]}
labels = {"output_1": train_tf_dataset["start_positions"].to_tensor(default_value=0, shape=[None, 1])}
labels["output_2"] = train_tf_dataset["end_positions"].to_tensor(default_value=0, shape=[None, 1])
### Question about this last line ###
tfdataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(8)
```
This code works for something like WikiText-2. However, scaling up to WikiText-103, the last line takes 5-10 minutes to run. I assume it is because tf.data.Dataset.from_tensor_slices() is pulling everything into memory, not lazily loading. This approach won't scale up to datasets 25x larger such as Wikipedia.
So I tried manual batching using `dataset.select()`:
```python
idxs = np.random.randint(len(dataset), size=bsz)
batch = dataset.select(idxs).map(lambda example: {"input_ids": tokenizer(example["text"])})
tf_batch = tf.constant(batch["ids"], dtype=tf.int64)
```
This appears to create a new Apache Arrow dataset with every batch I grab, and then tries to cache it. The runtime of `dataset.select([0, 1])` appears to be much worse than `dataset[:2]`. So using `select()` doesn't seem to be performant enough for a training loop.
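One direction I'm considering (an untested sketch, so the details are assumptions — in particular that indexing one example at a time is cheap and that the columns are plain python/numpy here) is wrapping the dataset in a generator and letting `tf.data` batch lazily:
```python
import tensorflow as tf

def gen():
    # Stream one example at a time instead of materializing everything
    # the way from_tensor_slices does.
    for i in range(len(train_tf_dataset)):
        ex = train_tf_dataset[i]
        yield ex["input_ids"], ex["start_positions"]

tfdataset = tf.data.Dataset.from_generator(
    gen,
    output_types=(tf.int64, tf.int64),
    output_shapes=(tf.TensorShape([None]), tf.TensorShape([])),
).padded_batch(8, padded_shapes=([None], []))
```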
Is there a performant scalable way to lazily load batches of nlp Datasets? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/315/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 1
} | https://api.github.com/repos/huggingface/datasets/issues/315/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/314 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/314/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/314/comments | https://api.github.com/repos/huggingface/datasets/issues/314/events | https://github.com/huggingface/datasets/pull/314 | 645,461,174 | MDExOlB1bGxSZXF1ZXN0NDM5OTM4MTMw | 314 | Fixed singlular very minor spelling error | {
"login": "SchizoidBat",
"id": 40696362,
"node_id": "MDQ6VXNlcjQwNjk2MzYy",
"avatar_url": "https://avatars.githubusercontent.com/u/40696362?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/SchizoidBat",
"html_url": "https://github.com/SchizoidBat",
"followers_url": "https://api.github.com/users/SchizoidBat/followers",
"following_url": "https://api.github.com/users/SchizoidBat/following{/other_user}",
"gists_url": "https://api.github.com/users/SchizoidBat/gists{/gist_id}",
"starred_url": "https://api.github.com/users/SchizoidBat/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/SchizoidBat/subscriptions",
"organizations_url": "https://api.github.com/users/SchizoidBat/orgs",
"repos_url": "https://api.github.com/users/SchizoidBat/repos",
"events_url": "https://api.github.com/users/SchizoidBat/events{/privacy}",
"received_events_url": "https://api.github.com/users/SchizoidBat/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-25T10:45:59 | 2020-06-26T08:46:41 | 2020-06-25T12:43:59 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/314",
"html_url": "https://github.com/huggingface/datasets/pull/314",
"diff_url": "https://github.com/huggingface/datasets/pull/314.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/314.patch",
"merged_at": "2020-06-25T12:43:59"
} | An instance of "independantly" was changed to "independently". That's all. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/314/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/314/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/313 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/313/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/313/comments | https://api.github.com/repos/huggingface/datasets/issues/313/events | https://github.com/huggingface/datasets/pull/313 | 645,390,088 | MDExOlB1bGxSZXF1ZXN0NDM5ODc4MDg5 | 313 | Add MWSC | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "patrickvonplaten",
"id": 23423619,
"node_id": "MDQ6VXNlcjIzNDIzNjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/patrickvonplaten",
"html_url": "https://github.com/patrickvonplaten",
"followers_url": "https://api.github.com/users/patrickvonplaten/followers",
"following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}",
"gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}",
"starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions",
"organizations_url": "https://api.github.com/users/patrickvonplaten/orgs",
"repos_url": "https://api.github.com/users/patrickvonplaten/repos",
"events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}",
"received_events_url": "https://api.github.com/users/patrickvonplaten/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "patrickvonplaten",
"id": 23423619,
"node_id": "MDQ6VXNlcjIzNDIzNjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/patrickvonplaten",
"html_url": "https://github.com/patrickvonplaten",
"followers_url": "https://api.github.com/users/patrickvonplaten/followers",
"following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}",
"gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}",
"starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions",
"organizations_url": "https://api.github.com/users/patrickvonplaten/orgs",
"repos_url": "https://api.github.com/users/patrickvonplaten/repos",
"events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}",
"received_events_url": "https://api.github.com/users/patrickvonplaten/received_events",
"type": "User",
"site_admin": false
},
{
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
},
{
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
}
] | null | 1 | 2020-06-25T09:22:02 | 2020-06-30T08:28:11 | 2020-06-30T08:28:11 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/313",
"html_url": "https://github.com/huggingface/datasets/pull/313",
"diff_url": "https://github.com/huggingface/datasets/pull/313.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/313.patch",
"merged_at": "2020-06-30T08:28:10"
} | Adding the [Modified Winograd Schema Challenge](https://github.com/salesforce/decaNLP/blob/master/local_data/schema.txt) dataset, which formed part of the [decaNLP](http://decanlp.com/) benchmark. Not sure how much use people would find for it outside of the benchmark, but it is general purpose.
Code is heavily borrowed from the [decaNLP repo](https://github.com/salesforce/decaNLP/blob/1e9605f246b9e05199b28bde2a2093bc49feeeaa/text/torchtext/datasets/generic.py#L773-L877).
There are a few (possibly overly opinionated) design choices I made:
- I used the train/test/dev split [buried in the decaNLP code](https://github.com/salesforce/decaNLP/blob/1e9605f246b9e05199b28bde2a2093bc49feeeaa/text/torchtext/datasets/generic.py#L852-L855)
- I split out each example into the 2 alternatives. Originally the data uses the format:
```
The city councilmen refused the demonstrators a permit because they [feared/advocated] violence.
Who [feared/advocated] violence?
councilmen/demonstrators
```
I split into the 2 variants:
```
The city councilmen refused the demonstrators a permit because they feared violence.
Who feared violence?
councilmen/demonstrators
The city councilmen refused the demonstrators a permit because they advocated violence.
Who advocated violence?
councilmen/demonstrators
```
I can't see any use for having the options combined into a single example (splitting them is [the way decaNLP processes them](https://github.com/salesforce/decaNLP/blob/1e9605f246b9e05199b28bde2a2093bc49feeeaa/text/torchtext/datasets/generic.py#L846-L850)). You can't train on both versions while they are combined, and splitting the examples later would be a pain to do. I think [winogrande.py](https://github.com/huggingface/nlp/blob/master/datasets/winogrande/winogrande.py) presents the data in this way?
- I've not used the decaNLP framing (appending the options to the question e.g. `Who feared violence?
-- councilmen or demonstrators?`) but left it more generic by adding the options as a new key: `"options": ["councilmen", "demonstrators"]`. This should be an easy thing to change using `map` if needed by a specific application (a rough sketch follows).
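For example, restoring the decaNLP-style framing from the `options` key could look roughly like the sketch below. This is purely illustrative — it assumes the dataset ends up registered as `mwsc` and that the fields are named `question` and `options` as described above:
```python
import nlp

mwsc = nlp.load_dataset("mwsc", split="train")

def add_decanlp_framing(example):
    # append the options to the question, decaNLP-style:
    # "Who feared violence? -- councilmen or demonstrators?"
    example["question"] = example["question"] + " -- " + " or ".join(example["options"]) + "?"
    return example

mwsc = mwsc.map(add_decanlp_framing)
```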
The dataset is working as-is, but if anyone has any thoughts/preferences on the design decisions here, I'm definitely open to different choices. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/313/reactions",
"total_count": 3,
"+1": 3,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/313/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/312 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/312/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/312/comments | https://api.github.com/repos/huggingface/datasets/issues/312/events | https://github.com/huggingface/datasets/issues/312 | 645,025,561 | MDU6SXNzdWU2NDUwMjU1NjE= | 312 | [Feature request] Add `shard()` method to dataset | {
"login": "jarednielsen",
"id": 4564897,
"node_id": "MDQ6VXNlcjQ1NjQ4OTc=",
"avatar_url": "https://avatars.githubusercontent.com/u/4564897?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jarednielsen",
"html_url": "https://github.com/jarednielsen",
"followers_url": "https://api.github.com/users/jarednielsen/followers",
"following_url": "https://api.github.com/users/jarednielsen/following{/other_user}",
"gists_url": "https://api.github.com/users/jarednielsen/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jarednielsen/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jarednielsen/subscriptions",
"organizations_url": "https://api.github.com/users/jarednielsen/orgs",
"repos_url": "https://api.github.com/users/jarednielsen/repos",
"events_url": "https://api.github.com/users/jarednielsen/events{/privacy}",
"received_events_url": "https://api.github.com/users/jarednielsen/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-06-24T22:48:33 | 2020-07-06T12:35:36 | 2020-07-06T12:35:36 | CONTRIBUTOR | null | null | null | Currently, to shard a dataset into 10 pieces on different ranks, you can run
```python
rank = 3 # for example
size = 10
dataset = nlp.load_dataset('wikitext', 'wikitext-2-raw-v1', split=f"train[{rank*10}%:{(rank+1)*10}%]")
```
However, this breaks down if you have a number of ranks that doesn't divide cleanly into 100, such as 64 ranks. Is there interest in adding a method shard() that looks like this?
```python
rank = 3
size = 64
dataset = nlp.load_dataset("wikitext", "wikitext-2-raw-v1", split="train").shard(rank=rank, size=size)
```
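In the meantime, the same effect can be approximated by picking every `size`-th index. This is only a sketch — it assumes a `select(indices)` method is available on `Dataset`, which may not be the case in the installed version:
```python
import nlp

rank, size = 3, 64
dataset = nlp.load_dataset("wikitext", "wikitext-2-raw-v1", split="train")
shard_indices = range(rank, len(dataset), size)  # every size-th example, offset by rank
dataset_shard = dataset.select(shard_indices)    # assumed API, see note above
```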
TensorFlow has a similar API: https://www.tensorflow.org/api_docs/python/tf/data/Dataset#shard. I'd be happy to contribute this code. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/312/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/312/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/311 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/311/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/311/comments | https://api.github.com/repos/huggingface/datasets/issues/311/events | https://github.com/huggingface/datasets/pull/311 | 645,013,131 | MDExOlB1bGxSZXF1ZXN0NDM5NTQ3OTg0 | 311 | Add qa_zre | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-24T22:17:22 | 2020-06-29T16:37:38 | 2020-06-29T16:37:38 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/311",
"html_url": "https://github.com/huggingface/datasets/pull/311",
"diff_url": "https://github.com/huggingface/datasets/pull/311.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/311.patch",
"merged_at": "2020-06-29T16:37:38"
} | Adding the QA-ZRE dataset from ["Zero-Shot Relation Extraction via Reading Comprehension"](http://nlp.cs.washington.edu/zeroshot/).
A common processing step seems to be replacing the `XXX` placeholder with the `subject`. I've left this out as it's something you could easily do with `map`. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/311/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/311/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/310 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/310/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/310/comments | https://api.github.com/repos/huggingface/datasets/issues/310/events | https://github.com/huggingface/datasets/pull/310 | 644,806,720 | MDExOlB1bGxSZXF1ZXN0NDM5MzY1MDg5 | 310 | add wikisql | {
"login": "ghomasHudson",
"id": 13795113,
"node_id": "MDQ6VXNlcjEzNzk1MTEz",
"avatar_url": "https://avatars.githubusercontent.com/u/13795113?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/ghomasHudson",
"html_url": "https://github.com/ghomasHudson",
"followers_url": "https://api.github.com/users/ghomasHudson/followers",
"following_url": "https://api.github.com/users/ghomasHudson/following{/other_user}",
"gists_url": "https://api.github.com/users/ghomasHudson/gists{/gist_id}",
"starred_url": "https://api.github.com/users/ghomasHudson/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/ghomasHudson/subscriptions",
"organizations_url": "https://api.github.com/users/ghomasHudson/orgs",
"repos_url": "https://api.github.com/users/ghomasHudson/repos",
"events_url": "https://api.github.com/users/ghomasHudson/events{/privacy}",
"received_events_url": "https://api.github.com/users/ghomasHudson/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-24T18:00:35 | 2020-06-25T12:32:25 | 2020-06-25T12:32:25 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/310",
"html_url": "https://github.com/huggingface/datasets/pull/310",
"diff_url": "https://github.com/huggingface/datasets/pull/310.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/310.patch",
"merged_at": "2020-06-25T12:32:25"
} | Adding the [WikiSQL](https://github.com/salesforce/WikiSQL) dataset.
Interesting things to note:
- I have copied the function (`_convert_to_human_readable`) which converts the SQL query to a human-readable (string) format, as this is what most people will want when actually using this dataset for NLP applications (see the quick sketch after this list).
- `conds` was originally a tuple but is converted to a dictionary to support differing types.
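For context, inspecting a loaded example might look roughly like this — a sketch only, where the exact feature names are assumptions based on the description above rather than a spec:
```python
import nlp

wikisql = nlp.load_dataset("wikisql")
example = wikisql["train"][0]
print(example["question"])
print(example["sql"])  # assumed to hold the human-readable query string and the `conds` dictionary
```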
Would be nice to add the logical_form metrics too at some point. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/310/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/310/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/309 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/309/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/309/comments | https://api.github.com/repos/huggingface/datasets/issues/309/events | https://github.com/huggingface/datasets/pull/309 | 644,783,822 | MDExOlB1bGxSZXF1ZXN0NDM5MzQ1NzYz | 309 | Add narrative qa | {
"login": "Varal7",
"id": 8019486,
"node_id": "MDQ6VXNlcjgwMTk0ODY=",
"avatar_url": "https://avatars.githubusercontent.com/u/8019486?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Varal7",
"html_url": "https://github.com/Varal7",
"followers_url": "https://api.github.com/users/Varal7/followers",
"following_url": "https://api.github.com/users/Varal7/following{/other_user}",
"gists_url": "https://api.github.com/users/Varal7/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Varal7/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Varal7/subscriptions",
"organizations_url": "https://api.github.com/users/Varal7/orgs",
"repos_url": "https://api.github.com/users/Varal7/repos",
"events_url": "https://api.github.com/users/Varal7/events{/privacy}",
"received_events_url": "https://api.github.com/users/Varal7/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 11 | 2020-06-24T17:26:18 | 2020-09-03T09:02:10 | 2020-09-03T09:02:09 | NONE | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/309",
"html_url": "https://github.com/huggingface/datasets/pull/309",
"diff_url": "https://github.com/huggingface/datasets/pull/309.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/309.patch",
"merged_at": null
} | Test cases for dummy data don't pass yet.
It currently only contains data for the summaries (not the whole stories). | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/309/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/309/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/308 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/308/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/308/comments | https://api.github.com/repos/huggingface/datasets/issues/308/events | https://github.com/huggingface/datasets/pull/308 | 644,195,251 | MDExOlB1bGxSZXF1ZXN0NDM4ODYyMzYy | 308 | Specify utf-8 encoding for MRPC files | {
"login": "patpizio",
"id": 15801338,
"node_id": "MDQ6VXNlcjE1ODAxMzM4",
"avatar_url": "https://avatars.githubusercontent.com/u/15801338?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/patpizio",
"html_url": "https://github.com/patpizio",
"followers_url": "https://api.github.com/users/patpizio/followers",
"following_url": "https://api.github.com/users/patpizio/following{/other_user}",
"gists_url": "https://api.github.com/users/patpizio/gists{/gist_id}",
"starred_url": "https://api.github.com/users/patpizio/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/patpizio/subscriptions",
"organizations_url": "https://api.github.com/users/patpizio/orgs",
"repos_url": "https://api.github.com/users/patpizio/repos",
"events_url": "https://api.github.com/users/patpizio/events{/privacy}",
"received_events_url": "https://api.github.com/users/patpizio/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-23T22:44:36 | 2020-06-25T12:52:21 | 2020-06-25T12:16:10 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/308",
"html_url": "https://github.com/huggingface/datasets/pull/308",
"diff_url": "https://github.com/huggingface/datasets/pull/308.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/308.patch",
"merged_at": "2020-06-25T12:16:09"
} | Fixes #307, again probably a Windows-related issue. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/308/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/308/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/307 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/307/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/307/comments | https://api.github.com/repos/huggingface/datasets/issues/307/events | https://github.com/huggingface/datasets/issues/307 | 644,187,262 | MDU6SXNzdWU2NDQxODcyNjI= | 307 | Specify encoding for MRPC | {
"login": "patpizio",
"id": 15801338,
"node_id": "MDQ6VXNlcjE1ODAxMzM4",
"avatar_url": "https://avatars.githubusercontent.com/u/15801338?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/patpizio",
"html_url": "https://github.com/patpizio",
"followers_url": "https://api.github.com/users/patpizio/followers",
"following_url": "https://api.github.com/users/patpizio/following{/other_user}",
"gists_url": "https://api.github.com/users/patpizio/gists{/gist_id}",
"starred_url": "https://api.github.com/users/patpizio/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/patpizio/subscriptions",
"organizations_url": "https://api.github.com/users/patpizio/orgs",
"repos_url": "https://api.github.com/users/patpizio/repos",
"events_url": "https://api.github.com/users/patpizio/events{/privacy}",
"received_events_url": "https://api.github.com/users/patpizio/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-23T22:24:49 | 2020-06-25T12:16:09 | 2020-06-25T12:16:09 | CONTRIBUTOR | null | null | null | Same as #242, but with MRPC: on Windows, I get a `UnicodeDecodeError` when I try to download the dataset:
```python
dataset = nlp.load_dataset('glue', 'mrpc')
```
```python
Downloading and preparing dataset glue/mrpc (download: Unknown size, generated: Unknown size, total: Unknown size) to C:\Users\Python\.cache\huggingface\datasets\glue\mrpc\1.0.0...
---------------------------------------------------------------------------
UnicodeDecodeError Traceback (most recent call last)
~\Miniconda3\envs\nlp\lib\site-packages\nlp\builder.py in incomplete_dir(dirname)
369 try:
--> 370 yield tmp_dir
371 if os.path.isdir(dirname):
~\Miniconda3\envs\nlp\lib\site-packages\nlp\builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, save_infos, try_from_hf_gcs, dl_manager, **download_and_prepare_kwargs)
430 verify_infos = not save_infos and not ignore_verifications
--> 431 self._download_and_prepare(
432 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
~\Miniconda3\envs\nlp\lib\site-packages\nlp\builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
482 # Prepare split will record examples associated to the split
--> 483 self._prepare_split(split_generator, **prepare_split_kwargs)
484 except OSError:
~\Miniconda3\envs\nlp\lib\site-packages\nlp\builder.py in _prepare_split(self, split_generator)
663 generator = self._generate_examples(**split_generator.gen_kwargs)
--> 664 for key, record in utils.tqdm(generator, unit=" examples", total=split_info.num_examples, leave=False):
665 example = self.info.features.encode_example(record)
~\Miniconda3\envs\nlp\lib\site-packages\tqdm\notebook.py in __iter__(self, *args, **kwargs)
217 try:
--> 218 for obj in super(tqdm_notebook, self).__iter__(*args, **kwargs):
219 # return super(tqdm...) will not catch exception
~\Miniconda3\envs\nlp\lib\site-packages\tqdm\std.py in __iter__(self)
1128 try:
-> 1129 for obj in iterable:
1130 yield obj
~\Miniconda3\envs\nlp\lib\site-packages\nlp\datasets\glue\7fc58099eb3983a04c8dac8500b70d27e6eceae63ffb40d7900c977897bb58c6\glue.py in _generate_examples(self, data_file, split, mrpc_files)
514 examples = self._generate_example_mrpc_files(mrpc_files=mrpc_files, split=split)
--> 515 for example in examples:
516 yield example["idx"], example
~\Miniconda3\envs\nlp\lib\site-packages\nlp\datasets\glue\7fc58099eb3983a04c8dac8500b70d27e6eceae63ffb40d7900c977897bb58c6\glue.py in _generate_example_mrpc_files(self, mrpc_files, split)
576 reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
--> 577 for n, row in enumerate(reader):
578 is_row_in_dev = [row["#1 ID"], row["#2 ID"]] in dev_ids
~\Miniconda3\envs\nlp\lib\csv.py in __next__(self)
110 self.fieldnames
--> 111 row = next(self.reader)
112 self.line_num = self.reader.line_num
~\Miniconda3\envs\nlp\lib\encodings\cp1252.py in decode(self, input, final)
22 def decode(self, input, final=False):
---> 23 return codecs.charmap_decode(input,self.errors,decoding_table)[0]
24
UnicodeDecodeError: 'charmap' codec can't decode byte 0x9d in position 1180: character maps to <undefined>
```
The fix is the same: specify `utf-8` encoding when opening the file. The previous fix didn't work as MRPC's download process is different from the others in GLUE.
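For illustration, the change amounts to forcing `utf-8` wherever the MRPC files are opened — roughly along these lines (a sketch, not the actual `glue.py` code):
```python
import csv

def read_mrpc_rows(mrpc_file):
    # force utf-8 instead of the platform default (cp1252 on Windows)
    with open(mrpc_file, encoding="utf-8") as f:
        reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
        for row in reader:
            yield row
```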
I am going to propose a new PR :) | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/307/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/307/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/306 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/306/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/306/comments | https://api.github.com/repos/huggingface/datasets/issues/306/events | https://github.com/huggingface/datasets/pull/306 | 644,176,078 | MDExOlB1bGxSZXF1ZXN0NDM4ODQ2MTI3 | 306 | add pg19 dataset | {
"login": "lucidrains",
"id": 108653,
"node_id": "MDQ6VXNlcjEwODY1Mw==",
"avatar_url": "https://avatars.githubusercontent.com/u/108653?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lucidrains",
"html_url": "https://github.com/lucidrains",
"followers_url": "https://api.github.com/users/lucidrains/followers",
"following_url": "https://api.github.com/users/lucidrains/following{/other_user}",
"gists_url": "https://api.github.com/users/lucidrains/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lucidrains/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lucidrains/subscriptions",
"organizations_url": "https://api.github.com/users/lucidrains/orgs",
"repos_url": "https://api.github.com/users/lucidrains/repos",
"events_url": "https://api.github.com/users/lucidrains/events{/privacy}",
"received_events_url": "https://api.github.com/users/lucidrains/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 12 | 2020-06-23T22:03:52 | 2020-07-06T07:55:59 | 2020-07-06T07:55:59 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/306",
"html_url": "https://github.com/huggingface/datasets/pull/306",
"diff_url": "https://github.com/huggingface/datasets/pull/306.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/306.patch",
"merged_at": "2020-07-06T07:55:59"
} | https://github.com/huggingface/nlp/issues/274
Add functioning PG19 dataset with dummy data
`cos_e.py` was just auto-linted by `make style` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/306/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/306/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/305 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/305/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/305/comments | https://api.github.com/repos/huggingface/datasets/issues/305/events | https://github.com/huggingface/datasets/issues/305 | 644,148,149 | MDU6SXNzdWU2NDQxNDgxNDk= | 305 | Importing downloaded package repository fails | {
"login": "yjernite",
"id": 10469459,
"node_id": "MDQ6VXNlcjEwNDY5NDU5",
"avatar_url": "https://avatars.githubusercontent.com/u/10469459?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/yjernite",
"html_url": "https://github.com/yjernite",
"followers_url": "https://api.github.com/users/yjernite/followers",
"following_url": "https://api.github.com/users/yjernite/following{/other_user}",
"gists_url": "https://api.github.com/users/yjernite/gists{/gist_id}",
"starred_url": "https://api.github.com/users/yjernite/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/yjernite/subscriptions",
"organizations_url": "https://api.github.com/users/yjernite/orgs",
"repos_url": "https://api.github.com/users/yjernite/repos",
"events_url": "https://api.github.com/users/yjernite/events{/privacy}",
"received_events_url": "https://api.github.com/users/yjernite/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067393914,
"node_id": "MDU6TGFiZWwyMDY3MzkzOTE0",
"url": "https://api.github.com/repos/huggingface/datasets/labels/metric%20bug",
"name": "metric bug",
"color": "25b21e",
"default": false,
"description": "A bug in a metric script"
}
] | closed | false | null | [] | null | 0 | 2020-06-23T21:09:05 | 2020-07-30T16:44:23 | 2020-07-30T16:44:23 | MEMBER | null | null | null | The `get_imports` function in `src/nlp/load.py` has a feature to download a package as a zip archive of the github repository and import functions from the unpacked directory. This is used for example in the `metrics/coval.py` file, and would be useful to add BLEURT (@ankparikh).
Currently however, the code seems to have trouble with imports within the package. For example:
```
import nlp
coval = nlp.load_metric('coval')
```
yields:
```
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/yacine/Code/nlp/src/nlp/load.py", line 432, in load_metric
metric_cls = import_main_class(module_path, dataset=False)
File "/home/yacine/Code/nlp/src/nlp/load.py", line 57, in import_main_class
module = importlib.import_module(module_path)
File "/home/yacine/anaconda3/lib/python3.7/importlib/__init__.py", line 127, in import_module
return _bootstrap._gcd_import(name[level:], package, level)
File "<frozen importlib._bootstrap>", line 1006, in _gcd_import
File "<frozen importlib._bootstrap>", line 983, in _find_and_load
File "<frozen importlib._bootstrap>", line 967, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 677, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 728, in exec_module
File "<frozen importlib._bootstrap>", line 219, in _call_with_frames_removed
File "/home/yacine/Code/nlp/src/nlp/metrics/coval/a78807df33ac45edbb71799caf2b3b47e55df4fd690267808fe963a5e8b30952/coval.py", line 21, in <module>
from .coval_backend.conll import reader # From: https://github.com/ns-moosavi/coval
File "/home/yacine/Code/nlp/src/nlp/metrics/coval/a78807df33ac45edbb71799caf2b3b47e55df4fd690267808fe963a5e8b30952/coval_backend/conll/reader.py", line 2, in <module>
from conll import mention
ModuleNotFoundError: No module named 'conll'
```
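One possible direction (an untested sketch — the path below is only a placeholder) would be to put the unpacked repository on `sys.path` before importing, so that `conll` resolves as a top-level package:
```python
import sys

# hypothetical location of the unpacked coval repository in the metrics cache
coval_backend_dir = "/path/to/nlp/metrics/coval/<hash>/coval_backend"

if coval_backend_dir not in sys.path:
    sys.path.append(coval_backend_dir)

from conll import reader  # should now resolve as a top-level package
```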
Not sure what the cleanest general fix would be there. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/305/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 1,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/305/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/304 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/304/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/304/comments | https://api.github.com/repos/huggingface/datasets/issues/304/events | https://github.com/huggingface/datasets/issues/304 | 644,091,970 | MDU6SXNzdWU2NDQwOTE5NzA= | 304 | Problem while printing doc string when instantiating multiple metrics. | {
"login": "codehunk628",
"id": 51091425,
"node_id": "MDQ6VXNlcjUxMDkxNDI1",
"avatar_url": "https://avatars.githubusercontent.com/u/51091425?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/codehunk628",
"html_url": "https://github.com/codehunk628",
"followers_url": "https://api.github.com/users/codehunk628/followers",
"following_url": "https://api.github.com/users/codehunk628/following{/other_user}",
"gists_url": "https://api.github.com/users/codehunk628/gists{/gist_id}",
"starred_url": "https://api.github.com/users/codehunk628/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/codehunk628/subscriptions",
"organizations_url": "https://api.github.com/users/codehunk628/orgs",
"repos_url": "https://api.github.com/users/codehunk628/repos",
"events_url": "https://api.github.com/users/codehunk628/events{/privacy}",
"received_events_url": "https://api.github.com/users/codehunk628/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067393914,
"node_id": "MDU6TGFiZWwyMDY3MzkzOTE0",
"url": "https://api.github.com/repos/huggingface/datasets/labels/metric%20bug",
"name": "metric bug",
"color": "25b21e",
"default": false,
"description": "A bug in a metric script"
}
] | closed | false | null | [] | null | 0 | 2020-06-23T19:32:05 | 2020-07-22T09:50:58 | 2020-07-22T09:50:58 | CONTRIBUTOR | null | null | null | When I load more than one metric and try to print the doc string of a particular metric, it shows the doc strings of all imported metrics one after the other, which looks quite confusing and clumsy.
Attached is a [Colab](https://colab.research.google.com/drive/13H0ZgyQ2se0mqJ2yyew0bNEgJuHaJ8H3?usp=sharing) notebook for problem clarification. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/304/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/304/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/303 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/303/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/303/comments | https://api.github.com/repos/huggingface/datasets/issues/303/events | https://github.com/huggingface/datasets/pull/303 | 643,912,464 | MDExOlB1bGxSZXF1ZXN0NDM4NjI3Nzcw | 303 | allow to move files across file systems | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-23T14:56:08 | 2020-06-23T15:08:44 | 2020-06-23T15:08:43 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/303",
"html_url": "https://github.com/huggingface/datasets/pull/303",
"diff_url": "https://github.com/huggingface/datasets/pull/303.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/303.patch",
"merged_at": "2020-06-23T15:08:43"
} | Users are allowed to use the `cache_dir` that they want.
Therefore it can happen that we try to move files across filesystems.
We were using `os.rename`, which doesn't allow that, so I changed some of those calls to `shutil.move`.
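For illustration only (not the actual diff), the pattern of the change is roughly:
```python
import os
import shutil

def move_to_cache(downloaded_dataset_info: str, cache_dir: str) -> None:
    # os.rename fails with "Invalid cross-device link" when cache_dir lives on
    # another filesystem; shutil.move falls back to copy + delete in that case.
    shutil.move(downloaded_dataset_info, os.path.join(cache_dir, "dataset_info.json"))
```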
This should fix #301 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/303/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/303/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/302 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/302/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/302/comments | https://api.github.com/repos/huggingface/datasets/issues/302/events | https://github.com/huggingface/datasets/issues/302 | 643,910,418 | MDU6SXNzdWU2NDM5MTA0MTg= | 302 | Question - Sign Language Datasets | {
"login": "AmitMY",
"id": 5757359,
"node_id": "MDQ6VXNlcjU3NTczNTk=",
"avatar_url": "https://avatars.githubusercontent.com/u/5757359?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/AmitMY",
"html_url": "https://github.com/AmitMY",
"followers_url": "https://api.github.com/users/AmitMY/followers",
"following_url": "https://api.github.com/users/AmitMY/following{/other_user}",
"gists_url": "https://api.github.com/users/AmitMY/gists{/gist_id}",
"starred_url": "https://api.github.com/users/AmitMY/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/AmitMY/subscriptions",
"organizations_url": "https://api.github.com/users/AmitMY/orgs",
"repos_url": "https://api.github.com/users/AmitMY/repos",
"events_url": "https://api.github.com/users/AmitMY/events{/privacy}",
"received_events_url": "https://api.github.com/users/AmitMY/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 1935892871,
"node_id": "MDU6TGFiZWwxOTM1ODkyODcx",
"url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement",
"name": "enhancement",
"color": "a2eeef",
"default": true,
"description": "New feature or request"
},
{
"id": 2067400324,
"node_id": "MDU6TGFiZWwyMDY3NDAwMzI0",
"url": "https://api.github.com/repos/huggingface/datasets/labels/generic%20discussion",
"name": "generic discussion",
"color": "c5def5",
"default": false,
"description": "Generic discussion on the library"
}
] | closed | false | null | [] | null | 3 | 2020-06-23T14:53:40 | 2020-11-25T11:25:33 | 2020-11-25T11:25:33 | CONTRIBUTOR | null | null | null | An emerging field in NLP is SLP - sign language processing.
I was wondering about adding datasets here, specifically because it's shaping up to be large and easily usable.
The metrics for sign language to text translation are the same.
So, what do you think about (me, or others) adding datasets here?
An example dataset would be [RWTH-PHOENIX-Weather 2014 T](https://www-i6.informatik.rwth-aachen.de/~koller/RWTH-PHOENIX-2014-T/)
For every item in the dataset, the data object includes:
1. video_path - path to mp4 file
2. pose_path - a path to `.pose` file with human pose landmarks
3. openpose_path - a path to a `.json` file with human pose landmarks
4. gloss - string
5. text - string
6. video_metadata - height, width, frames, framerate
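A dataset script for something like this might declare its features roughly as follows — a sketch only, where the names and types are assumptions rather than an actual implementation:
```python
import nlp

features = nlp.Features(
    {
        "video_path": nlp.Value("string"),
        "pose_path": nlp.Value("string"),
        "openpose_path": nlp.Value("string"),
        "gloss": nlp.Value("string"),
        "text": nlp.Value("string"),
        "video_metadata": {
            "height": nlp.Value("int32"),
            "width": nlp.Value("int32"),
            "frames": nlp.Value("int32"),
            "framerate": nlp.Value("float32"),
        },
    }
)
```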
------
To make it a tad more complicated - what if sign language libraries add requirements to `nlp`? For example, sign language is commonly annotated using `ilex`, `eaf`, or `srt` files, which are all loadable as text, but there is no reason for the dataset to parse those files by itself if libraries exist to do so. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/302/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/302/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/301 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/301/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/301/comments | https://api.github.com/repos/huggingface/datasets/issues/301/events | https://github.com/huggingface/datasets/issues/301 | 643,763,525 | MDU6SXNzdWU2NDM3NjM1MjU= | 301 | Setting cache_dir gives error on wikipedia download | {
"login": "hallvagi",
"id": 33862536,
"node_id": "MDQ6VXNlcjMzODYyNTM2",
"avatar_url": "https://avatars.githubusercontent.com/u/33862536?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/hallvagi",
"html_url": "https://github.com/hallvagi",
"followers_url": "https://api.github.com/users/hallvagi/followers",
"following_url": "https://api.github.com/users/hallvagi/following{/other_user}",
"gists_url": "https://api.github.com/users/hallvagi/gists{/gist_id}",
"starred_url": "https://api.github.com/users/hallvagi/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/hallvagi/subscriptions",
"organizations_url": "https://api.github.com/users/hallvagi/orgs",
"repos_url": "https://api.github.com/users/hallvagi/repos",
"events_url": "https://api.github.com/users/hallvagi/events{/privacy}",
"received_events_url": "https://api.github.com/users/hallvagi/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-06-23T11:31:44 | 2020-06-24T07:05:07 | 2020-06-24T07:05:07 | NONE | null | null | null | First of all thank you for a super handy library! I'd like to download large files to a specific drive so I set `cache_dir=my_path`. This works fine with e.g. imdb and squad. But on wikipedia I get an error:
```
nlp.load_dataset('wikipedia', '20200501.de', split = 'train', cache_dir=my_path)
```
```
OSError Traceback (most recent call last)
<ipython-input-2-23551344d7bc> in <module>
1 import nlp
----> 2 nlp.load_dataset('wikipedia', '20200501.de', split = 'train', cache_dir=path)
~/anaconda3/envs/fastai2/lib/python3.7/site-packages/nlp/load.py in load_dataset(path, name, version, data_dir, data_files, split, cache_dir, download_config, download_mode, ignore_verifications, save_infos, **config_kwargs)
522 download_mode=download_mode,
523 ignore_verifications=ignore_verifications,
--> 524 save_infos=save_infos,
525 )
526
~/anaconda3/envs/fastai2/lib/python3.7/site-packages/nlp/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, save_infos, try_from_hf_gcs, dl_manager, **download_and_prepare_kwargs)
385 with utils.temporary_assignment(self, "_cache_dir", tmp_data_dir):
386 reader = ArrowReader(self._cache_dir, self.info)
--> 387 reader.download_from_hf_gcs(self._cache_dir, self._relative_data_dir(with_version=True))
388 downloaded_info = DatasetInfo.from_directory(self._cache_dir)
389 self.info.update(downloaded_info)
~/anaconda3/envs/fastai2/lib/python3.7/site-packages/nlp/arrow_reader.py in download_from_hf_gcs(self, cache_dir, relative_data_dir)
231 remote_dataset_info = os.path.join(remote_cache_dir, "dataset_info.json")
232 downloaded_dataset_info = cached_path(remote_dataset_info)
--> 233 os.rename(downloaded_dataset_info, os.path.join(cache_dir, "dataset_info.json"))
234 if self._info is not None:
235 self._info.update(self._info.from_directory(cache_dir))
OSError: [Errno 18] Invalid cross-device link: '/home/local/NTU/nn/.cache/huggingface/datasets/025fa4fd4f04aaafc9e939260fbc8f0bb190ce14c61310c8ae1ddd1dcb31f88c.9637f367b6711a79ca478be55fe6989b8aea4941b7ef7adc67b89ff403020947' -> '/data/nn/nlp/wikipedia/20200501.de/1.0.0.incomplete/dataset_info.json'
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/301/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/301/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/300 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/300/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/300/comments | https://api.github.com/repos/huggingface/datasets/issues/300/events | https://github.com/huggingface/datasets/pull/300 | 643,688,304 | MDExOlB1bGxSZXF1ZXN0NDM4NDQ4Mjk1 | 300 | Fix bertscore references | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-23T09:38:59 | 2020-06-23T14:47:38 | 2020-06-23T14:47:37 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/300",
"html_url": "https://github.com/huggingface/datasets/pull/300",
"diff_url": "https://github.com/huggingface/datasets/pull/300.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/300.patch",
"merged_at": "2020-06-23T14:47:36"
} | I added some type checking for metrics. There was an issue where a metric could interpret a string as a list. A `ValueError` is now raised if a string is given instead of a list.
Moreover, I added support for both strings and lists of strings for `references` in `bertscore`, as is the case in the original code.
Both ways work:
```
import nlp
scorer = nlp.load_metric("bertscore")
with open("pred.txt") as p, open("ref.txt") as g:
for lp, lg in zip(p, g):
scorer.add(lp, [lg])
score = scorer.compute(lang="en")
```
```
import nlp
scorer = nlp.load_metric("bertscore")
with open("pred.txt") as p, open("ref.txt") as g:
for lp, lg in zip(p, g):
scorer.add(lp, lg)
score = scorer.compute(lang="en")
```
This should fix #295 and #238 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/300/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/300/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/299 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/299/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/299/comments | https://api.github.com/repos/huggingface/datasets/issues/299/events | https://github.com/huggingface/datasets/pull/299 | 643,611,557 | MDExOlB1bGxSZXF1ZXN0NDM4Mzg0NDgw | 299 | remove some print in snli file | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-23T07:46:06 | 2020-06-23T08:10:46 | 2020-06-23T08:10:44 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/299",
"html_url": "https://github.com/huggingface/datasets/pull/299",
"diff_url": "https://github.com/huggingface/datasets/pull/299.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/299.patch",
"merged_at": "2020-06-23T08:10:44"
} | This PR removes unwanted `print` statements in some files such as `snli.py` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/299/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/299/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/298 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/298/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/298/comments | https://api.github.com/repos/huggingface/datasets/issues/298/events | https://github.com/huggingface/datasets/pull/298 | 643,603,804 | MDExOlB1bGxSZXF1ZXN0NDM4Mzc4MDM4 | 298 | Add searchable datasets | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 8 | 2020-06-23T07:33:03 | 2020-06-26T07:50:44 | 2020-06-26T07:50:43 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/298",
"html_url": "https://github.com/huggingface/datasets/pull/298",
"diff_url": "https://github.com/huggingface/datasets/pull/298.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/298.patch",
"merged_at": "2020-06-26T07:50:43"
} | # Better support for Numpy format + Add Indexed Datasets
I was working on adding Indexed Datasets but in the meantime I had to also add more support for Numpy arrays in the lib.
## Better support for Numpy format
New features:
- New fast method to convert Numpy arrays from Arrow structure (up to x100 speed up) using Pandas.
- Allow to output Numpy arrays in batched `.map`, which was the only missing part to fully support Numpy arrays.
Pandas offers fast zero-copy Numpy arrays conversion from Arrow structures.
Using it we can speed up the reading of memory-mapped Numpy array stored in Arrow format.
With these changes you can easily compute embeddings of texts using `.map()`. For example:
```python
def embed(text):
    tokenized_example = tokenizer.encode(text, return_tensors="pt")
    embeddings = bert_encoder(tokenized_example).numpy()
    return embeddings

dset_with_embeddings = dset.map(lambda example: {"embeddings": embed(example["text"])})
```
Reading the embeddings back from the Arrow format is then very fast.
PS1: Note that right now only 1d arrays are supported.
PS2: It seems possible to do this without pandas, but it would require more _trickery_.
PS3: I did a simple benchmark with google colab that you can view here:
https://colab.research.google.com/drive/1QlLTR6LRwYOKGJ-hTHmHyolE3wJzvfFg?usp=sharing
## Add Indexed Datasets
For many retrieval tasks it is convenient to index a dataset to be able to run fast queries.
For example, for Open Domain QA models like DPR, REALM, and RAG, the retrieval step is very important.
Therefore I added two ways to add an index to a column of a dataset:
1) You can index it using a Dense Index like Faiss. It is used to index vectors.
Faiss is a library for efficient similarity search and clustering of dense vectors.
It contains algorithms that search in sets of vectors of any size, up to ones that possibly do not fit in RAM.
2) You can index it using a Sparse Index like Elasticsearch. It is used to index text and run queries based on BM25 similarity.
Example of usage:
```python
ds = nlp.load_dataset('crime_and_punish', split='train')
ds_with_embeddings = ds.map(lambda example: {'embeddings': embed(example['line'])})  # `embed` outputs a `np.array`
ds_with_embeddings.add_vector_index(column='embeddings')
scores, retrieved_examples = ds_with_embeddings.get_nearest(column='embeddings', query=embed('my new query'), k=10)
```
```python
ds = nlp.load_dataset('crime_and_punish', split='train')
es_client = elasticsearch.Elasticsearch()
ds.add_text_index(column='line', es_client=es_client, index_name="my_es_index")
scores, retrieved_examples = ds.get_nearest(column='line', query='my new query', k=10)
```
PS4: Faiss allows to specify many options for the [index](https://github.com/facebookresearch/faiss/wiki/The-index-factory) and for [GPU settings](https://github.com/facebookresearch/faiss/wiki/Faiss-on-the-GPU). I made sure that the user has full control over those settings.
## Tests
I added tests for Faiss, Elasticsearch and indexed datasets.
I had to edit the CI config because not all the test scripts were being run by CircleCI.
------------------
I'd be really happy to have some feedbacks :) | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/298/reactions",
"total_count": 1,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 1,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/298/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/297 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/297/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/297/comments | https://api.github.com/repos/huggingface/datasets/issues/297/events | https://github.com/huggingface/datasets/issues/297 | 643,444,625 | MDU6SXNzdWU2NDM0NDQ2MjU= | 297 | Error in Demo for Specific Datasets | {
"login": "s-jse",
"id": 60150701,
"node_id": "MDQ6VXNlcjYwMTUwNzAx",
"avatar_url": "https://avatars.githubusercontent.com/u/60150701?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/s-jse",
"html_url": "https://github.com/s-jse",
"followers_url": "https://api.github.com/users/s-jse/followers",
"following_url": "https://api.github.com/users/s-jse/following{/other_user}",
"gists_url": "https://api.github.com/users/s-jse/gists{/gist_id}",
"starred_url": "https://api.github.com/users/s-jse/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/s-jse/subscriptions",
"organizations_url": "https://api.github.com/users/s-jse/orgs",
"repos_url": "https://api.github.com/users/s-jse/repos",
"events_url": "https://api.github.com/users/s-jse/events{/privacy}",
"received_events_url": "https://api.github.com/users/s-jse/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2107841032,
"node_id": "MDU6TGFiZWwyMTA3ODQxMDMy",
"url": "https://api.github.com/repos/huggingface/datasets/labels/nlp-viewer",
"name": "nlp-viewer",
"color": "94203D",
"default": false,
"description": ""
}
] | closed | false | null | [] | null | 3 | 2020-06-23T00:38:42 | 2020-07-17T17:43:06 | 2020-07-17T17:43:06 | NONE | null | null | null | Selecting `natural_questions` or `newsroom` dataset in the online demo results in an error similar to the following.
![image](https://user-images.githubusercontent.com/60150701/85347842-ac861900-b4ae-11ea-98c4-a53a00934783.png)
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/297/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/297/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/296 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/296/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/296/comments | https://api.github.com/repos/huggingface/datasets/issues/296/events | https://github.com/huggingface/datasets/issues/296 | 643,423,717 | MDU6SXNzdWU2NDM0MjM3MTc= | 296 | snli -1 labels | {
"login": "jxmorris12",
"id": 13238952,
"node_id": "MDQ6VXNlcjEzMjM4OTUy",
"avatar_url": "https://avatars.githubusercontent.com/u/13238952?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jxmorris12",
"html_url": "https://github.com/jxmorris12",
"followers_url": "https://api.github.com/users/jxmorris12/followers",
"following_url": "https://api.github.com/users/jxmorris12/following{/other_user}",
"gists_url": "https://api.github.com/users/jxmorris12/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jxmorris12/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jxmorris12/subscriptions",
"organizations_url": "https://api.github.com/users/jxmorris12/orgs",
"repos_url": "https://api.github.com/users/jxmorris12/repos",
"events_url": "https://api.github.com/users/jxmorris12/events{/privacy}",
"received_events_url": "https://api.github.com/users/jxmorris12/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 4 | 2020-06-22T23:33:30 | 2020-06-23T14:41:59 | 2020-06-23T14:41:58 | CONTRIBUTOR | null | null | null | I'm trying to train a model on the SNLI dataset. Why does it have so many -1 labels?
```
import nlp
from collections import Counter
data = nlp.load_dataset('snli')['train']
print(Counter(data['label']))
Counter({0: 183416, 2: 183187, 1: 182764, -1: 785})
```
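A possible workaround — assuming the `-1` entries are examples you simply want to drop, and that `Dataset.filter` is available in your version — is:
```python
import nlp

data = nlp.load_dataset("snli")["train"]
data = data.filter(lambda example: example["label"] != -1)  # drop the -1-labelled examples
```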
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/296/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/296/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/295 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/295/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/295/comments | https://api.github.com/repos/huggingface/datasets/issues/295/events | https://github.com/huggingface/datasets/issues/295 | 643,245,412 | MDU6SXNzdWU2NDMyNDU0MTI= | 295 | Improve input warning for evaluation metrics | {
"login": "Tiiiger",
"id": 19514537,
"node_id": "MDQ6VXNlcjE5NTE0NTM3",
"avatar_url": "https://avatars.githubusercontent.com/u/19514537?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/Tiiiger",
"html_url": "https://github.com/Tiiiger",
"followers_url": "https://api.github.com/users/Tiiiger/followers",
"following_url": "https://api.github.com/users/Tiiiger/following{/other_user}",
"gists_url": "https://api.github.com/users/Tiiiger/gists{/gist_id}",
"starred_url": "https://api.github.com/users/Tiiiger/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/Tiiiger/subscriptions",
"organizations_url": "https://api.github.com/users/Tiiiger/orgs",
"repos_url": "https://api.github.com/users/Tiiiger/repos",
"events_url": "https://api.github.com/users/Tiiiger/events{/privacy}",
"received_events_url": "https://api.github.com/users/Tiiiger/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-22T17:28:57 | 2020-06-23T14:47:37 | 2020-06-23T14:47:37 | NONE | null | null | null | Hi,
I am the author of `bert_score`. Recently, we received [an issue](https://github.com/Tiiiger/bert_score/issues/62) reporting a problem in using `bert_score` from the `nlp` package (also see #238 in this repo). After looking into this, I realized that the problem arises from the format in which `nlp.Metric` takes its input.
Here is a minimal example:
```python
import nlp
scorer = nlp.load_metric("bertscore")
with open("pred.txt") as p, open("ref.txt") as g:
    for lp, lg in zip(p, g):
        scorer.add(lp, lg)
score = scorer.compute(lang="en")
```
The problem in the above code is that `scorer.add()` expects a list of strings as input for the references. As a result, the `scorer` here would take a list of characters in `lg` to be the references. The correct implementation would be calling
```python
scorer.add(lp, [lg])
```
I just want to raise this issue to you to prevent future user errors of a similar kind. I assume some simple type checking can prevent this from happening?
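For example, a minimal check along these lines (just a sketch, not a proposal for the exact `nlp` implementation) would already catch the mistake:
```python
def _check_references(references):
    # sketch: validate that references are a list of strings rather than a bare string,
    # which would otherwise be iterated character by character
    if isinstance(references, str):
        raise TypeError("references should be a list of strings, e.g. [reference], not a single string")
    if not all(isinstance(r, str) for r in references):
        raise TypeError("each reference should be a string")
```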
Thanks! | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/295/reactions",
"total_count": 2,
"+1": 2,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/295/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/294 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/294/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/294/comments | https://api.github.com/repos/huggingface/datasets/issues/294/events | https://github.com/huggingface/datasets/issues/294 | 643,181,179 | MDU6SXNzdWU2NDMxODExNzk= | 294 | Cannot load arxiv dataset on MacOS? | {
"login": "JohnGiorgi",
"id": 8917831,
"node_id": "MDQ6VXNlcjg5MTc4MzE=",
"avatar_url": "https://avatars.githubusercontent.com/u/8917831?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/JohnGiorgi",
"html_url": "https://github.com/JohnGiorgi",
"followers_url": "https://api.github.com/users/JohnGiorgi/followers",
"following_url": "https://api.github.com/users/JohnGiorgi/following{/other_user}",
"gists_url": "https://api.github.com/users/JohnGiorgi/gists{/gist_id}",
"starred_url": "https://api.github.com/users/JohnGiorgi/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JohnGiorgi/subscriptions",
"organizations_url": "https://api.github.com/users/JohnGiorgi/orgs",
"repos_url": "https://api.github.com/users/JohnGiorgi/repos",
"events_url": "https://api.github.com/users/JohnGiorgi/events{/privacy}",
"received_events_url": "https://api.github.com/users/JohnGiorgi/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067388877,
"node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug",
"name": "dataset bug",
"color": "2edb81",
"default": false,
"description": "A bug in a dataset script provided in the library"
}
] | closed | false | null | [] | null | 4 | 2020-06-22T15:46:55 | 2020-06-30T15:25:10 | 2020-06-30T15:25:10 | NONE | null | null | null | I am having trouble loading the `"arxiv"` config from the `"scientific_papers"` dataset on MacOS. When I try loading the dataset with:
```python
arxiv = nlp.load_dataset("scientific_papers", "arxiv")
```
I get the following stack trace:
```bash
JSONDecodeError Traceback (most recent call last)
<ipython-input-2-8e00c55d5a59> in <module>
----> 1 arxiv = nlp.load_dataset("scientific_papers", "arxiv")
~/miniconda3/envs/t2t/lib/python3.7/site-packages/nlp/load.py in load_dataset(path, name, version, data_dir, data_files, split, cache_dir, download_config, download_mode, ignore_verifications, save_infos, **config_kwargs)
522 download_mode=download_mode,
523 ignore_verifications=ignore_verifications,
--> 524 save_infos=save_infos,
525 )
526
~/miniconda3/envs/t2t/lib/python3.7/site-packages/nlp/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, save_infos, try_from_hf_gcs, dl_manager, **download_and_prepare_kwargs)
430 verify_infos = not save_infos and not ignore_verifications
431 self._download_and_prepare(
--> 432 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
433 )
434 # Sync info
~/miniconda3/envs/t2t/lib/python3.7/site-packages/nlp/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
481 try:
482 # Prepare split will record examples associated to the split
--> 483 self._prepare_split(split_generator, **prepare_split_kwargs)
484 except OSError:
485 raise OSError("Cannot find data file. " + (self.manual_download_instructions or ""))
~/miniconda3/envs/t2t/lib/python3.7/site-packages/nlp/builder.py in _prepare_split(self, split_generator)
662
663 generator = self._generate_examples(**split_generator.gen_kwargs)
--> 664 for key, record in utils.tqdm(generator, unit=" examples", total=split_info.num_examples, leave=False):
665 example = self.info.features.encode_example(record)
666 writer.write(example)
~/miniconda3/envs/t2t/lib/python3.7/site-packages/tqdm/std.py in __iter__(self)
1106 fp_write=getattr(self.fp, 'write', sys.stderr.write))
1107
-> 1108 for obj in iterable:
1109 yield obj
1110 # Update and possibly print the progressbar.
~/miniconda3/envs/t2t/lib/python3.7/site-packages/nlp/datasets/scientific_papers/107a416c0e1958cb846f5934b5aae292f7884a5b27e86af3f3ef1a093e058bbc/scientific_papers.py in _generate_examples(self, path)
114 # "section_names": list[str], list of section names.
115 # "sections": list[list[str]], list of sections (list of paragraphs)
--> 116 d = json.loads(line)
117 summary = "\n".join(d["abstract_text"])
118 # In original paper, <S> and </S> are not used in vocab during training
~/miniconda3/envs/t2t/lib/python3.7/json/__init__.py in loads(s, encoding, cls, object_hook, parse_float, parse_int, parse_constant, object_pairs_hook, **kw)
346 parse_int is None and parse_float is None and
347 parse_constant is None and object_pairs_hook is None and not kw):
--> 348 return _default_decoder.decode(s)
349 if cls is None:
350 cls = JSONDecoder
~/miniconda3/envs/t2t/lib/python3.7/json/decoder.py in decode(self, s, _w)
335
336 """
--> 337 obj, end = self.raw_decode(s, idx=_w(s, 0).end())
338 end = _w(s, end).end()
339 if end != len(s):
~/miniconda3/envs/t2t/lib/python3.7/json/decoder.py in raw_decode(self, s, idx)
351 """
352 try:
--> 353 obj, end = self.scan_once(s, idx)
354 except StopIteration as err:
355 raise JSONDecodeError("Expecting value", s, err.value) from None
JSONDecodeError: Unterminated string starting at: line 1 column 46983 (char 46982)
163502 examples [02:10, 2710.68 examples/s]
```
I am not sure how to trace this back to the specific JSON file that has the "Unterminated string". Also, I do not get this error on Colab, so I suspect it may be macOS-specific. Copy-pasting the relevant lines from `transformers-cli env` below:
- Platform: Darwin-19.5.0-x86_64-i386-64bit
- Python version: 3.7.5
- PyTorch version (GPU?): 1.5.0 (False)
- Tensorflow version (GPU?): 2.2.0 (False)
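If it is useful for debugging, here is a rough sketch (the path below is a placeholder for the downloaded arXiv file) of how one could pinpoint the malformed line:
```python
import json

path = "/path/to/downloaded/arxiv/train.txt"  # placeholder path

with open(path) as f:
    for line_no, line in enumerate(f, start=1):
        try:
            json.loads(line)
        except json.JSONDecodeError as e:
            print(f"line {line_no}: {e}")
            break
```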
Any ideas? | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/294/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/294/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/293 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/293/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/293/comments | https://api.github.com/repos/huggingface/datasets/issues/293/events | https://github.com/huggingface/datasets/pull/293 | 642,942,182 | MDExOlB1bGxSZXF1ZXN0NDM3ODM1ODI4 | 293 | Don't test community datasets | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-22T10:15:33 | 2020-06-22T11:07:00 | 2020-06-22T11:06:59 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/293",
"html_url": "https://github.com/huggingface/datasets/pull/293",
"diff_url": "https://github.com/huggingface/datasets/pull/293.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/293.patch",
"merged_at": "2020-06-22T11:06:59"
} | This PR disables testing for community datasets on aws.
It should fix the CI that is currently failing. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/293/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/293/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/292 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/292/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/292/comments | https://api.github.com/repos/huggingface/datasets/issues/292/events | https://github.com/huggingface/datasets/pull/292 | 642,897,797 | MDExOlB1bGxSZXF1ZXN0NDM3Nzk4NTM2 | 292 | Update metadata for x_stance dataset | {
"login": "jvamvas",
"id": 5830820,
"node_id": "MDQ6VXNlcjU4MzA4MjA=",
"avatar_url": "https://avatars.githubusercontent.com/u/5830820?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/jvamvas",
"html_url": "https://github.com/jvamvas",
"followers_url": "https://api.github.com/users/jvamvas/followers",
"following_url": "https://api.github.com/users/jvamvas/following{/other_user}",
"gists_url": "https://api.github.com/users/jvamvas/gists{/gist_id}",
"starred_url": "https://api.github.com/users/jvamvas/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/jvamvas/subscriptions",
"organizations_url": "https://api.github.com/users/jvamvas/orgs",
"repos_url": "https://api.github.com/users/jvamvas/repos",
"events_url": "https://api.github.com/users/jvamvas/events{/privacy}",
"received_events_url": "https://api.github.com/users/jvamvas/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2020-06-22T09:13:26 | 2020-06-23T08:07:24 | 2020-06-23T08:07:24 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/292",
"html_url": "https://github.com/huggingface/datasets/pull/292",
"diff_url": "https://github.com/huggingface/datasets/pull/292.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/292.patch",
"merged_at": "2020-06-23T08:07:24"
} | Thank you for featuring the x_stance dataset in your library. This PR updates some metadata:
- Citation: Replace preprint with proceedings
- URL: Use a URL with long-term availability
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/292/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/292/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/291 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/291/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/291/comments | https://api.github.com/repos/huggingface/datasets/issues/291/events | https://github.com/huggingface/datasets/pull/291 | 642,688,450 | MDExOlB1bGxSZXF1ZXN0NDM3NjM1NjMy | 291 | break statement not required | {
"login": "mayurnewase",
"id": 12967587,
"node_id": "MDQ6VXNlcjEyOTY3NTg3",
"avatar_url": "https://avatars.githubusercontent.com/u/12967587?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mayurnewase",
"html_url": "https://github.com/mayurnewase",
"followers_url": "https://api.github.com/users/mayurnewase/followers",
"following_url": "https://api.github.com/users/mayurnewase/following{/other_user}",
"gists_url": "https://api.github.com/users/mayurnewase/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mayurnewase/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mayurnewase/subscriptions",
"organizations_url": "https://api.github.com/users/mayurnewase/orgs",
"repos_url": "https://api.github.com/users/mayurnewase/repos",
"events_url": "https://api.github.com/users/mayurnewase/events{/privacy}",
"received_events_url": "https://api.github.com/users/mayurnewase/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2020-06-22T01:40:55 | 2020-06-23T17:57:58 | 2020-06-23T09:37:02 | NONE | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/291",
"html_url": "https://github.com/huggingface/datasets/pull/291",
"diff_url": "https://github.com/huggingface/datasets/pull/291.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/291.patch",
"merged_at": null
} | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/291/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/291/timeline | null |
|
https://api.github.com/repos/huggingface/datasets/issues/290 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/290/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/290/comments | https://api.github.com/repos/huggingface/datasets/issues/290/events | https://github.com/huggingface/datasets/issues/290 | 641,978,286 | MDU6SXNzdWU2NDE5NzgyODY= | 290 | ConnectionError - Eli5 dataset download | {
"login": "JovanNj",
"id": 8490096,
"node_id": "MDQ6VXNlcjg0OTAwOTY=",
"avatar_url": "https://avatars.githubusercontent.com/u/8490096?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/JovanNj",
"html_url": "https://github.com/JovanNj",
"followers_url": "https://api.github.com/users/JovanNj/followers",
"following_url": "https://api.github.com/users/JovanNj/following{/other_user}",
"gists_url": "https://api.github.com/users/JovanNj/gists{/gist_id}",
"starred_url": "https://api.github.com/users/JovanNj/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/JovanNj/subscriptions",
"organizations_url": "https://api.github.com/users/JovanNj/orgs",
"repos_url": "https://api.github.com/users/JovanNj/repos",
"events_url": "https://api.github.com/users/JovanNj/events{/privacy}",
"received_events_url": "https://api.github.com/users/JovanNj/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-06-19T13:40:33 | 2020-06-20T13:22:24 | 2020-06-20T13:22:24 | NONE | null | null | null | Hi, I have a problem with downloading Eli5 dataset. When typing `nlp.load_dataset('eli5')`, I get ConnectionError: Couldn't reach https://storage.googleapis.com/huggingface-nlp/cache/datasets/eli5/LFQA_reddit/1.0.0/explain_like_im_five-train_eli5.arrow
I would appreciate if you could help me with this issue. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/290/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/290/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/289 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/289/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/289/comments | https://api.github.com/repos/huggingface/datasets/issues/289/events | https://github.com/huggingface/datasets/pull/289 | 641,934,194 | MDExOlB1bGxSZXF1ZXN0NDM3MDc0MTM3 | 289 | update xsum | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2020-06-19T12:28:32 | 2020-06-22T13:27:26 | 2020-06-22T07:20:07 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/289",
"html_url": "https://github.com/huggingface/datasets/pull/289",
"diff_url": "https://github.com/huggingface/datasets/pull/289.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/289.patch",
"merged_at": "2020-06-22T07:20:07"
} | This PR makes the following update to the xsum dataset:
- Manual download is not required anymore
- the dataset can be loaded as follows: `nlp.load_dataset('xsum')`
**Important**
Instead of using an outdated URL to download the data: "https://raw.githubusercontent.com/EdinburghNLP/XSum/master/XSum-Dataset/XSum-TRAINING-DEV-TEST-SPLIT-90-5-5.json",
a more up-to-date URL stored here: https://s3.amazonaws.com/datasets.huggingface.co/summarization/xsum.tar.gz is used,
so that the user does not need to manually download the data anymore.
There might be slight breaking changes here for xsum. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/289/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/289/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/288 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/288/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/288/comments | https://api.github.com/repos/huggingface/datasets/issues/288/events | https://github.com/huggingface/datasets/issues/288 | 641,888,610 | MDU6SXNzdWU2NDE4ODg2MTA= | 288 | Error at the first example in README: AttributeError: module 'dill' has no attribute '_dill' | {
"login": "wutong8023",
"id": 14964542,
"node_id": "MDQ6VXNlcjE0OTY0NTQy",
"avatar_url": "https://avatars.githubusercontent.com/u/14964542?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/wutong8023",
"html_url": "https://github.com/wutong8023",
"followers_url": "https://api.github.com/users/wutong8023/followers",
"following_url": "https://api.github.com/users/wutong8023/following{/other_user}",
"gists_url": "https://api.github.com/users/wutong8023/gists{/gist_id}",
"starred_url": "https://api.github.com/users/wutong8023/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/wutong8023/subscriptions",
"organizations_url": "https://api.github.com/users/wutong8023/orgs",
"repos_url": "https://api.github.com/users/wutong8023/repos",
"events_url": "https://api.github.com/users/wutong8023/events{/privacy}",
"received_events_url": "https://api.github.com/users/wutong8023/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 5 | 2020-06-19T11:01:22 | 2020-06-21T09:05:11 | 2020-06-21T09:05:11 | NONE | null | null | null | /Users/parasol_tree/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:469: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint8 = np.dtype([("qint8", np.int8, 1)])
/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:470: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint8 = np.dtype([("quint8", np.uint8, 1)])
/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:471: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint16 = np.dtype([("qint16", np.int16, 1)])
/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:472: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_quint16 = np.dtype([("quint16", np.uint16, 1)])
/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:473: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
_np_qint32 = np.dtype([("qint32", np.int32, 1)])
/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:476: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.
np_resource = np.dtype([("resource", np.ubyte, 1)])
/Users/parasol_tree/anaconda3/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: compiletime version 3.5 of module 'tensorflow.python.framework.fast_tensor_util' does not match runtime version 3.6
return f(*args, **kwds)
/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/h5py/__init__.py:36: FutureWarning: Conversion of the second argument of issubdtype from `float` to `np.floating` is deprecated. In future, it will be treated as `np.float64 == np.dtype(float).type`.
from ._conv import register_converters as _register_converters
Traceback (most recent call last):
File "/Users/parasol_tree/Resource/019 - Github/AcademicEnglishToolkit /test.py", line 7, in <module>
import nlp
File "/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/nlp/__init__.py", line 27, in <module>
from .arrow_dataset import Dataset
File "/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/nlp/arrow_dataset.py", line 31, in <module>
from nlp.utils.py_utils import dumps
File "/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/nlp/utils/__init__.py", line 20, in <module>
from .download_manager import DownloadManager, GenerateMode
File "/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/nlp/utils/download_manager.py", line 25, in <module>
from .py_utils import flatten_nested, map_nested, size_str
File "/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/nlp/utils/py_utils.py", line 244, in <module>
class Pickler(dill.Pickler):
File "/Users/parasol_tree/anaconda3/lib/python3.6/site-packages/nlp/utils/py_utils.py", line 247, in Pickler
dispatch = dill._dill.MetaCatchingDict(dill.Pickler.dispatch.copy())
AttributeError: module 'dill' has no attribute '_dill' | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/288/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/288/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/287 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/287/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/287/comments | https://api.github.com/repos/huggingface/datasets/issues/287/events | https://github.com/huggingface/datasets/pull/287 | 641,800,227 | MDExOlB1bGxSZXF1ZXN0NDM2OTY0NTg0 | 287 | fix squad_v2 metric | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-19T08:24:46 | 2020-06-19T08:33:43 | 2020-06-19T08:33:41 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/287",
"html_url": "https://github.com/huggingface/datasets/pull/287",
"diff_url": "https://github.com/huggingface/datasets/pull/287.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/287.patch",
"merged_at": "2020-06-19T08:33:41"
} | Fix #280
The imports were wrong | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/287/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/287/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/286 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/286/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/286/comments | https://api.github.com/repos/huggingface/datasets/issues/286/events | https://github.com/huggingface/datasets/pull/286 | 641,585,758 | MDExOlB1bGxSZXF1ZXN0NDM2NzkzMjI4 | 286 | Add ANLI dataset. | {
"login": "easonnie",
"id": 11016329,
"node_id": "MDQ6VXNlcjExMDE2MzI5",
"avatar_url": "https://avatars.githubusercontent.com/u/11016329?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/easonnie",
"html_url": "https://github.com/easonnie",
"followers_url": "https://api.github.com/users/easonnie/followers",
"following_url": "https://api.github.com/users/easonnie/following{/other_user}",
"gists_url": "https://api.github.com/users/easonnie/gists{/gist_id}",
"starred_url": "https://api.github.com/users/easonnie/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/easonnie/subscriptions",
"organizations_url": "https://api.github.com/users/easonnie/orgs",
"repos_url": "https://api.github.com/users/easonnie/repos",
"events_url": "https://api.github.com/users/easonnie/events{/privacy}",
"received_events_url": "https://api.github.com/users/easonnie/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-18T22:27:30 | 2020-06-22T12:23:27 | 2020-06-22T12:23:27 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/286",
"html_url": "https://github.com/huggingface/datasets/pull/286",
"diff_url": "https://github.com/huggingface/datasets/pull/286.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/286.patch",
"merged_at": "2020-06-22T12:23:26"
} | I completed all the steps in https://github.com/huggingface/nlp/blob/master/CONTRIBUTING.md#how-to-add-a-dataset and pushed the code for ANLI. Please let me know if there are any errors. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/286/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/286/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/285 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/285/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/285/comments | https://api.github.com/repos/huggingface/datasets/issues/285/events | https://github.com/huggingface/datasets/pull/285 | 641,360,702 | MDExOlB1bGxSZXF1ZXN0NDM2NjAyMjk4 | 285 | Consistent formatting of citations | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 1 | 2020-06-18T16:25:23 | 2020-06-22T08:09:25 | 2020-06-22T08:09:24 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/285",
"html_url": "https://github.com/huggingface/datasets/pull/285",
"diff_url": "https://github.com/huggingface/datasets/pull/285.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/285.patch",
"merged_at": "2020-06-22T08:09:23"
} | #283 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/285/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/285/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/284 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/284/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/284/comments | https://api.github.com/repos/huggingface/datasets/issues/284/events | https://github.com/huggingface/datasets/pull/284 | 641,337,217 | MDExOlB1bGxSZXF1ZXN0NDM2NTgxODQ2 | 284 | Fix manual download instructions | {
"login": "patrickvonplaten",
"id": 23423619,
"node_id": "MDQ6VXNlcjIzNDIzNjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/patrickvonplaten",
"html_url": "https://github.com/patrickvonplaten",
"followers_url": "https://api.github.com/users/patrickvonplaten/followers",
"following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}",
"gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}",
"starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions",
"organizations_url": "https://api.github.com/users/patrickvonplaten/orgs",
"repos_url": "https://api.github.com/users/patrickvonplaten/repos",
"events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}",
"received_events_url": "https://api.github.com/users/patrickvonplaten/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 5 | 2020-06-18T15:59:57 | 2020-06-19T08:24:21 | 2020-06-19T08:24:19 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/284",
"html_url": "https://github.com/huggingface/datasets/pull/284",
"diff_url": "https://github.com/huggingface/datasets/pull/284.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/284.patch",
"merged_at": "2020-06-19T08:24:19"
} | This PR replaces the static `DatasetBuilder` variable `MANUAL_DOWNLOAD_INSTRUCTIONS` with a property function `manual_download_instructions()`.
Some datasets like XTREME and all WMT need the manual data dir only for a small fraction of the possible configs.
After some brainstorming with @mariamabarham and @lhoestq, we came to the conclusion that having a property function `manual_download_instructions()` gives us more flexibility to decide on a per config basis in the dataset builder if manual download instructions are needed.
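For illustration, the idea looks roughly like this (a simplified sketch with a made-up dataset and config names, not the exact code of this PR):
```python
import nlp

class MyDataset(nlp.GeneratorBasedBuilder):  # hypothetical dataset, for illustration only
    BUILDER_CONFIGS = [
        nlp.BuilderConfig(name="auto", version=nlp.Version("1.0.0")),
        nlp.BuilderConfig(name="needs_manual", version=nlp.Version("1.0.0")),
    ]

    @property
    def manual_download_instructions(self):
        # only the "needs_manual" config requires a manually downloaded data dir
        if self.config.name == "needs_manual":
            return "Please download the raw files and pass the folder via data_dir=..."
        return None

    def _info(self):
        return nlp.DatasetInfo(features=nlp.Features({"text": nlp.Value("string")}))

    def _split_generators(self, dl_manager):
        return []

    def _generate_examples(self):
        yield from ()
```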
Also, this PR should unblock `wmt16 - ro-en` by solving a bug with it.
@sshleifer from this branch you should be able to successfully run
```python
import nlp
ds = nlp.load_dataset('./datasets/wmt16', 'ro-en')
```
and once this PR is merged S3 should be synched so that
```python
import nlp
ds = nlp.load_dataset("wmt16", "ro-en")
```
works as well.
**Important**: Since `MANUAL_DOWNLOAD_INSTRUCTIONS` was not really exposed to the user, this PR should not be a problem regarding backward compatibility. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/284/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/284/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/283 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/283/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/283/comments | https://api.github.com/repos/huggingface/datasets/issues/283/events | https://github.com/huggingface/datasets/issues/283 | 641,270,439 | MDU6SXNzdWU2NDEyNzA0Mzk= | 283 | Consistent formatting of citations | {
"login": "srush",
"id": 35882,
"node_id": "MDQ6VXNlcjM1ODgy",
"avatar_url": "https://avatars.githubusercontent.com/u/35882?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/srush",
"html_url": "https://github.com/srush",
"followers_url": "https://api.github.com/users/srush/followers",
"following_url": "https://api.github.com/users/srush/following{/other_user}",
"gists_url": "https://api.github.com/users/srush/gists{/gist_id}",
"starred_url": "https://api.github.com/users/srush/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/srush/subscriptions",
"organizations_url": "https://api.github.com/users/srush/orgs",
"repos_url": "https://api.github.com/users/srush/repos",
"events_url": "https://api.github.com/users/srush/events{/privacy}",
"received_events_url": "https://api.github.com/users/srush/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [
{
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
}
] | null | 0 | 2020-06-18T14:48:45 | 2020-06-22T17:30:46 | 2020-06-22T17:30:46 | CONTRIBUTOR | null | null | null | The citations are all of a different format, some have "```" and have text inside, others are proper bibtex.
Can we make it so that they are all proper citations, i.e. parseable according to the BibTeX spec:
https://bibtexparser.readthedocs.io/en/master/ | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/283/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/283/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/282 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/282/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/282/comments | https://api.github.com/repos/huggingface/datasets/issues/282/events | https://github.com/huggingface/datasets/pull/282 | 641,217,759 | MDExOlB1bGxSZXF1ZXN0NDM2NDgxNzMy | 282 | Update dataset_info from gcs | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-18T13:41:15 | 2020-06-18T16:24:52 | 2020-06-18T16:24:51 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/282",
"html_url": "https://github.com/huggingface/datasets/pull/282",
"diff_url": "https://github.com/huggingface/datasets/pull/282.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/282.patch",
"merged_at": "2020-06-18T16:24:51"
} | Some datasets are hosted on gcs (wikipedia for example). In this PR I make sure that, when a user loads such datasets, the file_instructions are built using the dataset_info.json from gcs and not from the info extracted from the local `dataset_infos.json` (the one that contains the info for each config). Indeed, local files may end up outdated.
Furthermore, to avoid outdated dataset_infos.json, I now make sure that each time you run `load_dataset` it also tries to update the file locally.
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/282/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/282/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/281 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/281/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/281/comments | https://api.github.com/repos/huggingface/datasets/issues/281/events | https://github.com/huggingface/datasets/issues/281 | 641,067,856 | MDU6SXNzdWU2NDEwNjc4NTY= | 281 | Private/sensitive data | {
"login": "MFreidank",
"id": 6368040,
"node_id": "MDQ6VXNlcjYzNjgwNDA=",
"avatar_url": "https://avatars.githubusercontent.com/u/6368040?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/MFreidank",
"html_url": "https://github.com/MFreidank",
"followers_url": "https://api.github.com/users/MFreidank/followers",
"following_url": "https://api.github.com/users/MFreidank/following{/other_user}",
"gists_url": "https://api.github.com/users/MFreidank/gists{/gist_id}",
"starred_url": "https://api.github.com/users/MFreidank/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/MFreidank/subscriptions",
"organizations_url": "https://api.github.com/users/MFreidank/orgs",
"repos_url": "https://api.github.com/users/MFreidank/repos",
"events_url": "https://api.github.com/users/MFreidank/events{/privacy}",
"received_events_url": "https://api.github.com/users/MFreidank/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 3 | 2020-06-18T09:47:27 | 2020-06-20T13:15:12 | 2020-06-20T13:15:12 | NONE | null | null | null | Hi all,
Thanks for this fantastic library, it makes it very easy to do prototyping for NLP projects interchangeably between TF/Pytorch.
Unfortunately, there is data that cannot easily be shared publicly as it may contain sensitive information.
Is there support/a plan to support such data with NLP, e.g. by reading it from local sources?
A use case flow could look like this: use NLP to prototype an approach on similar, public data and apply the resulting prototype to sensitive/private data without the need to rethink data processing pipelines.
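For concreteness, something along these lines is what I have in mind (the file names are made up, and I am assuming the generic csv loading script can be pointed at local files):
```python
import nlp

# hypothetical local files containing the sensitive data
dataset = nlp.load_dataset(
    "csv",
    data_files={"train": "local/train.csv", "validation": "local/dev.csv"},
)
```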
Many thanks for your responses ahead of time and kind regards,
MFreidank | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/281/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/281/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/280 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/280/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/280/comments | https://api.github.com/repos/huggingface/datasets/issues/280/events | https://github.com/huggingface/datasets/issues/280 | 640,677,615 | MDU6SXNzdWU2NDA2Nzc2MTU= | 280 | Error with SquadV2 Metrics | {
"login": "avinregmi",
"id": 32203792,
"node_id": "MDQ6VXNlcjMyMjAzNzky",
"avatar_url": "https://avatars.githubusercontent.com/u/32203792?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/avinregmi",
"html_url": "https://github.com/avinregmi",
"followers_url": "https://api.github.com/users/avinregmi/followers",
"following_url": "https://api.github.com/users/avinregmi/following{/other_user}",
"gists_url": "https://api.github.com/users/avinregmi/gists{/gist_id}",
"starred_url": "https://api.github.com/users/avinregmi/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/avinregmi/subscriptions",
"organizations_url": "https://api.github.com/users/avinregmi/orgs",
"repos_url": "https://api.github.com/users/avinregmi/repos",
"events_url": "https://api.github.com/users/avinregmi/events{/privacy}",
"received_events_url": "https://api.github.com/users/avinregmi/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-17T19:10:54 | 2020-06-19T08:33:41 | 2020-06-19T08:33:41 | NONE | null | null | null | I can't seem to import squad v2 metrics.
**squad_metric = nlp.load_metric('squad_v2')**
**This throws the following error:**
```
ImportError Traceback (most recent call last)
<ipython-input-8-170b6a170555> in <module>
----> 1 squad_metric = nlp.load_metric('squad_v2')
~/env/lib64/python3.6/site-packages/nlp/load.py in load_metric(path, name, process_id, num_process, data_dir, experiment_id, in_memory, download_config, **metric_init_kwargs)
426 """
427 module_path = prepare_module(path, download_config=download_config, dataset=False)
--> 428 metric_cls = import_main_class(module_path, dataset=False)
429 metric = metric_cls(
430 name=name,
~/env/lib64/python3.6/site-packages/nlp/load.py in import_main_class(module_path, dataset)
55 """
56 importlib.invalidate_caches()
---> 57 module = importlib.import_module(module_path)
58
59 if dataset:
/usr/lib64/python3.6/importlib/__init__.py in import_module(name, package)
124 break
125 level += 1
--> 126 return _bootstrap._gcd_import(name[level:], package, level)
127
128
/usr/lib64/python3.6/importlib/_bootstrap.py in _gcd_import(name, package, level)
/usr/lib64/python3.6/importlib/_bootstrap.py in _find_and_load(name, import_)
/usr/lib64/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
/usr/lib64/python3.6/importlib/_bootstrap.py in _load_unlocked(spec)
/usr/lib64/python3.6/importlib/_bootstrap_external.py in exec_module(self, module)
/usr/lib64/python3.6/importlib/_bootstrap.py in _call_with_frames_removed(f, *args, **kwds)
~/env/lib64/python3.6/site-packages/nlp/metrics/squad_v2/a15e787c76889174874386d3def75321f0284c11730d2a57e28fe1352c9b5c7a/squad_v2.py in <module>
16
17 import nlp
---> 18 from .evaluate import evaluate
19
20 _CITATION = """\
ImportError: cannot import name 'evaluate'
``` | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/280/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/280/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/279 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/279/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/279/comments | https://api.github.com/repos/huggingface/datasets/issues/279/events | https://github.com/huggingface/datasets/issues/279 | 640,611,692 | MDU6SXNzdWU2NDA2MTE2OTI= | 279 | Dataset Preprocessing Cache with .map() function not working as expected | {
"login": "sarahwie",
"id": 8027676,
"node_id": "MDQ6VXNlcjgwMjc2NzY=",
"avatar_url": "https://avatars.githubusercontent.com/u/8027676?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sarahwie",
"html_url": "https://github.com/sarahwie",
"followers_url": "https://api.github.com/users/sarahwie/followers",
"following_url": "https://api.github.com/users/sarahwie/following{/other_user}",
"gists_url": "https://api.github.com/users/sarahwie/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sarahwie/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sarahwie/subscriptions",
"organizations_url": "https://api.github.com/users/sarahwie/orgs",
"repos_url": "https://api.github.com/users/sarahwie/repos",
"events_url": "https://api.github.com/users/sarahwie/events{/privacy}",
"received_events_url": "https://api.github.com/users/sarahwie/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 5 | 2020-06-17T17:17:21 | 2021-07-06T21:43:28 | 2021-04-18T23:43:49 | NONE | null | null | null | I've been having issues with reproducibility when loading and processing datasets with the `.map` function. I was only able to resolve them by clearing all of the cache files on my system.
Is there a way to disable the cache when processing a dataset? As I make minor processing changes to the same dataset, I want to be certain the data is being re-processed rather than loaded from a cached file.
Could you also help me understand a bit more about how the caching functionality is used for pre-processing? E.g. how is it determined when to load from a cache vs. reprocess.
I was particularly having an issue where the correct dataset splits were loaded, but as soon as I applied the `.map()` function to each split independently, they somehow all exited this process having been converted to the test set.
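For reference, the pattern is roughly the following (the preprocessing function is just a placeholder); ideally there would be a flag to force re-processing, if such a thing exists:
```python
import nlp

dataset = nlp.load_dataset("glue", "sst2")  # any dataset, just as an example

def preprocess(example):
    # placeholder preprocessing; this is where my minor changes happen
    example["sentence"] = example["sentence"].lower()
    return example

# applied independently to each split; ideally with a way to bypass the cache,
# e.g. something like .map(preprocess, load_from_cache_file=False) if such a flag exists
train = dataset["train"].map(preprocess)
valid = dataset["validation"].map(preprocess)
test = dataset["test"].map(preprocess)
```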
Thanks! | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/279/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/279/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/278 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/278/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/278/comments | https://api.github.com/repos/huggingface/datasets/issues/278/events | https://github.com/huggingface/datasets/issues/278 | 640,518,917 | MDU6SXNzdWU2NDA1MTg5MTc= | 278 | MemoryError when loading German Wikipedia | {
"login": "gregburman",
"id": 4698028,
"node_id": "MDQ6VXNlcjQ2OTgwMjg=",
"avatar_url": "https://avatars.githubusercontent.com/u/4698028?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/gregburman",
"html_url": "https://github.com/gregburman",
"followers_url": "https://api.github.com/users/gregburman/followers",
"following_url": "https://api.github.com/users/gregburman/following{/other_user}",
"gists_url": "https://api.github.com/users/gregburman/gists{/gist_id}",
"starred_url": "https://api.github.com/users/gregburman/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/gregburman/subscriptions",
"organizations_url": "https://api.github.com/users/gregburman/orgs",
"repos_url": "https://api.github.com/users/gregburman/repos",
"events_url": "https://api.github.com/users/gregburman/events{/privacy}",
"received_events_url": "https://api.github.com/users/gregburman/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 7 | 2020-06-17T15:06:21 | 2020-06-19T12:53:02 | 2020-06-19T12:53:02 | NONE | null | null | null | Hi, first off let me say thank you for all the awesome work you're doing at Hugging Face across all your projects (NLP, Transformers, Tokenizers) - they're all amazing contributions to us working with NLP models :)
I'm trying to download the German Wikipedia dataset as follows:
```
wiki = nlp.load_dataset("wikipedia", "20200501.de", split="train")
```
However, when I do so, I get the following error:
```
Downloading and preparing dataset wikipedia/20200501.de (download: Unknown size, generated: Unknown size, total: Unknown size) to /home/ubuntu/.cache/huggingface/datasets/wikipedia/20200501.de/1.0.0...
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/ubuntu/anaconda3/envs/albert/lib/python3.7/site-packages/nlp/load.py", line 520, in load_dataset
save_infos=save_infos,
File "/home/ubuntu/anaconda3/envs/albert/lib/python3.7/site-packages/nlp/builder.py", line 433, in download_and_prepare
dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
File "/home/ubuntu/anaconda3/envs/albert/lib/python3.7/site-packages/nlp/builder.py", line 824, in _download_and_prepare
"\n\t`{}`".format(usage_example)
nlp.builder.MissingBeamOptions: Trying to generate a dataset using Apache Beam, yet no Beam Runner or PipelineOptions() has been provided in `load_dataset` or in the builder arguments. For big datasets it has to run on large-scale data processing tools like Dataflow, Spark, etc. More information about Apache Beam runners at https://beam.apache.org/documentation/runners/capability-matrix/
If you really want to run it locally because you feel like the Dataset is small enough, you can use the local beam runner called `DirectRunner` (you may run out of memory).
Example of usage:
`load_dataset('wikipedia', '20200501.de', beam_runner='DirectRunner')`
```
So, following on from the example usage at the bottom, I tried specifying `beam_runner='DirectRunner'`; however, when I do this, about 20 minutes after the data has all downloaded I get a `MemoryError` as warned.
This isn't an issue for the English or French Wikipedia datasets (I've tried both), as neither seems to require that `beam_runner` be specified. Can you please clarify why this is an issue for the German dataset?
My nlp version is 0.2.1.
Thank you! | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/278/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/278/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/277 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/277/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/277/comments | https://api.github.com/repos/huggingface/datasets/issues/277/events | https://github.com/huggingface/datasets/issues/277 | 640,163,053 | MDU6SXNzdWU2NDAxNjMwNTM= | 277 | Empty samples in glue/qqp | {
"login": "richarddwang",
"id": 17963619,
"node_id": "MDQ6VXNlcjE3OTYzNjE5",
"avatar_url": "https://avatars.githubusercontent.com/u/17963619?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/richarddwang",
"html_url": "https://github.com/richarddwang",
"followers_url": "https://api.github.com/users/richarddwang/followers",
"following_url": "https://api.github.com/users/richarddwang/following{/other_user}",
"gists_url": "https://api.github.com/users/richarddwang/gists{/gist_id}",
"starred_url": "https://api.github.com/users/richarddwang/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/richarddwang/subscriptions",
"organizations_url": "https://api.github.com/users/richarddwang/orgs",
"repos_url": "https://api.github.com/users/richarddwang/repos",
"events_url": "https://api.github.com/users/richarddwang/events{/privacy}",
"received_events_url": "https://api.github.com/users/richarddwang/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-06-17T05:54:52 | 2020-06-21T00:21:45 | 2020-06-21T00:21:45 | CONTRIBUTOR | null | null | null | ```
qqp = nlp.load_dataset('glue', 'qqp')
print(qqp['train'][310121])
print(qqp['train'][362225])
```
```
{'question1': 'How can I create an Android app?', 'question2': '', 'label': 0, 'idx': 310137}
{'question1': 'How can I develop android app?', 'question2': '', 'label': 0, 'idx': 362246}
```
Notice that question 2 is an empty string.
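A quick sketch to reproduce the check over all splits:
```python
empty = [
    (split, i)
    for split in qqp
    for i, q2 in enumerate(qqp[split]['question2'])
    if q2 == ''
]
print(empty)
```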
BTW, I have checked and these two are the only naughty ones in all splits of qqp. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/277/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/277/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/276 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/276/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/276/comments | https://api.github.com/repos/huggingface/datasets/issues/276/events | https://github.com/huggingface/datasets/pull/276 | 639,490,858 | MDExOlB1bGxSZXF1ZXN0NDM1MDY5Nzg5 | 276 | Fix metric compute (original_instructions missing) | {
"login": "lhoestq",
"id": 42851186,
"node_id": "MDQ6VXNlcjQyODUxMTg2",
"avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lhoestq",
"html_url": "https://github.com/lhoestq",
"followers_url": "https://api.github.com/users/lhoestq/followers",
"following_url": "https://api.github.com/users/lhoestq/following{/other_user}",
"gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions",
"organizations_url": "https://api.github.com/users/lhoestq/orgs",
"repos_url": "https://api.github.com/users/lhoestq/repos",
"events_url": "https://api.github.com/users/lhoestq/events{/privacy}",
"received_events_url": "https://api.github.com/users/lhoestq/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 2 | 2020-06-16T08:52:01 | 2020-06-18T07:41:45 | 2020-06-18T07:41:44 | MEMBER | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/276",
"html_url": "https://github.com/huggingface/datasets/pull/276",
"diff_url": "https://github.com/huggingface/datasets/pull/276.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/276.patch",
"merged_at": "2020-06-18T07:41:43"
} | When loading arrow data we added in cc8d250 a way to specify the instructions that were used to store them with the loaded dataset.
However, metrics load data the same way and don't need instructions (we use a single file).
In this PR I just make `original_instructions` optional when reading files to load a `Dataset` object.
This should fix #269 | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/276/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/276/timeline | null |
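An illustrative sketch of the pattern the PR above describes, namely making `original_instructions` optional when reconstructing a dataset from an arrow file. The function and field names here are hypothetical and only mirror the idea; they are not the library's actual internals.
```python
from typing import Optional

def dataset_from_file(filename: str, original_instructions: Optional[str] = None) -> dict:
    """Sketch of reading arrow data where the read instructions may be absent.

    Datasets record the instructions they were built with, but a metric reads a
    single file and has none, so the argument has to default to None.
    """
    info = {"source_file": filename}
    if original_instructions is not None:
        info["original_instructions"] = original_instructions
    # ... the actual arrow reading would happen here ...
    return info

print(dataset_from_file("metric_predictions.arrow"))                          # metric case
print(dataset_from_file("train.arrow", original_instructions="train[:50%]"))  # dataset case
```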
https://api.github.com/repos/huggingface/datasets/issues/275 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/275/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/275/comments | https://api.github.com/repos/huggingface/datasets/issues/275/events | https://github.com/huggingface/datasets/issues/275 | 639,439,052 | MDU6SXNzdWU2Mzk0MzkwNTI= | 275 | NonMatchingChecksumError when loading pubmed dataset | {
"login": "DavideStenner",
"id": 48441753,
"node_id": "MDQ6VXNlcjQ4NDQxNzUz",
"avatar_url": "https://avatars.githubusercontent.com/u/48441753?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/DavideStenner",
"html_url": "https://github.com/DavideStenner",
"followers_url": "https://api.github.com/users/DavideStenner/followers",
"following_url": "https://api.github.com/users/DavideStenner/following{/other_user}",
"gists_url": "https://api.github.com/users/DavideStenner/gists{/gist_id}",
"starred_url": "https://api.github.com/users/DavideStenner/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/DavideStenner/subscriptions",
"organizations_url": "https://api.github.com/users/DavideStenner/orgs",
"repos_url": "https://api.github.com/users/DavideStenner/repos",
"events_url": "https://api.github.com/users/DavideStenner/events{/privacy}",
"received_events_url": "https://api.github.com/users/DavideStenner/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067388877,
"node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug",
"name": "dataset bug",
"color": "2edb81",
"default": false,
"description": "A bug in a dataset script provided in the library"
}
] | closed | false | null | [] | null | 1 | 2020-06-16T07:31:51 | 2020-06-19T07:37:07 | 2020-06-19T07:37:07 | NONE | null | null | null | I get this error when I run `nlp.load_dataset('scientific_papers', 'pubmed', split = 'train[:50%]')`.
The error is:
```
---------------------------------------------------------------------------
NonMatchingChecksumError Traceback (most recent call last)
<ipython-input-2-7742dea167d0> in <module>()
----> 1 df = nlp.load_dataset('scientific_papers', 'pubmed', split = 'train[:50%]')
2 df = pd.DataFrame(df)
3 gc.collect()
3 frames
/usr/local/lib/python3.6/dist-packages/nlp/load.py in load_dataset(path, name, version, data_dir, data_files, split, cache_dir, download_config, download_mode, ignore_verifications, save_infos, **config_kwargs)
518 download_mode=download_mode,
519 ignore_verifications=ignore_verifications,
--> 520 save_infos=save_infos,
521 )
522
/usr/local/lib/python3.6/dist-packages/nlp/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, save_infos, try_from_hf_gcs, dl_manager, **download_and_prepare_kwargs)
431 verify_infos = not save_infos and not ignore_verifications
432 self._download_and_prepare(
--> 433 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs
434 )
435 # Sync info
/usr/local/lib/python3.6/dist-packages/nlp/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)
468 # Checksums verification
469 if verify_infos:
--> 470 verify_checksums(self.info.download_checksums, dl_manager.get_recorded_sizes_checksums())
471 for split_generator in split_generators:
472 if str(split_generator.split_info.name).lower() == "all":
/usr/local/lib/python3.6/dist-packages/nlp/utils/info_utils.py in verify_checksums(expected_checksums, recorded_checksums)
34 bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
35 if len(bad_urls) > 0:
---> 36 raise NonMatchingChecksumError(str(bad_urls))
37 logger.info("All the checksums matched successfully.")
38
NonMatchingChecksumError: ['https://drive.google.com/uc?id=1b3rmCSIoh6VhD4HKWjI4HOW-cSwcwbeC&export=download', 'https://drive.google.com/uc?id=1lvsqvsFi3W-pE1SqNZI0s8NR9rC1tsja&export=download']
```
I'm currently working on Google Colab.
This is quite strange because yesterday it was working fine. (A workaround sketch follows below.)
| {
"url": "https://api.github.com/repos/huggingface/datasets/issues/275/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/275/timeline | null |
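A hedged workaround sketch for the checksum error above, using the `ignore_verifications` flag that appears in the traceback's own `load_dataset` signature. Note that this only bypasses the verification; it does not fix whatever changed on the Google Drive side.
```python
import nlp

# The mismatch usually means the files hosted on Google Drive changed or the
# download was corrupted or rate-limited. Skipping verification lets loading
# proceed, at the cost of not detecting a genuinely bad download.
pubmed = nlp.load_dataset(
    'scientific_papers',
    'pubmed',
    split='train[:50%]',
    ignore_verifications=True,
)
```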
https://api.github.com/repos/huggingface/datasets/issues/274 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/274/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/274/comments | https://api.github.com/repos/huggingface/datasets/issues/274/events | https://github.com/huggingface/datasets/issues/274 | 639,156,625 | MDU6SXNzdWU2MzkxNTY2MjU= | 274 | PG-19 | {
"login": "lucidrains",
"id": 108653,
"node_id": "MDQ6VXNlcjEwODY1Mw==",
"avatar_url": "https://avatars.githubusercontent.com/u/108653?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/lucidrains",
"html_url": "https://github.com/lucidrains",
"followers_url": "https://api.github.com/users/lucidrains/followers",
"following_url": "https://api.github.com/users/lucidrains/following{/other_user}",
"gists_url": "https://api.github.com/users/lucidrains/gists{/gist_id}",
"starred_url": "https://api.github.com/users/lucidrains/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/lucidrains/subscriptions",
"organizations_url": "https://api.github.com/users/lucidrains/orgs",
"repos_url": "https://api.github.com/users/lucidrains/repos",
"events_url": "https://api.github.com/users/lucidrains/events{/privacy}",
"received_events_url": "https://api.github.com/users/lucidrains/received_events",
"type": "User",
"site_admin": false
} | [
{
"id": 2067376369,
"node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5",
"url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request",
"name": "dataset request",
"color": "e99695",
"default": false,
"description": "Requesting to add a new dataset"
}
] | closed | false | null | [] | null | 4 | 2020-06-15T21:02:26 | 2020-07-06T15:35:02 | 2020-07-06T15:35:02 | CONTRIBUTOR | null | null | null | Hi, and thanks for all your open-sourced work, as always!
I was wondering if you would be open to adding PG-19 (https://github.com/deepmind/pg19) to your collection of datasets. It is often used for benchmarking long-range language modeling. | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/274/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/274/timeline | null |
https://api.github.com/repos/huggingface/datasets/issues/273 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/273/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/273/comments | https://api.github.com/repos/huggingface/datasets/issues/273/events | https://github.com/huggingface/datasets/pull/273 | 638,968,054 | MDExOlB1bGxSZXF1ZXN0NDM0NjM0MzU4 | 273 | update cos_e to add cos_e v1.0 | {
"login": "mariamabarham",
"id": 38249783,
"node_id": "MDQ6VXNlcjM4MjQ5Nzgz",
"avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/mariamabarham",
"html_url": "https://github.com/mariamabarham",
"followers_url": "https://api.github.com/users/mariamabarham/followers",
"following_url": "https://api.github.com/users/mariamabarham/following{/other_user}",
"gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}",
"starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions",
"organizations_url": "https://api.github.com/users/mariamabarham/orgs",
"repos_url": "https://api.github.com/users/mariamabarham/repos",
"events_url": "https://api.github.com/users/mariamabarham/events{/privacy}",
"received_events_url": "https://api.github.com/users/mariamabarham/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-15T16:03:22 | 2020-06-16T08:25:54 | 2020-06-16T08:25:52 | CONTRIBUTOR | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/273",
"html_url": "https://github.com/huggingface/datasets/pull/273",
"diff_url": "https://github.com/huggingface/datasets/pull/273.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/273.patch",
"merged_at": "2020-06-16T08:25:52"
} | This PR updates the cos_e dataset to add v1.0, as requested in #163.
@nazneenrajani | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/273/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/273/timeline | null |
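Once the PR above is merged, loading the new configuration would presumably look like the sketch below; the config string 'v1.0' is an assumption based on the PR title.
```python
import nlp

# 'v1.0' is assumed to be the new configuration name added by this PR; the
# pre-existing configuration should remain available as before.
cos_e_v1 = nlp.load_dataset('cos_e', 'v1.0')
print(cos_e_v1['train'][0])
```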
https://api.github.com/repos/huggingface/datasets/issues/272 | https://api.github.com/repos/huggingface/datasets | https://api.github.com/repos/huggingface/datasets/issues/272/labels{/name} | https://api.github.com/repos/huggingface/datasets/issues/272/comments | https://api.github.com/repos/huggingface/datasets/issues/272/events | https://github.com/huggingface/datasets/pull/272 | 638,307,313 | MDExOlB1bGxSZXF1ZXN0NDM0MTExOTQ3 | 272 | asd | {
"login": "sn696",
"id": 66900970,
"node_id": "MDQ6VXNlcjY2OTAwOTcw",
"avatar_url": "https://avatars.githubusercontent.com/u/66900970?v=4",
"gravatar_id": "",
"url": "https://api.github.com/users/sn696",
"html_url": "https://github.com/sn696",
"followers_url": "https://api.github.com/users/sn696/followers",
"following_url": "https://api.github.com/users/sn696/following{/other_user}",
"gists_url": "https://api.github.com/users/sn696/gists{/gist_id}",
"starred_url": "https://api.github.com/users/sn696/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/sn696/subscriptions",
"organizations_url": "https://api.github.com/users/sn696/orgs",
"repos_url": "https://api.github.com/users/sn696/repos",
"events_url": "https://api.github.com/users/sn696/events{/privacy}",
"received_events_url": "https://api.github.com/users/sn696/received_events",
"type": "User",
"site_admin": false
} | [] | closed | false | null | [] | null | 0 | 2020-06-14T08:20:38 | 2020-06-14T09:16:41 | 2020-06-14T09:16:41 | NONE | null | false | {
"url": "https://api.github.com/repos/huggingface/datasets/pulls/272",
"html_url": "https://github.com/huggingface/datasets/pull/272",
"diff_url": "https://github.com/huggingface/datasets/pull/272.diff",
"patch_url": "https://github.com/huggingface/datasets/pull/272.patch",
"merged_at": null
} | {
"url": "https://api.github.com/repos/huggingface/datasets/issues/272/reactions",
"total_count": 0,
"+1": 0,
"-1": 0,
"laugh": 0,
"hooray": 0,
"confused": 0,
"heart": 0,
"rocket": 0,
"eyes": 0
} | https://api.github.com/repos/huggingface/datasets/issues/272/timeline | null |