| Column | Dtype | Range / values |
|---|---|---|
| url | stringlengths | 58–61 |
| repository_url | stringclasses | 1 value |
| labels_url | stringlengths | 72–75 |
| comments_url | stringlengths | 67–70 |
| events_url | stringlengths | 65–68 |
| html_url | stringlengths | 46–51 |
| id | int64 | 599M–2.32B |
| node_id | stringlengths | 18–32 |
| number | int64 | 1–6.92k |
| title | stringlengths | 1–290 |
| user | dict | |
| labels | listlengths | 0–4 |
| state | stringclasses | 2 values |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | listlengths | 0–4 |
| milestone | dict | |
| comments | sequencelengths | 0–28 |
| created_at | unknown | |
| updated_at | unknown | |
| closed_at | unknown | |
| author_association | stringclasses | 4 values |
| active_lock_reason | float64 | |
| body | stringlengths | 0–228k |
| reactions | dict | |
| timeline_url | stringlengths | 67–70 |
| performed_via_github_app | float64 | |
| state_reason | stringclasses | 3 values |
| draft | float64 | 0–1 |
| pull_request | dict | |
| is_pull_request | bool | 2 classes |
https://api.github.com/repos/huggingface/datasets/issues/84
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/84/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/84/comments
https://api.github.com/repos/huggingface/datasets/issues/84/events
https://github.com/huggingface/datasets/pull/84
id: 617,249,815
node_id: MDExOlB1bGxSZXF1ZXN0NDE3MjAxODcz
number: 84
title: [TedHrLr] add left dummy data
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-13T08:27:20Z"
updated_at: "2020-05-13T08:29:22Z"
closed_at: "2020-05-13T08:29:21Z"
author_association: CONTRIBUTOR
active_lock_reason: null
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/84/reactions" }
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/84/timeline
performed_via_github_app: null
state_reason: null
draft: 0
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/84.diff", "html_url": "https://github.com/huggingface/datasets/pull/84", "merged_at": "2020-05-13T08:29:21Z", "patch_url": "https://github.com/huggingface/datasets/pull/84.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/84" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/83
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/83/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/83/comments
https://api.github.com/repos/huggingface/datasets/issues/83/events
https://github.com/huggingface/datasets/pull/83
id: 616,863,601
node_id: MDExOlB1bGxSZXF1ZXN0NDE2ODkyOTUz
number: 83
title: New datasets
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T18:22:27Z"
updated_at: "2020-05-12T18:22:47Z"
closed_at: "2020-05-12T18:22:45Z"
author_association: CONTRIBUTOR
active_lock_reason: null
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/83/reactions" }
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/83/timeline
performed_via_github_app: null
state_reason: null
draft: 0
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/83.diff", "html_url": "https://github.com/huggingface/datasets/pull/83", "merged_at": "2020-05-12T18:22:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/83.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/83" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/82
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/82/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/82/comments
https://api.github.com/repos/huggingface/datasets/issues/82/events
https://github.com/huggingface/datasets/pull/82
id: 616,805,194
node_id: MDExOlB1bGxSZXF1ZXN0NDE2ODQ1Njc5
number: 82
title: [Datasets] add ted_hrlr
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T16:46:50Z"
updated_at: "2020-05-13T07:52:54Z"
closed_at: "2020-05-13T07:52:53Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
@thomwolf - After looking at `xnli` I think it's better to leave the translation features and add a `translation` key to make them work in our framework. The result looks like this: ![Screenshot from 2020-05-12 18-34-43](https://user-images.githubusercontent.com/23423619/81721933-ee1faf00-9480-11ea-9e95-d6557cbd0ce0.png) You can see that each split has a `translation` key whose value is the nlp.features.Translation object. That's a simple change. If it's ok for you, I will add dummy data for the other configs and treat the other translation scripts in the same way.
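For illustration, a minimal sketch of what a row with such a `translation` key could look like; the language pair and sentences below are made up, not taken from the actual ted_hrlr data.

```python
# Hypothetical example row for a translation config (e.g. az -> en),
# assuming each example exposes a "translation" dict keyed by language code.
example = {
    "translation": {
        "az": "Bu bir nümunədir.",   # made-up source sentence
        "en": "This is an example.",  # made-up target sentence
    }
}

print(example["translation"]["en"])  # access one side of the pair
```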
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/82/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/82/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/82.diff", "html_url": "https://github.com/huggingface/datasets/pull/82", "merged_at": "2020-05-13T07:52:52Z", "patch_url": "https://github.com/huggingface/datasets/pull/82.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/82" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/81
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/81/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/81/comments
https://api.github.com/repos/huggingface/datasets/issues/81/events
https://github.com/huggingface/datasets/pull/81
id: 616,793,010
node_id: MDExOlB1bGxSZXF1ZXN0NDE2ODM1NzE1
number: 81
title: add tests
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T16:28:19Z"
updated_at: "2020-05-13T07:43:57Z"
closed_at: "2020-05-13T07:43:56Z"
author_association: MEMBER
active_lock_reason: null
body:
Tests for py_utils functions and for the BaseReader used to read from arrow and parquet. I also removed unused utils functions.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/81/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/81/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/81.diff", "html_url": "https://github.com/huggingface/datasets/pull/81", "merged_at": "2020-05-13T07:43:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/81.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/81" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/80
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/80/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/80/comments
https://api.github.com/repos/huggingface/datasets/issues/80/events
https://github.com/huggingface/datasets/pull/80
id: 616,786,803
node_id: MDExOlB1bGxSZXF1ZXN0NDE2ODMwNjk3
number: 80
title: Add nbytes + nexamples check
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T16:18:43Z"
updated_at: "2020-05-13T07:52:34Z"
closed_at: "2020-05-13T07:52:33Z"
author_association: MEMBER
active_lock_reason: null
body:
### Save size and number of examples

Now when you do `save_checksums`, it also creates `cached_sizes.txt` right next to the checksum file. This new file stores the byte sizes and the number of examples of each split that has been prepared and stored in the cache. Example:

```
# Cached sizes: <full_config_name> <num_bytes> <num_examples>
hansards/house/1.0.0/test 22906629 122290
hansards/house/1.0.0/train 191459584 947969
hansards/senate/1.0.0/test 5711686 25553
hansards/senate/1.0.0/train 40324278 182135
```

### Check processing output

If there is a `cached_sizes.txt`, then each time we run `download_and_prepare` it will make sure that the sizes match. You can set `ignore_checksums=True` if you don't want that to happen.

### Fill Dataset Info

All the split infos and the checksums are now stored correctly in DatasetInfo after `download_and_prepare`.

### Check space on disk before running `download_and_prepare`

Check if the available space is lower than the sum of the sizes of the files in `checksums.txt` and `cached_files.txt`. This is not ideal though, as it considers the files for all configs.

TODO: A better way to do it would be to save the `DatasetInfo` instead of the `checksums.txt` and `cached_sizes.txt`, in order to have one file per dataset config (and therefore consider only the sizes of the files for one config and not all of them). It can also be the occasion to factorize all the `download_and_prepare` verifications. Maybe next PR?
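As an aside, the quoted `cached_sizes.txt` format is simple enough to parse by hand; the helper below is an illustrative sketch, not part of the library.

```python
def parse_cached_sizes(path):
    """Parse lines of the form: <full_config_name> <num_bytes> <num_examples>."""
    sizes = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blank lines and the header comment
            config_name, num_bytes, num_examples = line.split()
            sizes[config_name] = (int(num_bytes), int(num_examples))
    return sizes

# parse_cached_sizes("cached_sizes.txt")["hansards/house/1.0.0/test"]
# -> (22906629, 122290)
```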
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/80/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/80/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/80.diff", "html_url": "https://github.com/huggingface/datasets/pull/80", "merged_at": "2020-05-13T07:52:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/80.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/80" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/79
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/79/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/79/comments
https://api.github.com/repos/huggingface/datasets/issues/79/events
https://github.com/huggingface/datasets/pull/79
id: 616,785,613
node_id: MDExOlB1bGxSZXF1ZXN0NDE2ODI5NzMy
number: 79
title: [Convert] add new pattern
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T16:16:51Z"
updated_at: "2020-05-12T16:17:10Z"
closed_at: "2020-05-12T16:17:09Z"
author_association: CONTRIBUTOR
active_lock_reason: null
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/79/reactions" }
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/79/timeline
performed_via_github_app: null
state_reason: null
draft: 0
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/79.diff", "html_url": "https://github.com/huggingface/datasets/pull/79", "merged_at": "2020-05-12T16:17:09Z", "patch_url": "https://github.com/huggingface/datasets/pull/79.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/79" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/78
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/78/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/78/comments
https://api.github.com/repos/huggingface/datasets/issues/78/events
https://github.com/huggingface/datasets/pull/78
id: 616,774,275
node_id: MDExOlB1bGxSZXF1ZXN0NDE2ODIwNzU5
number: 78
title: [Tests] skip beam dataset tests for now
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T16:00:58Z"
updated_at: "2020-05-12T16:16:24Z"
closed_at: "2020-05-12T16:16:22Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
For now we will skip tests for Beam Datasets
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/78/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/78/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/78.diff", "html_url": "https://github.com/huggingface/datasets/pull/78", "merged_at": "2020-05-12T16:16:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/78.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/78" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/77
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/77/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/77/comments
https://api.github.com/repos/huggingface/datasets/issues/77/events
https://github.com/huggingface/datasets/pull/77
id: 616,674,601
node_id: MDExOlB1bGxSZXF1ZXN0NDE2NzQwMjAz
number: 77
title: New datasets
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T13:51:59Z"
updated_at: "2020-05-12T14:02:16Z"
closed_at: "2020-05-12T14:02:15Z"
author_association: CONTRIBUTOR
active_lock_reason: null
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/77/reactions" }
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/77/timeline
performed_via_github_app: null
state_reason: null
draft: 0
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/77.diff", "html_url": "https://github.com/huggingface/datasets/pull/77", "merged_at": "2020-05-12T14:02:15Z", "patch_url": "https://github.com/huggingface/datasets/pull/77.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/77" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/76
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/76/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/76/comments
https://api.github.com/repos/huggingface/datasets/issues/76/events
https://github.com/huggingface/datasets/pull/76
id: 616,579,228
node_id: MDExOlB1bGxSZXF1ZXN0NDE2NjYyMTk2
number: 76
title: pin flake 8
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T11:25:29Z"
updated_at: "2020-05-12T11:27:35Z"
closed_at: "2020-05-12T11:27:34Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
Flake 8's new version does not like our format. Pinning the version for now.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/76/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/76/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/76.diff", "html_url": "https://github.com/huggingface/datasets/pull/76", "merged_at": "2020-05-12T11:27:34Z", "patch_url": "https://github.com/huggingface/datasets/pull/76.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/76" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/75
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/75/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/75/comments
https://api.github.com/repos/huggingface/datasets/issues/75/events
https://github.com/huggingface/datasets/pull/75
id: 616,520,163
node_id: MDExOlB1bGxSZXF1ZXN0NDE2NjE0MzU1
number: 75
title: WIP adding metrics
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T09:52:00Z"
updated_at: "2020-05-13T07:44:12Z"
closed_at: "2020-05-13T07:44:10Z"
author_association: MEMBER
active_lock_reason: null
body:
Adding the following metrics as identified by @mariamabarham:

1. BLEU: BiLingual Evaluation Understudy: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py, https://github.com/chakki-works/sumeval/blob/master/sumeval/metrics/bleu.py (multilingual)
2. GLEU: Google-BLEU: https://github.com/cnap/gec-ranking/blob/master/scripts/compute_gleu
3. Sacrebleu: https://pypi.org/project/sacrebleu/1.4.8/ (pypi package), https://github.com/mjpost/sacrebleu (github implementation)
4. ROUGE: Recall-Oriented Understudy for Gisting Evaluation: https://github.com/google-research/google-research/tree/master/rouge, https://github.com/chakki-works/sumeval/blob/master/sumeval/metrics/rouge.py (multilingual)
5. Seqeval: https://github.com/chakki-works/seqeval (github implementation), https://pypi.org/project/seqeval/0.0.12/ (pypi package)
6. Coval: coreference evaluation package for the CoNLL and ARRAU datasets: https://github.com/ns-moosavi/coval
7. SQuAD v1 evaluation script
8. SQuAD v2 evaluation script: https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
9. GLUE
10. XNLI

Not now:

1. Perplexity: https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/perplexity.py
2. Spearman: https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/spearman_correlation.py
3. F1_measure: https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/f1_measure.py
4. Pearson_correlation: https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/pearson_correlation.py
5. AUC: https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/auc.py
6. Entropy: https://github.com/allenai/allennlp/blob/master/allennlp/training/metrics/entropy.py
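For a taste of one of the listed metrics, here is how the sacrebleu pypi package is typically called on its own, independent of the wrappers this PR adds.

```python
import sacrebleu

hypotheses = ["the cat sat on the mat"]
references = [["the cat sat on the mat"]]  # one inner list per reference set

bleu = sacrebleu.corpus_bleu(hypotheses, references)
print(bleu.score)  # 100.0 for an exact match
```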
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/75/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/75/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/75.diff", "html_url": "https://github.com/huggingface/datasets/pull/75", "merged_at": "2020-05-13T07:44:10Z", "patch_url": "https://github.com/huggingface/datasets/pull/75.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/75" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/74
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/74/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/74/comments
https://api.github.com/repos/huggingface/datasets/issues/74/events
https://github.com/huggingface/datasets/pull/74
id: 616,511,101
node_id: MDExOlB1bGxSZXF1ZXN0NDE2NjA3MDcy
number: 74
title: fix overflow check
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T09:38:01Z"
updated_at: "2020-05-12T10:04:39Z"
closed_at: "2020-05-12T10:04:38Z"
author_association: MEMBER
active_lock_reason: null
body:
I did some tests and unfortunately the test

```
pa_array.nbytes > MAX_BATCH_BYTES
```

doesn't work. Indeed, for a StructArray, `nbytes` can be less than 2GB even if there is an overflow (the count wraps around). I don't think we can do a proper overflow test for the 2GB limit... For now I replaced it with a sanity check on the first element.
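A rough illustration of the shape of that check, under the assumption described above (this is a sketch, not the PR's actual code; the constant name follows the quoted snippet):

```python
import pyarrow as pa

MAX_BATCH_BYTES = 2 << 30  # ~2GB

batch = pa.array([{"text": "hello"}, {"text": "world"}])  # inferred StructArray
# An nbytes comparison like this can under-report after an offset overflow,
# which is why it is unreliable as an overflow guard:
assert batch.nbytes < MAX_BATCH_BYTES
# Cheap sanity check on the first element instead:
first_example = batch[0].as_py()
assert isinstance(first_example, dict)
```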
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/74/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/74/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/74.diff", "html_url": "https://github.com/huggingface/datasets/pull/74", "merged_at": "2020-05-12T10:04:37Z", "patch_url": "https://github.com/huggingface/datasets/pull/74.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/74" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/73
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/73/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/73/comments
https://api.github.com/repos/huggingface/datasets/issues/73/events
https://github.com/huggingface/datasets/pull/73
id: 616,417,845
node_id: MDExOlB1bGxSZXF1ZXN0NDE2NTMyMTg1
number: 73
title: JSON script
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-12T07:11:22Z"
updated_at: "2020-05-18T06:50:37Z"
closed_at: "2020-05-18T06:50:36Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
Add a JSON script to read JSON datasets from files.
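Presumably usage follows the generic loading-script pattern, something along these lines (a sketch only; the library was still called `nlp` at the time and the exact signature may have differed):

```python
import nlp

# "json" here names the generic loading script this PR adds;
# the file path is a made-up placeholder.
dataset = nlp.load_dataset("json", data_files={"train": "my_records.json"})
```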
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/73/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/73/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/73.diff", "html_url": "https://github.com/huggingface/datasets/pull/73", "merged_at": "2020-05-18T06:50:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/73.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/73" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/72
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/72/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/72/comments
https://api.github.com/repos/huggingface/datasets/issues/72/events
https://github.com/huggingface/datasets/pull/72
id: 616,225,010
node_id: MDExOlB1bGxSZXF1ZXN0NDE2Mzc4Mjg4
number: 72
title: [README dummy data tests] README to better understand how the dummy data structure works
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-11T22:19:03Z"
updated_at: "2020-05-11T22:26:03Z"
closed_at: "2020-05-11T22:26:01Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
In this PR a README.md is added to the tests to shed more light on how the dummy data structure works. I try to explain the different possible cases. IMO the best way to understand the logic is to check out the dummy data structure of the different datasets I mention in the README.md, since those are the "edge cases". @mariamabarham @thomwolf @lhoestq @jplu - I'd be happy if you check out the dummy data structure and give some feedback on possible improvements.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/72/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/72/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/72.diff", "html_url": "https://github.com/huggingface/datasets/pull/72", "merged_at": "2020-05-11T22:26:01Z", "patch_url": "https://github.com/huggingface/datasets/pull/72.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/72" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/71
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/71/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/71/comments
https://api.github.com/repos/huggingface/datasets/issues/71/events
https://github.com/huggingface/datasets/pull/71
id: 615,942,180
node_id: MDExOlB1bGxSZXF1ZXN0NDE2MTUxODM4
number: 71
title: Fix arrow writer for big datasets using writer_batch_size
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-11T14:45:36Z"
updated_at: "2020-05-11T20:09:47Z"
closed_at: "2020-05-11T20:00:38Z"
author_association: MEMBER
active_lock_reason: null
body:
This PR fixes Yacine's bug. According to [this](https://github.com/apache/arrow/blob/master/docs/source/cpp/arrays.rst#size-limitations-and-recommendations), it is not recommended to have pyarrow arrays bigger than 2GB. Therefore I set a default batch size of 100,000 examples per batch. In general it shouldn't exceed 2GB. If it does, I reduce the batch_size on the fly, and I notify the user with a warning.
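The adaptive idea reads roughly like the sketch below (illustrative only; the function names and the size-estimate callable are assumptions, not the actual ArrowWriter code):

```python
import warnings

DEFAULT_WRITER_BATCH_SIZE = 100_000
MAX_BATCH_BYTES = 2 << 30  # stay under the ~2GB pyarrow recommendation

def write_in_batches(examples, estimate_nbytes, write_batch):
    """Write `examples` in batches, halving the batch size when one is too big."""
    batch_size = DEFAULT_WRITER_BATCH_SIZE
    i = 0
    while i < len(examples):
        batch = examples[i : i + batch_size]
        # Reduce the batch size on the fly and warn the user.
        while batch_size > 1 and estimate_nbytes(batch) > MAX_BATCH_BYTES:
            batch_size //= 2
            batch = examples[i : i + batch_size]
            warnings.warn(f"batch too large, reducing writer_batch_size to {batch_size}")
        write_batch(batch)
        i += len(batch)
```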
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/71/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/71/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/71.diff", "html_url": "https://github.com/huggingface/datasets/pull/71", "merged_at": "2020-05-11T20:00:38Z", "patch_url": "https://github.com/huggingface/datasets/pull/71.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/71" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/70
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/70/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/70/comments
https://api.github.com/repos/huggingface/datasets/issues/70/events
https://github.com/huggingface/datasets/pull/70
id: 615,679,102
node_id: MDExOlB1bGxSZXF1ZXN0NDE1OTM3NDgw
number: 70
title: adding RACE, QASC, Super_glue and Tiny_shakespear datasets
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-11T08:07:49Z"
updated_at: "2020-05-12T13:21:52Z"
closed_at: "2020-05-12T13:21:51Z"
author_association: CONTRIBUTOR
active_lock_reason: null
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/70/reactions" }
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/70/timeline
performed_via_github_app: null
state_reason: null
draft: 0
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/70.diff", "html_url": "https://github.com/huggingface/datasets/pull/70", "merged_at": "2020-05-12T13:21:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/70.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/70" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/69
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/69/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/69/comments
https://api.github.com/repos/huggingface/datasets/issues/69/events
https://github.com/huggingface/datasets/pull/69
id: 615,450,534
node_id: MDExOlB1bGxSZXF1ZXN0NDE1NzYyNTQ4
number: 69
title: fix cache dir in builder tests
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-10T18:39:21Z"
updated_at: "2020-05-11T07:19:30Z"
closed_at: "2020-05-11T07:19:28Z"
author_association: MEMBER
active_lock_reason: null
body:
minor fix
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/69/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/69/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/69.diff", "html_url": "https://github.com/huggingface/datasets/pull/69", "merged_at": "2020-05-11T07:19:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/69.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/69" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/68
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/68/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/68/comments
https://api.github.com/repos/huggingface/datasets/issues/68/events
https://github.com/huggingface/datasets/pull/68
id: 614,882,655
node_id: MDExOlB1bGxSZXF1ZXN0NDE1MzQ3NTgw
number: 68
title: [CSV] re-add csv
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-08T17:38:29Z"
updated_at: "2020-05-08T17:40:48Z"
closed_at: "2020-05-08T17:40:46Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
Re-adding csv under the datasets under construction to keep CircleCI happy - will have to see how to include it in the tests. @lhoestq noticed that I accidentally deleted it in https://github.com/huggingface/nlp/pull/63#discussion_r422263729.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/68/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/68/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/68.diff", "html_url": "https://github.com/huggingface/datasets/pull/68", "merged_at": "2020-05-08T17:40:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/68.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/68" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/67
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/67/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/67/comments
https://api.github.com/repos/huggingface/datasets/issues/67/events
https://github.com/huggingface/datasets/pull/67
id: 614,798,483
node_id: MDExOlB1bGxSZXF1ZXN0NDE1Mjc5NjI0
number: 67
title: [Tests] Test files locally
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-08T15:02:43Z"
updated_at: "2020-05-08T19:50:47Z"
closed_at: "2020-05-08T15:17:00Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
This PR adds an `aws` and a `local` decorator to the tests so that tests now run on the local datasets. By default, `aws` is deactivated, `local` is activated and `slow` is deactivated, so that only 1 test per dataset runs on CircleCI.

**When local is activated, all folders in `./datasets` are tested.**

**Important:** When adding a dataset, we should no longer upload it to AWS. The steps are:
1. Open a PR
2. Add a dataset as described in `datasets/README.md`
3. If all tests pass, push to master

Currently we have 49 functional datasets in our code base. We have 6 "under construction" datasets that don't pass the tests, so I put them in a folder "datasets_under_construction" - it would be nice to open a PR to fix them and put them in the `datasets` folder.

**Important:** when running tests locally, the datasets are cached, so to rerun them delete your local cache via `rm -r ~/.cache/huggingface/datasets/*`.

@thomwolf @mariamabarham @lhoestq
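Such decorators are usually thin wrappers around `pytest.mark.skipif` keyed on environment variables; a sketch under that assumption (the names `RUN_LOCAL`/`RUN_AWS` are illustrative, not the PR's exact code):

```python
import os
import pytest

# Local tests run by default; aws tests are opt-in.
local = pytest.mark.skipif(
    os.environ.get("RUN_LOCAL", "1") != "1", reason="local dataset tests disabled"
)
aws = pytest.mark.skipif(
    os.environ.get("RUN_AWS", "0") != "1", reason="aws dataset tests disabled by default"
)

@aws
def test_load_dataset_from_aws():
    ...  # only runs when RUN_AWS=1
```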
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/67/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/67/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/67.diff", "html_url": "https://github.com/huggingface/datasets/pull/67", "merged_at": "2020-05-08T15:17:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/67.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/67" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/66
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/66/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/66/comments
https://api.github.com/repos/huggingface/datasets/issues/66/events
https://github.com/huggingface/datasets/pull/66
id: 614,748,552
node_id: MDExOlB1bGxSZXF1ZXN0NDE1MjM5Njgy
number: 66
title: [Datasets] ReadME
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-08T13:37:43Z"
updated_at: "2020-05-08T13:39:23Z"
closed_at: "2020-05-08T13:39:22Z"
author_association: CONTRIBUTOR
active_lock_reason: null
reactions:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/66/reactions" }
timeline_url: https://api.github.com/repos/huggingface/datasets/issues/66/timeline
performed_via_github_app: null
state_reason: null
draft: 0
pull_request:
{ "diff_url": "https://github.com/huggingface/datasets/pull/66.diff", "html_url": "https://github.com/huggingface/datasets/pull/66", "merged_at": "2020-05-08T13:39:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/66.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/66" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/65
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/65/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/65/comments
https://api.github.com/repos/huggingface/datasets/issues/65/events
https://github.com/huggingface/datasets/pull/65
id: 614,746,516
node_id: MDExOlB1bGxSZXF1ZXN0NDE1MjM4MDEw
number: 65
title: fix math dataset and xcopa
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-08T13:33:55Z"
updated_at: "2020-05-08T13:35:41Z"
closed_at: "2020-05-08T13:35:40Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
- fixes math dataset and xcopa, uploaded both of them to S3
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/65/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/65/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/65.diff", "html_url": "https://github.com/huggingface/datasets/pull/65", "merged_at": "2020-05-08T13:35:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/65.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/65" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/64
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/64/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/64/comments
https://api.github.com/repos/huggingface/datasets/issues/64/events
https://github.com/huggingface/datasets/pull/64
id: 614,737,057
node_id: MDExOlB1bGxSZXF1ZXN0NDE1MjMwMjYy
number: 64
title: [Datasets] Make master ready for datasets adding
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-08T13:17:00Z"
updated_at: "2020-05-08T13:17:31Z"
closed_at: "2020-05-08T13:17:30Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
Add all relevant files so that datasets can now be added on master
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/64/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/64/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/64.diff", "html_url": "https://github.com/huggingface/datasets/pull/64", "merged_at": "2020-05-08T13:17:30Z", "patch_url": "https://github.com/huggingface/datasets/pull/64.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/64" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/63
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/63/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/63/comments
https://api.github.com/repos/huggingface/datasets/issues/63/events
https://github.com/huggingface/datasets/pull/63
id: 614,666,365
node_id: MDExOlB1bGxSZXF1ZXN0NDE1MTczODU5
number: 63
title: [Dataset scripts] add all datasets scripts
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-08T10:50:15Z"
updated_at: "2020-05-08T17:39:22Z"
closed_at: "2020-05-08T11:34:00Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
As mentioned, we can have the canonical datasets on master. For now I also want to include all the data as present on S3 to make the synchronization easier when uploading new datasets. @mariamabarham @lhoestq @thomwolf - what do you think? If this is ok for you, I can sync up master with the `add_dataset` branch (https://github.com/huggingface/nlp/pull/37) so that master is up to date.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/63/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/63/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/63.diff", "html_url": "https://github.com/huggingface/datasets/pull/63", "merged_at": "2020-05-08T11:34:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/63.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/63" }
is_pull_request: true
https://api.github.com/repos/huggingface/datasets/issues/62
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/62/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/62/comments
https://api.github.com/repos/huggingface/datasets/issues/62/events
https://github.com/huggingface/datasets/pull/62
id: 614,630,830
node_id: MDExOlB1bGxSZXF1ZXN0NDE1MTQ1NDAx
number: 62
title: [Cached Path] Better error message
user:
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
labels: []
state: closed
locked: false
assignee: null
assignees: []
milestone: null
comments: []
created_at: "2020-05-08T09:39:47Z"
updated_at: "2020-05-08T09:45:47Z"
closed_at: "2020-05-08T09:45:47Z"
author_association: CONTRIBUTOR
active_lock_reason: null
body:
IMO returning `None` in this function only leads to confusion and is never helpful.
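The gist of the change, sketched (simplified pseudocode, not the real `cached_path` implementation):

```python
import os

def cached_path(path):
    if os.path.exists(path):
        return path
    # Raising with a message beats silently returning None:
    raise FileNotFoundError(f"Local path doesn't exist: {path}")
```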
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/62/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/62/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/62.diff", "html_url": "https://github.com/huggingface/datasets/pull/62", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/62.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/62" }
true
https://api.github.com/repos/huggingface/datasets/issues/61
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/61/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/61/comments
https://api.github.com/repos/huggingface/datasets/issues/61/events
https://github.com/huggingface/datasets/pull/61
614,607,474
MDExOlB1bGxSZXF1ZXN0NDE1MTI3MTU4
61
[Load] rename setup_module to prepare_module
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-08T08:54:22Z"
"2020-05-08T08:56:32Z"
"2020-05-08T08:56:16Z"
CONTRIBUTOR
null
Rename `setup_module` to `prepare_module` due to issues with pytest's `setup_module` function. See: PR #59.
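For context, pytest treats any module-level name `setup_module` as its xunit-style setup hook and calls it with the test module object. A hypothetical minimal test file showing the clash:

```python
# test_example.py (hypothetical illustration, not the real test file)
from nlp.load import setup_module  # old name: pytest picks this import up as
                                   # the module setup hook and calls it with
                                   # the module object, which crashes (see #59)

def test_something():
    assert True
```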
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/61/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/61/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/61.diff", "html_url": "https://github.com/huggingface/datasets/pull/61", "merged_at": "2020-05-08T08:56:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/61.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/61" }
true
https://api.github.com/repos/huggingface/datasets/issues/60
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/60/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/60/comments
https://api.github.com/repos/huggingface/datasets/issues/60/events
https://github.com/huggingface/datasets/pull/60
614,372,553
MDExOlB1bGxSZXF1ZXN0NDE0OTQyNjEy
60
Update to simplify some datasets conversion
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-05-07T22:02:24Z"
"2020-05-08T10:38:32Z"
"2020-05-08T10:18:24Z"
MEMBER
null
This PR updates the encoding of `Values` like integers, booleans and floats to use python casting, to avoid having to cast in the dataset scripts, as mentioned here: https://github.com/huggingface/nlp/pull/37#discussion_r420176626 We could also change (not included in this PR yet): - `supervised_keys`, to make them a NamedTuple instead of a dataclass, and - handle the `Translation` features specifically, as mentioned here: https://github.com/huggingface/nlp/pull/37#discussion_r421740236 @patrickvonplaten @mariamabarham tell me if you want these two last changes as well.
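A hedged sketch of the idea (not the actual `nlp.features` code): the feature casts with the matching python type so dataset scripts don't have to.

```python
from dataclasses import dataclass

@dataclass
class Value:
    dtype: str

    def encode_example(self, value):
        # cast with the matching python type before writing to arrow
        if self.dtype == "bool":
            return bool(value)
        if self.dtype.startswith("int"):
            return int(value)
        if self.dtype.startswith("float"):
            return float(value)
        return value

print(Value("int32").encode_example("5"))  # 5
```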
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/60/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/60/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/60.diff", "html_url": "https://github.com/huggingface/datasets/pull/60", "merged_at": "2020-05-08T10:18:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/60.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/60" }
true
https://api.github.com/repos/huggingface/datasets/issues/59
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/59/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/59/comments
https://api.github.com/repos/huggingface/datasets/issues/59/events
https://github.com/huggingface/datasets/pull/59
614,366,045
MDExOlB1bGxSZXF1ZXN0NDE0OTM3NTgx
59
Fix tests
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-05-07T21:48:09Z"
"2020-05-08T10:57:57Z"
"2020-05-08T10:46:51Z"
MEMBER
null
@patrickvonplaten I've broken the tests a bit with #25 while simplifying and re-organizing the `load.py` and `download_manager.py` scripts. I'm trying to fix them here but I have a weird error, do you think you can have a look? ```bash (datasets) MacBook-Pro-de-Thomas:datasets thomwolf$ python -m pytest -sv ./tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ============================================================================= test session starts ============================================================================= platform darwin -- Python 3.7.7, pytest-5.4.1, py-1.8.1, pluggy-0.13.1 -- /Users/thomwolf/miniconda2/envs/datasets/bin/python cachedir: .pytest_cache rootdir: /Users/thomwolf/Documents/GitHub/datasets plugins: xdist-1.31.0, forked-1.1.3 collected 1 item tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ERROR =================================================================================== ERRORS ==================================================================================== ____________________________________________________________ ERROR at setup of DatasetTest.test_builder_class_snli ____________________________________________________________ file_path = <module 'tests.test_dataset_common' from '/Users/thomwolf/Documents/GitHub/datasets/tests/test_dataset_common.py'> download_config = DownloadConfig(cache_dir=None, force_download=False, resume_download=False, local_files_only=False, proxies=None, user_agent=None, extract_compressed_file=True, force_extract=True) download_kwargs = {} def setup_module(file_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs,) -> DatasetBuilder: r""" Download/extract/cache a dataset to add to the lib from a path or url which can be: - a path to a local directory containing the dataset processing python script - an url to a S3 directory with a dataset processing python script Dataset codes are cached inside the lib to allow easy import (avoid ugly sys.path tweaks) and using cloudpickle (among other things). Return: tuple of the unique id associated to the dataset the local path to the dataset """ if download_config is None: download_config = DownloadConfig(**download_kwargs) download_config.extract_compressed_file = True download_config.force_extract = True > name = list(filter(lambda x: x, file_path.split("/")))[-1] + ".py" E AttributeError: module 'tests.test_dataset_common' has no attribute 'split' src/nlp/load.py:169: AttributeError ============================================================================== warnings summary =============================================================================== /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15 /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses import imp -- Docs: https://docs.pytest.org/en/latest/warnings.html =========================================================================== short test summary info =========================================================================== ERROR tests/test_dataset_common.py::DatasetTest::test_builder_class_snli - AttributeError: module 'tests.test_dataset_common' has no attribute 'split' ========================================================================= 1 warning, 1 error in 3.63s ========================================================================= ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/59/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/59/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/59.diff", "html_url": "https://github.com/huggingface/datasets/pull/59", "merged_at": "2020-05-08T10:46:51Z", "patch_url": "https://github.com/huggingface/datasets/pull/59.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/59" }
true
https://api.github.com/repos/huggingface/datasets/issues/58
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/58/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/58/comments
https://api.github.com/repos/huggingface/datasets/issues/58/events
https://github.com/huggingface/datasets/pull/58
614,362,308
MDExOlB1bGxSZXF1ZXN0NDE0OTM0NTY4
58
Aborted PR - Fix tests
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-05-07T21:40:19Z"
"2020-05-07T21:48:01Z"
"2020-05-07T21:41:27Z"
MEMBER
null
@patrickvonplaten I've broken the tests a bit with #25 while simplifying and re-organizing the `load.py` and `download_manager.py` scripts. I'm trying to fix them here but I have a weird error, do you think you can have a look? ```bash (datasets) MacBook-Pro-de-Thomas:datasets thomwolf$ python -m pytest -sv ./tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ============================================================================= test session starts ============================================================================= platform darwin -- Python 3.7.7, pytest-5.4.1, py-1.8.1, pluggy-0.13.1 -- /Users/thomwolf/miniconda2/envs/datasets/bin/python cachedir: .pytest_cache rootdir: /Users/thomwolf/Documents/GitHub/datasets plugins: xdist-1.31.0, forked-1.1.3 collected 1 item tests/test_dataset_common.py::DatasetTest::test_builder_class_snli ERROR =================================================================================== ERRORS ==================================================================================== ____________________________________________________________ ERROR at setup of DatasetTest.test_builder_class_snli ____________________________________________________________ file_path = <module 'tests.test_dataset_common' from '/Users/thomwolf/Documents/GitHub/datasets/tests/test_dataset_common.py'> download_config = DownloadConfig(cache_dir=None, force_download=False, resume_download=False, local_files_only=False, proxies=None, user_agent=None, extract_compressed_file=True, force_extract=True) download_kwargs = {} def setup_module(file_path: str, download_config: Optional[DownloadConfig] = None, **download_kwargs,) -> DatasetBuilder: r""" Download/extract/cache a dataset to add to the lib from a path or url which can be: - a path to a local directory containing the dataset processing python script - an url to a S3 directory with a dataset processing python script Dataset codes are cached inside the lib to allow easy import (avoid ugly sys.path tweaks) and using cloudpickle (among other things). Return: tuple of the unique id associated to the dataset the local path to the dataset """ if download_config is None: download_config = DownloadConfig(**download_kwargs) download_config.extract_compressed_file = True download_config.force_extract = True > name = list(filter(lambda x: x, file_path.split("/")))[-1] + ".py" E AttributeError: module 'tests.test_dataset_common' has no attribute 'split' src/nlp/load.py:169: AttributeError ============================================================================== warnings summary =============================================================================== /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15 /Users/thomwolf/miniconda2/envs/datasets/lib/python3.7/site-packages/tensorflow_core/python/pywrap_tensorflow_internal.py:15: DeprecationWarning: the imp module is deprecated in favour of importlib; see the module's documentation for alternative uses import imp -- Docs: https://docs.pytest.org/en/latest/warnings.html =========================================================================== short test summary info =========================================================================== ERROR tests/test_dataset_common.py::DatasetTest::test_builder_class_snli - AttributeError: module 'tests.test_dataset_common' has no attribute 'split' ========================================================================= 1 warning, 1 error in 3.63s ========================================================================= ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/58/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/58/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/58.diff", "html_url": "https://github.com/huggingface/datasets/pull/58", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/58.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/58" }
true
https://api.github.com/repos/huggingface/datasets/issues/57
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/57/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/57/comments
https://api.github.com/repos/huggingface/datasets/issues/57/events
https://github.com/huggingface/datasets/pull/57
614,261,638
MDExOlB1bGxSZXF1ZXN0NDE0ODUzMDM5
57
Better cached path
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-05-07T18:36:00Z"
"2020-05-08T13:20:30Z"
"2020-05-08T13:20:28Z"
MEMBER
null
### Changes: - The `cached_path` no longer returns None if the file is missing or the url doesn't work. Instead, it can raise `FileNotFoundError` (missing file), `ConnectionError` (no cache and unreachable url) or `ValueError` (parsing error) - Fix requests to the firebase API, which doesn't handle HEAD requests... - Allow custom downloads in dataset scripts: this allows using `tf.io.gfile.copy`, for example, to download from google storage. I added an example: the `boolq` script
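A sketch of a custom download function, assuming the hook expects a `(src_url, local_path)` callable; `tf.io.gfile.copy` matches that shape and understands `gs://` paths, which is roughly what the `boolq` script relies on:

```python
import tensorflow as tf

def gcs_download(src_url: str, local_path: str) -> str:
    # tf.io.gfile.copy can read from google storage directly
    tf.io.gfile.copy(src_url, local_path, overwrite=True)
    return local_path
```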
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/57/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/57/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/57.diff", "html_url": "https://github.com/huggingface/datasets/pull/57", "merged_at": "2020-05-08T13:20:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/57.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/57" }
true
https://api.github.com/repos/huggingface/datasets/issues/56
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/56/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/56/comments
https://api.github.com/repos/huggingface/datasets/issues/56/events
https://github.com/huggingface/datasets/pull/56
614,236,869
MDExOlB1bGxSZXF1ZXN0NDE0ODMyODY4
56
[Dataset] Tester add mock function
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-07T17:51:37Z"
"2020-05-07T17:52:51Z"
"2020-05-07T17:52:50Z"
CONTRIBUTOR
null
Need to add an empty `extract()` function to make the `hansard` dataset test work.
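A minimal sketch of what such a no-op could look like, assuming a mock download manager in the test utils (names hypothetical):

```python
class MockDownloadManager:
    """Dummy data ships pre-extracted, so extract() passes paths through."""

    def extract(self, path):
        # no-op: the dummy data zip is already unpacked by the test harness
        return path
```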
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/56/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/56/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/56.diff", "html_url": "https://github.com/huggingface/datasets/pull/56", "merged_at": "2020-05-07T17:52:50Z", "patch_url": "https://github.com/huggingface/datasets/pull/56.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/56" }
true
https://api.github.com/repos/huggingface/datasets/issues/55
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/55/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/55/comments
https://api.github.com/repos/huggingface/datasets/issues/55/events
https://github.com/huggingface/datasets/pull/55
613,968,072
MDExOlB1bGxSZXF1ZXN0NDE0NjE0MjE1
55
Beam datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-05-07T11:04:32Z"
"2020-05-11T07:20:02Z"
"2020-05-11T07:20:00Z"
MEMBER
null
# Beam datasets ## Intro Beam datasets use beam pipelines for preprocessing (basically lots of `.map` over objects called PCollections). The advantage of apache beam is that you can choose which type of runner you want to use to preprocess your data. The main runners are: - the `DirectRunner` to run the pipeline locally (default). However I encountered memory issues for big datasets (like the french or english wikipedia). Small datasets work fine - Google Dataflow. I didn't play with it. - Spark or Flink, two well known data processing frameworks. I tried to use the Spark/Flink local runners provided by apache beam for python and wasn't able to make them work properly though... ## From tfds beam datasets to our own beam datasets Tensorflow datasets used beam and a complicated pipeline to shard the TFRecords files. To allow users to download beam datasets without having to preprocess them, they also allow downloading the already preprocessed datasets from their google storage (the beam pipeline doesn't run in that case). On our side, we replace TFRecords by something else. Arrow or Parquet do the job but I chose Parquet as: 1) there is a builtin apache beam parquet writer that is quite convenient, and 2) reading parquet from the pyarrow library is also simple and effective (there is a mmap option!) Moreover we don't shard datasets into many files like tfds (they were probably doing that mainly because of the limit of 2Gb per TFRecord file). Therefore we have a simpler pipeline that saves each split into one parquet file. We also removed the utilities to use their google storage (for now maybe? we'll have to discuss it). ## Main changes - Added a BeamWriter to save the output of beam pipelines into parquet files and fill dataset infos - Created a ParquetReader and refactored the arrow_reader.py a bit \> **With this, we can now try to add beam datasets from tfds** I already added the wikipedia one, and I will also try to add the Wiki40b dataset ## Test the wikipedia script You can download and run the beam pipeline for wikipedia (using the `DirectRunner` by default) like this: ``` >>> import nlp >>> nlp.load("datasets/nlp/wikipedia", dataset_config="20200501.frr") ``` This wikipedia dataset (lang: frr, North Frisian) is a small one (~10Mb), but feel free to try bigger ones (and fill 20Gb of swap memory if you try the english one lol) ## Next Should we allow downloading preprocessed datasets from the tfds google storage? Should we try to optimize the beam pipelines to run locally without memory issues? Should we try other data processing frameworks for big datasets, like spark? ## About this PR It should be merged after #25 ----------------- I'd be happy to have your feedback and your ideas to improve the processing of big datasets like wikipedia :)
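As a side note, reading one of the resulting parquet splits with the mmap option mentioned above could look like this (file name hypothetical):

```python
import pyarrow.parquet as pq

# memory_map=True avoids loading the whole split into RAM
table = pq.read_table("wikipedia-20200501.frr-train.parquet", memory_map=True)
print(table.num_rows)
```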
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/55/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/55/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/55.diff", "html_url": "https://github.com/huggingface/datasets/pull/55", "merged_at": "2020-05-11T07:20:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/55.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/55" }
true
https://api.github.com/repos/huggingface/datasets/issues/54
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/54/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/54/comments
https://api.github.com/repos/huggingface/datasets/issues/54/events
https://github.com/huggingface/datasets/pull/54
613,513,348
MDExOlB1bGxSZXF1ZXN0NDE0MjUyODkw
54
[Tests] Improved Error message for dummy folder structure
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-06T18:11:48Z"
"2020-05-06T18:13:00Z"
"2020-05-06T18:12:59Z"
CONTRIBUTOR
null
Improved Error message
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/54/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/54/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/54.diff", "html_url": "https://github.com/huggingface/datasets/pull/54", "merged_at": "2020-05-06T18:12:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/54.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/54" }
true
https://api.github.com/repos/huggingface/datasets/issues/53
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/53/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/53/comments
https://api.github.com/repos/huggingface/datasets/issues/53/events
https://github.com/huggingface/datasets/pull/53
613,436,158
MDExOlB1bGxSZXF1ZXN0NDE0MTkwMzkz
53
[Features] Typo in generate_from_dict
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-06T16:05:23Z"
"2020-05-07T15:28:46Z"
"2020-05-07T15:28:45Z"
CONTRIBUTOR
null
Change `isinstance` test in features when generating features from dict.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/53/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/53/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/53.diff", "html_url": "https://github.com/huggingface/datasets/pull/53", "merged_at": "2020-05-07T15:28:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/53.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/53" }
true
https://api.github.com/repos/huggingface/datasets/issues/52
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/52/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/52/comments
https://api.github.com/repos/huggingface/datasets/issues/52/events
https://github.com/huggingface/datasets/pull/52
613,339,071
MDExOlB1bGxSZXF1ZXN0NDE0MTEyMDAy
52
allow dummy folder structure to handle dict of lists
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-06T13:54:35Z"
"2020-05-06T13:55:19Z"
"2020-05-06T13:55:18Z"
CONTRIBUTOR
null
`esnli.py` needs this extension of the dummy data testing.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/52/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/52/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/52.diff", "html_url": "https://github.com/huggingface/datasets/pull/52", "merged_at": "2020-05-06T13:55:18Z", "patch_url": "https://github.com/huggingface/datasets/pull/52.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/52" }
true
https://api.github.com/repos/huggingface/datasets/issues/51
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/51/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/51/comments
https://api.github.com/repos/huggingface/datasets/issues/51/events
https://github.com/huggingface/datasets/pull/51
613,266,668
MDExOlB1bGxSZXF1ZXN0NDE0MDUyOTYw
51
[Testing] Improved testing structure
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-06T12:03:07Z"
"2020-05-07T22:07:19Z"
"2020-05-06T13:20:18Z"
CONTRIBUTOR
null
This PR refactors the test design a bit and puts the mock download manager in the `utils` files as it is just a test helper class. As @mariamabarham pointed out, creating a dummy folder structure can be quite hard to grasp. This PR tries to change that to some extent. It follows the following logic for the `dummy` folder structure now: 1) The data builder has no config -> the `dummy` folder structure is: `dummy/<version>/dummy_data.zip` 2) The data builder has >= 1 configs -> the `dummy` folder structure is: `dummy/<config_name_1>/<version>/dummy_data.zip` `dummy/<config_name_2>/<version>/dummy_data.zip` Now, the difficult part is how to create the `dummy_data.zip` file. There are two cases: A) The `data_urls` parameter inserted into the `download_and_extract` fn is a **string**: -> the `dummy_data.zip` file zips the folder: `dummy_data/<relative_path_of_folder_structure_of_url>` B) The `data_urls` parameter inserted into the `download_and_extract` fn is a **dict**: -> the `dummy_data.zip` file zips the folder: `dummy_data/<relative_path_of_folder_structure_of_url_behind_key_1>` `dummy_data/<relative_path_of_folder_structure_of_url_behind_key_2>` By relative folder structure I mean `url_path.split('/')[-1]`. As an example the dataset **xquad** by deepmind has the following url path behind the key `de`: `https://github.com/deepmind/xquad/blob/master/xquad.de.json` -> This means that the relative url path should be `xquad.de.json`. @mariamabarham B) is a change from how it was before and I think it makes more sense. While before the `dummy_data.zip` file for xquad with config `de` looked like `dummy_data/de`, it would now look like `dummy_data/xquad.de.json`. I think this is better and easier to understand. Therefore there are currently 6 tests that would have to change their dummy folder structure, which can easily be done (30min). I also added a function `print_dummy_data_folder_structure` that prints out the expected structures when testing, which should be quite helpful. A sketch of the resulting layout follows below.
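To make the layout concrete, a hypothetical helper that computes the expected zip location from the rules above (purely illustrative, not part of the PR):

```python
def dummy_data_path(dataset_name, version, config_name=None):
    # mirrors the dummy folder structure described above
    parts = [dataset_name, "dummy"]
    if config_name is not None:
        parts.append(config_name)
    parts += [version, "dummy_data.zip"]
    return "/".join(parts)

print(dummy_data_path("xquad", "1.0.0", "de"))
# xquad/dummy/de/1.0.0/dummy_data.zip
```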
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/51/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/51/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/51.diff", "html_url": "https://github.com/huggingface/datasets/pull/51", "merged_at": "2020-05-06T13:20:17Z", "patch_url": "https://github.com/huggingface/datasets/pull/51.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/51" }
true
https://api.github.com/repos/huggingface/datasets/issues/50
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/50/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/50/comments
https://api.github.com/repos/huggingface/datasets/issues/50/events
https://github.com/huggingface/datasets/pull/50
612,583,126
MDExOlB1bGxSZXF1ZXN0NDEzNTAwMjE0
50
[Tests] test only for fast test as a default
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-05T12:59:22Z"
"2020-05-05T13:02:18Z"
"2020-05-05T13:02:16Z"
CONTRIBUTOR
null
Test only one config on Circle CI to speed up testing. Add the all-config test as a slow test. @mariamabarham @thomwolf
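One common way to gate such tests (a sketch, not necessarily the exact decorator used in this repo):

```python
import os
import unittest

# skip unless RUN_SLOW=1 is set, e.g. on a nightly CI job
slow = unittest.skipUnless(
    os.getenv("RUN_SLOW", "0") == "1", "slow test; set RUN_SLOW=1 to run"
)

class DatasetTest(unittest.TestCase):
    @slow
    def test_load_all_configs(self):
        ...
```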
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/50/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/50/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/50.diff", "html_url": "https://github.com/huggingface/datasets/pull/50", "merged_at": "2020-05-05T13:02:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/50.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/50" }
true
https://api.github.com/repos/huggingface/datasets/issues/49
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/49/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/49/comments
https://api.github.com/repos/huggingface/datasets/issues/49/events
https://github.com/huggingface/datasets/pull/49
612,545,483
MDExOlB1bGxSZXF1ZXN0NDEzNDY5ODg0
49
fix flatten nested
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-05-05T11:55:13Z"
"2020-05-05T13:59:26Z"
"2020-05-05T13:59:25Z"
MEMBER
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/49/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/49/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/49.diff", "html_url": "https://github.com/huggingface/datasets/pull/49", "merged_at": "2020-05-05T13:59:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/49.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/49" }
true
https://api.github.com/repos/huggingface/datasets/issues/48
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/48/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/48/comments
https://api.github.com/repos/huggingface/datasets/issues/48/events
https://github.com/huggingface/datasets/pull/48
612,504,687
MDExOlB1bGxSZXF1ZXN0NDEzNDM2MTgz
48
[Command Convert] remove tensorflow import
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-05T10:41:00Z"
"2020-05-05T11:13:58Z"
"2020-05-05T11:13:56Z"
CONTRIBUTOR
null
Remove all tensorflow import statements.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/48/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/48/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/48.diff", "html_url": "https://github.com/huggingface/datasets/pull/48", "merged_at": "2020-05-05T11:13:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/48.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/48" }
true
https://api.github.com/repos/huggingface/datasets/issues/47
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/47/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/47/comments
https://api.github.com/repos/huggingface/datasets/issues/47/events
https://github.com/huggingface/datasets/pull/47
612,446,493
MDExOlB1bGxSZXF1ZXN0NDEzMzg5MDc1
47
[PyArrow Feature] fix py arrow bool
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-05T08:56:28Z"
"2020-05-05T10:40:28Z"
"2020-05-05T10:40:27Z"
CONTRIBUTOR
null
Looking at the pyarrow types (https://arrow.apache.org/docs/python/api/datatypes.html), it seems to me that `bool` can only be accessed as `bool_`.
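This is easy to check interactively:

```python
import pyarrow as pa

print(pa.bool_())  # bool -- the boolean type factory is pa.bool_()
# there is no plain pa.bool attribute, which is what this PR accounts for
```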
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/47/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/47/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/47.diff", "html_url": "https://github.com/huggingface/datasets/pull/47", "merged_at": "2020-05-05T10:40:27Z", "patch_url": "https://github.com/huggingface/datasets/pull/47.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/47" }
true
https://api.github.com/repos/huggingface/datasets/issues/46
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/46/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/46/comments
https://api.github.com/repos/huggingface/datasets/issues/46/events
https://github.com/huggingface/datasets/pull/46
612,398,190
MDExOlB1bGxSZXF1ZXN0NDEzMzUxNTY0
46
[Features] Strip str key before dict look-up
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-05T07:31:45Z"
"2020-05-05T08:37:45Z"
"2020-05-05T08:37:44Z"
CONTRIBUTOR
null
The dataset `anli.py` currently fails because it tries to look up the key `1\n` in a dict that only has the key `1`. Added an if statement to strip the key if it cannot be found in the dict.
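A sketch of the fallback (illustrative, not the exact diff):

```python
def lookup(mapping, key):
    # try the raw key first, then the stripped key, e.g. "1\n" -> "1"
    if key not in mapping:
        key = key.strip()
    return mapping[key]

print(lookup({"1": "neutral"}, "1\n"))  # neutral
```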
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/46/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/46/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/46.diff", "html_url": "https://github.com/huggingface/datasets/pull/46", "merged_at": "2020-05-05T08:37:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/46.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/46" }
true
https://api.github.com/repos/huggingface/datasets/issues/45
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/45/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/45/comments
https://api.github.com/repos/huggingface/datasets/issues/45/events
https://github.com/huggingface/datasets/pull/45
612,386,583
MDExOlB1bGxSZXF1ZXN0NDEzMzQzMjAy
45
[Load] Separate Module kwargs and builder kwargs.
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-05T07:09:54Z"
"2022-10-04T09:32:11Z"
"2020-05-08T09:51:22Z"
CONTRIBUTOR
null
Kwargs for the `load_module` fn should be passed with a `module_` prefix (as `module_xxxx`) via the `builder_kwargs` of the `load` fn. This is a follow-up PR to: https://github.com/huggingface/nlp/pull/41
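A hypothetical illustration of the routing (names assumed, not the actual `load.py` code):

```python
def split_builder_kwargs(builder_kwargs):
    # "module_*" kwargs go to the module-loading fn, the rest to the builder
    module_kwargs, rest = {}, {}
    for key, value in builder_kwargs.items():
        if key.startswith("module_"):
            module_kwargs[key[len("module_"):]] = value
        else:
            rest[key] = value
    return module_kwargs, rest

print(split_builder_kwargs({"module_force_reload": True, "version": "1.0.0"}))
# ({'force_reload': True}, {'version': '1.0.0'})
```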
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/45/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/45/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/45.diff", "html_url": "https://github.com/huggingface/datasets/pull/45", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/45.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/45" }
true
https://api.github.com/repos/huggingface/datasets/issues/44
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/44/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/44/comments
https://api.github.com/repos/huggingface/datasets/issues/44/events
https://github.com/huggingface/datasets/pull/44
611,873,486
MDExOlB1bGxSZXF1ZXN0NDEyOTUwMzU1
44
[Tests] Fix tests for datasets with no config
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-04T13:25:38Z"
"2020-05-04T13:28:04Z"
"2020-05-04T13:28:03Z"
CONTRIBUTOR
null
Forgot to fix the `None` problem for datasets that have no config in this PR: https://github.com/huggingface/nlp/pull/42
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/44/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/44/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/44.diff", "html_url": "https://github.com/huggingface/datasets/pull/44", "merged_at": "2020-05-04T13:28:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/44.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/44" }
true
https://api.github.com/repos/huggingface/datasets/issues/43
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/43/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/43/comments
https://api.github.com/repos/huggingface/datasets/issues/43/events
https://github.com/huggingface/datasets/pull/43
611,773,279
MDExOlB1bGxSZXF1ZXN0NDEyODcxNTE5
43
[Checksums] If no configs exist, prevent running over an empty list
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-04T10:39:42Z"
"2022-10-04T09:32:02Z"
"2020-05-04T13:18:03Z"
CONTRIBUTOR
null
`movie_rationales`, for example, has no configs.
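A sketch of the guard, assuming a `BUILDER_CONFIGS`-style class attribute:

```python
class DummyBuilder:
    BUILDER_CONFIGS = []  # e.g. movie_rationales has no configs

# fall back to a single unnamed config instead of looping over nothing
for config in DummyBuilder.BUILDER_CONFIGS or [None]:
    print("verifying checksums for config:", config)
```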
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/43/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/43/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/43.diff", "html_url": "https://github.com/huggingface/datasets/pull/43", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/43.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/43" }
true
https://api.github.com/repos/huggingface/datasets/issues/42
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/42/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/42/comments
https://api.github.com/repos/huggingface/datasets/issues/42/events
https://github.com/huggingface/datasets/pull/42
611,754,343
MDExOlB1bGxSZXF1ZXN0NDEyODU1OTE2
42
[Tests] allow tests for builders without config
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-04T10:06:22Z"
"2020-05-04T13:10:50Z"
"2020-05-04T13:10:48Z"
CONTRIBUTOR
null
Some dataset scripts have no configs - the tests have to be adapted for this case. In this case the dummy data will be saved under: `natural_questions/dummy/1.0.0/dummy_data.zip` (1.0.0 being the version number).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/42/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/42/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/42.diff", "html_url": "https://github.com/huggingface/datasets/pull/42", "merged_at": "2020-05-04T13:10:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/42.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/42" }
true
https://api.github.com/repos/huggingface/datasets/issues/41
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/41/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/41/comments
https://api.github.com/repos/huggingface/datasets/issues/41/events
https://github.com/huggingface/datasets/pull/41
611,739,219
MDExOlB1bGxSZXF1ZXN0NDEyODQzNDQy
41
[Load module] allow kwargs into load module
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-04T09:42:11Z"
"2020-05-04T19:39:07Z"
"2020-05-04T19:39:06Z"
CONTRIBUTOR
null
Currently it is not possible to force a re-download of the dataset script. This simple change allows passing ``force_reload=True`` as part of the ``builder_kwargs`` in the ``load.py`` function.
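A minimal usage sketch of what this enables (a sketch only: the top-level call and the way `builder_kwargs` is forwarded are assumptions based on this description, not a documented API):

```python
# Hypothetical usage: force the dataset processing script to be re-downloaded
# by forwarding force_reload through builder_kwargs. The names are taken from
# the PR description and should be treated as assumptions, not a stable API.
import nlp

dataset = nlp.load(
    "squad",
    builder_kwargs={"force_reload": True},  # ignore the cached script, fetch a fresh copy
)
```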
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/41/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/41/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/41.diff", "html_url": "https://github.com/huggingface/datasets/pull/41", "merged_at": "2020-05-04T19:39:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/41.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/41" }
true
https://api.github.com/repos/huggingface/datasets/issues/40
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/40/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/40/comments
https://api.github.com/repos/huggingface/datasets/issues/40/events
https://github.com/huggingface/datasets/pull/40
611,721,308
MDExOlB1bGxSZXF1ZXN0NDEyODI4NzU2
40
Update remote checksums instead of overwrite
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-05-04T09:13:14Z"
"2020-05-04T11:51:51Z"
"2020-05-04T11:51:49Z"
MEMBER
null
When the user uploads a dataset to S3, checksums are also uploaded with the `--upload_checksums` parameter. Previously, if the user uploaded the dataset in several steps, the remote checksums file was overwritten at each step. Now it is updated with the new checksums instead.
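A minimal sketch of the update-instead-of-overwrite idea (the `url -> {num_bytes, checksum}` layout is assumed for illustration, not the actual checksums file format):

```python
# Merge newly computed checksums into the previously uploaded ones before
# re-uploading, so entries from earlier upload steps are preserved instead
# of being wiped out.
def merge_checksums(remote: dict, new: dict) -> dict:
    merged = dict(remote)  # keep every previously uploaded entry
    merged.update(new)     # add or refresh entries from the current step
    return merged

remote = {"https://example.com/part1.zip": {"num_bytes": 10, "checksum": "abc"}}
new = {"https://example.com/part2.zip": {"num_bytes": 20, "checksum": "def"}}
assert merge_checksums(remote, new) == {**remote, **new}
```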
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/40/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/40/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/40.diff", "html_url": "https://github.com/huggingface/datasets/pull/40", "merged_at": "2020-05-04T11:51:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/40.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/40" }
true
https://api.github.com/repos/huggingface/datasets/issues/39
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/39/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/39/comments
https://api.github.com/repos/huggingface/datasets/issues/39/events
https://github.com/huggingface/datasets/pull/39
611,712,135
MDExOlB1bGxSZXF1ZXN0NDEyODIxNTA4
39
[Test] improve slow testing
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-04T08:58:33Z"
"2020-05-04T08:59:50Z"
"2020-05-04T08:59:49Z"
CONTRIBUTOR
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/39/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/39/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/39.diff", "html_url": "https://github.com/huggingface/datasets/pull/39", "merged_at": "2020-05-04T08:59:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/39.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/39" }
true
https://api.github.com/repos/huggingface/datasets/issues/38
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/38/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/38/comments
https://api.github.com/repos/huggingface/datasets/issues/38/events
https://github.com/huggingface/datasets/issues/38
611,677,656
MDU6SXNzdWU2MTE2Nzc2NTY=
38
[Checksums] Error for some datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }, { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
[]
"2020-05-04T08:00:16Z"
"2020-05-04T09:48:20Z"
"2020-05-04T09:48:20Z"
CONTRIBUTOR
null
The checksums command works very nicely for `squad`, but for `crime_and_punish` and `xnli` the same bug happens. Running: ``` python nlp-cli test xnli --save_checksums ``` leads to: ``` File "nlp-cli", line 33, in <module> service.run() File "/home/patrick/python_bin/nlp/commands/test.py", line 61, in run ignore_checksums=self._ignore_checksums, File "/home/patrick/python_bin/nlp/builder.py", line 383, in download_and_prepare self._download_and_prepare(dl_manager=dl_manager, download_config=download_config) File "/home/patrick/python_bin/nlp/builder.py", line 627, in _download_and_prepare dl_manager=dl_manager, max_examples_per_split=download_config.max_examples_per_split, File "/home/patrick/python_bin/nlp/builder.py", line 431, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/patrick/python_bin/nlp/datasets/xnli/8bf4185a2da1ef2a523186dd660d9adcf0946189e7fa5942ea31c63c07b68a7f/xnli.py", line 95, in _split_generators dl_dir = dl_manager.download_and_extract(_DATA_URL) File "/home/patrick/python_bin/nlp/utils/download_manager.py", line 246, in download_and_extract return self.extract(self.download(url_or_urls)) File "/home/patrick/python_bin/nlp/utils/download_manager.py", line 186, in download self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) File "/home/patrick/python_bin/nlp/utils/download_manager.py", line 166, in _record_sizes_checksums self._recorded_sizes_checksums[url] = get_size_checksum(path) File "/home/patrick/python_bin/nlp/utils/checksums_utils.py", line 81, in get_size_checksum with open(path, "rb") as f: TypeError: expected str, bytes or os.PathLike object, not tuple ```
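The last frame of the traceback can be reproduced in isolation (a standalone illustration of the failure mode, not the nlp code itself):

```python
# open() requires a str, bytes or os.PathLike path. When the download manager
# accidentally records a tuple instead of a plain path string, the checksum
# helper fails exactly as in the traceback above.
path = ("", "/tmp/downloads/c0f7773c480a3f2d85639d777e0e17e65527460310d80760fd3fc2b2f2960556")
try:
    open(path, "rb")
except TypeError as e:
    print(e)  # expected str, bytes or os.PathLike object, not tuple
```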
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/38/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/38/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/37
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/37/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/37/comments
https://api.github.com/repos/huggingface/datasets/issues/37/events
https://github.com/huggingface/datasets/pull/37
611,670,295
MDExOlB1bGxSZXF1ZXN0NDEyNzg5MjQ4
37
[Datasets ToDo-List] add datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }, { "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" } ]
null
[]
"2020-05-04T07:47:39Z"
"2022-10-04T09:32:17Z"
"2020-05-08T13:48:23Z"
CONTRIBUTOR
null
## Description This PR acts as a dashboard to see which datasets are added to the library and work. Circle CI should always be green so that we can be sure that newly added datasets are functional. This PR should not be merged. ## Progress **For the following datasets the test commands**: ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_real_dataset_<your-dataset-name> ``` and ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_dataset_all_configs_<your-dataset-name> ``` **pass**. - [x] Squad - [x] Sentiment140 - [x] XNLI - [x] Crime_and_Punish - [x] movie_rationales - [x] ai2_arc - [x] anli - [x] event2Mind - [x] Fquad - [x] blimp - [x] empathetic_dialogues - [x] cosmos_qa - [x] xquad - [x] blog_authorship_corpus - [x] SNLI - [x] break_data - [x] SQuAD v2 - [x] cfq - [x] eraser_multi_rc - [x] Glue - [x] Tydiqa - [x] wiki_qa - [x] wikitext - [x] winogrande - [x] wiqa - [x] esnli - [x] civil_comments - [x] commonsense_qa - [x] com_qa - [x] coqa - [x] wiki_split - [x] cos_e - [x] xcopa - [x] quarel - [x] quartz - [x] squad_it - [x] quoref - [x] squad_pt - [x] cornell_movie_dialog - [x] SciQ - [x] Scifact - [x] hellaswag - [x] ted_multi (in translate) - [x] Aeslc (summarization) - [x] drop - [x] gap - [x] hansard - [x] opinosis - [x] MLQA - [x] math_dataset ## How-To-Add a dataset **Before adding a dataset make sure that your branch is up to date**: 1. `git checkout add_datasets` 2. `git pull` **Add a dataset via the `convert_dataset.sh` bash script:** Running `bash convert_dataset.sh <file/to/tfds/datascript.py>` (*e.g.* `bash convert_dataset.sh ../tensorflow-datasets/tensorflow_datasets/text/movie_rationales.py`) will automatically run all the steps mentioned in **Add a dataset manually** below. Make sure that you run `convert_dataset.sh` from the root folder of `nlp`. The conversion script should almost always work for step 1) "convert dataset script from tfds to nlp format", step 2) "create checksum file" and step 3) "make style". It can also sometimes automatically run step 4) "create the correct dummy data from tfds", but this will only work if a) there is either no config name or only one config name and b) the `tfds testing/test_data/fake_example` is in the correct form. Nevertheless, to be more efficient, the script should always be run first, until an error occurs. If the conversion script does not work or fails at some step, then you can run the steps manually as follows: **Add a dataset manually** Make sure you run all of the following commands from the root of your `nlp` git clone. Also make sure that you have checked out this branch: ``` git checkout add_datasets ``` 1) the tfds datascript file should be converted to `nlp` style: ``` python nlp-cli convert --tfds_path <path/to/tensorflow_datasets/text/your_dataset_name>.py --nlp_directory datasets/nlp ``` This will convert the tfds script and create a folder with the correct name. 2) the checksum file should be added. Use the command: ``` python nlp-cli test datasets/nlp/<your-dataset-folder> --save_checksums --all_configs ``` A checksums.txt file should be created in your folder and the structure should look as follows: squad/ ├── squad.py └── urls_checksums/ └── checksums.txt Delete the created `*.lock` file afterward - it should not be uploaded to AWS. 3) run black and isort on your newly added datascript files so that they look nice: ``` make style ``` 4) the dummy data should be added. 
For this it might be useful to take a look at the structure of other examples, as shown in this PR, and to check at `<path/to/tensorflow_datasets/testing/test_data/test_data/fake_examples>` whether the same data can be used. 5) the data can be uploaded to AWS using the command ``` aws s3 cp datasets/nlp/<your-dataset-folder> s3://datasets.huggingface.co/nlp/<your-dataset-folder> --recursive ``` 6) check whether everything works as expected using: ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_real_dataset_<your-dataset-name> ``` and ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_dataset_all_configs_<your-dataset-name> ``` 7) push to this PR and rerun the circle ci workflow to check whether circle ci stays green. 8) Edit this comment and tick off your newly added dataset :-) ## TODO-list Maybe we can add a TODO-list here for everybody who feels like adding new datasets, so that we don't add the same dataset twice. Here is a link to available datasets: https://docs.google.com/spreadsheets/d/1zOtEqOrnVQwdgkC4nJrTY6d-Av02u0XFzeKAtBM2fUI/edit#gid=0 Patrick: - [ ] boolq - *weird download link* - [ ] c4 - *beam dataset*
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/37/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/37/timeline
null
null
1
{ "diff_url": "https://github.com/huggingface/datasets/pull/37.diff", "html_url": "https://github.com/huggingface/datasets/pull/37", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/37.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/37" }
true
https://api.github.com/repos/huggingface/datasets/issues/36
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/36/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/36/comments
https://api.github.com/repos/huggingface/datasets/issues/36/events
https://github.com/huggingface/datasets/pull/36
611,528,349
MDExOlB1bGxSZXF1ZXN0NDEyNjgwOTk1
36
Metrics - refactoring, adding support for download and distributed metrics
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-05-03T23:00:17Z"
"2020-05-11T08:16:02Z"
"2020-05-11T08:16:00Z"
MEMBER
null
Refactoring metrics to have a loading API similar to that of the datasets, and improving the import system. # Import system The import system has been upgraded. There are now three types of imports allowed: 1. `library` imports (identified as "absolute imports") ```python import seqeval ``` => we'll test all the imports before running the scripts and if one cannot be imported we'll display an error message like this one: `ImportError: To be able to use this metric/dataset, you need to install the following dependencies ['seqeval'] using 'pip install seqeval' for instance'` 2. `internal` imports (identified as "relative imports") ```python from . import c4_utils ``` => we'll assume this points to a file in the same directory/S3-directory as the main script and download this file. 3. `external` imports (identified as "relative imports" with a comment starting with `# From:`) ```python from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py ``` => we'll assume this points to the URL of a python script (if it's a link to a github file, we'll take the raw file automatically). => the script is downloaded and renamed to the import name (here above renamed from `bleu.py` to `nmt_bleu.py`). Renaming the file can be necessary if the distant file has the same name as the dataset/metric processing script. If you forget to rename the distant script and it has the same name as the dataset/metric, you'll get an explicit error message asking you to rename the import anyway. # Hosting metrics Metrics are hosted on an S3 bucket like the dataset processing scripts. # Metrics scripts Metrics scripts have a lot in common with dataset processing scripts. They also have a `metric.info` including citations, descriptions and links to relevant pages. Metrics have more documentation to supply, to ensure they are used well. Four examples are already included for reference in [./metrics](./metrics): BLEU, ROUGE, SacreBLEU and SeqEVAL. # Automatic support for distributed/multi-processing metric computation We've also added support for automatic distributed/multi-processing metric computation (e.g. when using DistributedDataParallel). We leverage our own dataset format for smart caching in this case. Here is a quick gist of a standard use of metrics (the simplest usage): ```python import nlp bleu_metric = nlp.load_metric('bleu') # If you only have a single iteration, you can easily compute the score like this predictions = model(inputs) score = bleu_metric.compute(predictions, references) # If you have a loop, you can "add" your predictions and references at each iteration instead of having to save them yourself (the metric object stores them efficiently for you) for batch in dataloader: model_inputs, targets = batch predictions = model(model_inputs) bleu_metric.add(predictions, targets) score = bleu_metric.compute() # Compute the score from all the stored predictions/references ``` Here is a quick gist of a use in a distributed torch setup (should work for any python multi-process setup actually). 
It's pretty much identical to the second example above: ```python import nlp # You need to give the total number of parallel python processes (num_process) and the id of each process (process_id) bleu_metric = nlp.load_metric('bleu', process_id=torch.distributed.get_rank(), num_process=torch.distributed.get_world_size()) for batch in dataloader: model_inputs, targets = batch predictions = model(model_inputs) bleu_metric.add(predictions, targets) score = bleu_metric.compute() # Compute the score on the first node by default (can be set to compute on each node as well) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/36/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/36/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/36.diff", "html_url": "https://github.com/huggingface/datasets/pull/36", "merged_at": "2020-05-11T08:16:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/36.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/36" }
true
https://api.github.com/repos/huggingface/datasets/issues/35
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/35/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/35/comments
https://api.github.com/repos/huggingface/datasets/issues/35/events
https://github.com/huggingface/datasets/pull/35
611,413,731
MDExOlB1bGxSZXF1ZXN0NDEyNjAyMTc0
35
[Tests] fix typo
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-03T13:23:49Z"
"2020-05-03T13:24:21Z"
"2020-05-03T13:24:20Z"
CONTRIBUTOR
null
@lhoestq - currently the slow test fails with: ``` _____________________________________________________________________________________ DatasetTest.test_load_real_dataset_xnli _____________________________________________________________________________________ self = <tests.test_dataset_common.DatasetTest testMethod=test_load_real_dataset_xnli>, dataset_name = 'xnli' @slow def test_load_real_dataset(self, dataset_name): with tempfile.TemporaryDirectory() as temp_data_dir: > dataset = load(dataset_name, data_dir=temp_data_dir) tests/test_dataset_common.py:153: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ ../../python_bin/nlp/load.py:497: in load dbuilder.download_and_prepare(**download_and_prepare_kwargs) ../../python_bin/nlp/builder.py:383: in download_and_prepare self._download_and_prepare(dl_manager=dl_manager, download_config=download_config) ../../python_bin/nlp/builder.py:627: in _download_and_prepare dl_manager=dl_manager, max_examples_per_split=download_config.max_examples_per_split, ../../python_bin/nlp/builder.py:431: in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) ../../python_bin/nlp/datasets/xnli/8bf4185a2da1ef2a523186dd660d9adcf0946189e7fa5942ea31c63c07b68a7f/xnli.py:95: in _split_generators dl_dir = dl_manager.download_and_extract(_DATA_URL) ../../python_bin/nlp/utils/download_manager.py:246: in download_and_extract return self.extract(self.download(url_or_urls)) ../../python_bin/nlp/utils/download_manager.py:186: in download self._record_sizes_checksums(url_or_urls, downloaded_path_or_paths) ../../python_bin/nlp/utils/download_manager.py:166: in _record_sizes_checksums self._recorded_sizes_checksums[url] = get_size_checksum(path) _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ path = ('', '/tmp/tmpkajlg9yc/downloads/c0f7773c480a3f2d85639d777e0e17e65527460310d80760fd3fc2b2f2960556.c952a63cb17d3d46e412ceb7dbcd656ce2b15cc9ef17f50c28f81c48a7c853b5') def get_size_checksum(path: str) -> Tuple[int, str]: """Compute the file size and the sha256 checksum of a file""" m = sha256() > with open(path, "rb") as f: E TypeError: expected str, bytes or os.PathLike object, not tuple ../../python_bin/nlp/utils/checksums_utils.py:81: TypeError ``` - the checksums probably need to be updated, no? And we should also think about how to write a test for the checksums.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 1, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/35/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/35/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/35.diff", "html_url": "https://github.com/huggingface/datasets/pull/35", "merged_at": "2020-05-03T13:24:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/35.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/35" }
true
https://api.github.com/repos/huggingface/datasets/issues/34
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/34/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/34/comments
https://api.github.com/repos/huggingface/datasets/issues/34/events
https://github.com/huggingface/datasets/pull/34
611,385,516
MDExOlB1bGxSZXF1ZXN0NDEyNTg0OTM0
34
[Tests] add slow tests
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-03T11:01:22Z"
"2020-05-03T12:18:30Z"
"2020-05-03T12:18:29Z"
CONTRIBUTOR
null
This PR adds a slow test that downloads the "real" dataset. The test is decorated as "slow" so that it does not run automatically on circle ci. Before uploading a dataset, one should manually verify that this test passes by running ``` RUN_SLOW=1 pytest tests/test_dataset_common.py::DatasetTest::test_load_real_dataset_<your-dataset-script-name> ``` This PR should be merged after PR #33.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/34/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/34/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/34.diff", "html_url": "https://github.com/huggingface/datasets/pull/34", "merged_at": "2020-05-03T12:18:29Z", "patch_url": "https://github.com/huggingface/datasets/pull/34.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/34" }
true
https://api.github.com/repos/huggingface/datasets/issues/33
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/33/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/33/comments
https://api.github.com/repos/huggingface/datasets/issues/33/events
https://github.com/huggingface/datasets/pull/33
611,052,081
MDExOlB1bGxSZXF1ZXN0NDEyMzU1ODE0
33
Big cleanup/refactoring for clean serialization
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-05-01T23:45:57Z"
"2020-05-03T12:17:34Z"
"2020-05-03T12:17:33Z"
MEMBER
null
This PR cleans many base classes to re-build them as `dataclasses`. We can thus use a simple serialization workflow for `DatasetInfo`, including its `Features` and `SplitDict`, based on `dataclasses` `asdict()`. The resulting code is a lot shorter, can be easily serialized/deserialized, dataset infos are human-readable, and we can get rid of the `dataclass_json` dependency. The scripts have breaking changes and the conversion tool is updated. Example of dataset info in the SQuAD script now: ```python def _info(self): return nlp.DatasetInfo( description=_DESCRIPTION, features=nlp.Features({ "id": nlp.Value('string'), "title": nlp.Value('string'), "context": nlp.Value('string'), "question": nlp.Value('string'), "answers": nlp.Sequence({ "text": nlp.Value('string'), "answer_start": nlp.Value('int32'), }), }), # No default supervised_keys (as we have to pass both question # and context as input). supervised_keys=None, homepage="https://rajpurkar.github.io/SQuAD-explorer/", citation=_CITATION, ) ``` Example of serialized dataset info: ```json { "description": "Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.\n", "citation": "@article{2016arXiv160605250R,\n author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},\n Konstantin and {Liang}, Percy},\n title = \"{SQuAD: 100,000+ Questions for Machine Comprehension of Text}\",\n journal = {arXiv e-prints},\n year = 2016,\n eid = {arXiv:1606.05250},\n pages = {arXiv:1606.05250},\narchivePrefix = {arXiv},\n eprint = {1606.05250},\n}\n", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": { "id": { "dtype": "string", "_type": "Value" }, "title": { "dtype": "string", "_type": "Value" }, "context": { "dtype": "string", "_type": "Value" }, "question": { "dtype": "string", "_type": "Value" }, "answers": { "feature": { "text": { "dtype": "string", "_type": "Value" }, "answer_start": { "dtype": "int32", "_type": "Value" } }, "length": -1, "_type": "Sequence" } }, "supervised_keys": null, "name": "squad", "version": { "version_str": "1.0.0", "description": "New split API (https://tensorflow.org/datasets/splits)", "nlp_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0 }, "splits": { "train": { "name": "train", "num_bytes": 79426386, "num_examples": 87599, "dataset_name": "squad" }, "validation": { "name": "validation", "num_bytes": 10491883, "num_examples": 10570, "dataset_name": "squad" } }, "size_in_bytes": 0, "download_size": 35142551, "download_checksums": [] } ```
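The core serialization idea can be sketched with toy classes (these are illustrative stand-ins, not the actual `nlp.DatasetInfo` definition):

```python
# Nested dataclasses serialize to human-readable JSON via asdict(), which is
# what makes the dataclass_json dependency unnecessary. The toy fields below
# are assumptions for illustration only.
import json
from dataclasses import asdict, dataclass, field

@dataclass
class ToyVersion:
    version_str: str
    major: int
    minor: int
    patch: int

@dataclass
class ToyDatasetInfo:
    name: str
    description: str = ""
    version: ToyVersion = field(default_factory=lambda: ToyVersion("1.0.0", 1, 0, 0))

info = ToyDatasetInfo(name="squad", description="toy example")
print(json.dumps(asdict(info), indent=2))  # asdict() recurses into nested dataclasses
```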
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/33/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/33/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/33.diff", "html_url": "https://github.com/huggingface/datasets/pull/33", "merged_at": "2020-05-03T12:17:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/33.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/33" }
true
https://api.github.com/repos/huggingface/datasets/issues/32
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/32/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/32/comments
https://api.github.com/repos/huggingface/datasets/issues/32/events
https://github.com/huggingface/datasets/pull/32
610,715,580
MDExOlB1bGxSZXF1ZXN0NDEyMTAzMzIx
32
Fix map caching in notebooks
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-05-01T11:55:26Z"
"2020-05-03T12:15:58Z"
"2020-05-03T12:15:57Z"
MEMBER
null
Previously, caching results with `.map()` didn't work in notebooks. To reuse a result, `.map()` serializes the function with `dill.dumps` and then hashes it. The problem is that when using `dill.dumps` to serialize a function, it also saves its origin (filename + line no.) and the origin of all the `globals` this function needs. However, for notebooks and shells, the filename looks like \<ipython-input-13-9ed2afe61d25\> and the line no. changes often. To fix the problem, I added a new dispatch function for code objects that ignores the origin of the code if it comes from a notebook or a python shell. I tested these cases in a notebook: - lambda functions - named functions - methods - classmethods - staticmethods - classes that implement `__call__` The caching now works as expected for all of them :) I also tested the caching in the demo notebook and it works fine!
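A minimal sketch of the idea (not the actual dill dispatch added in this PR): before hashing a function for the cache key, drop the unstable origin that notebooks and shells put in its code object, so the hash only depends on the code itself.

```python
# Illustration only: the helper names and the exact hashed fields are assumptions.
import hashlib
import re

def has_unstable_origin(filename: str) -> bool:
    # Notebook cells look like "<ipython-input-13-9ed2afe61d25>", shell input
    # like "<stdin>"; both change between sessions while the code may not.
    return bool(re.match(r"<ipython-input-\d+-[0-9a-f]+>", filename)) or filename == "<stdin>"

def stable_function_hash(func) -> str:
    code = func.__code__
    # Ignore the filename (and with it the unstable cell origin) for notebook/shell code.
    filename = "" if has_unstable_origin(code.co_filename) else code.co_filename
    payload = b"".join([code.co_code, repr(code.co_consts).encode(), filename.encode()])
    return hashlib.sha256(payload).hexdigest()

def add_one(x):
    return x + 1

print(stable_function_hash(add_one))  # identical code hashes identically across re-runs
```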
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/32/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/32/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/32.diff", "html_url": "https://github.com/huggingface/datasets/pull/32", "merged_at": "2020-05-03T12:15:57Z", "patch_url": "https://github.com/huggingface/datasets/pull/32.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/32" }
true
https://api.github.com/repos/huggingface/datasets/issues/31
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/31/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/31/comments
https://api.github.com/repos/huggingface/datasets/issues/31/events
https://github.com/huggingface/datasets/pull/31
610,677,641
MDExOlB1bGxSZXF1ZXN0NDEyMDczNDE4
31
[Circle ci] Install a virtual env before running tests
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-01T10:11:17Z"
"2020-05-01T22:06:16Z"
"2020-05-01T22:06:15Z"
CONTRIBUTOR
null
Install a virtual env before running tests, so that we don't run into sudo issues when dynamically downloading files. The same number of tests now pass / fail as on my local computer: ![Screenshot from 2020-05-01 12-14-44](https://user-images.githubusercontent.com/23423619/80798814-8a0a0a80-8ba5-11ea-8db8-599d33bbfccd.png)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/31/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/31/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/31.diff", "html_url": "https://github.com/huggingface/datasets/pull/31", "merged_at": "2020-05-01T22:06:15Z", "patch_url": "https://github.com/huggingface/datasets/pull/31.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/31" }
true
https://api.github.com/repos/huggingface/datasets/issues/30
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/30/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/30/comments
https://api.github.com/repos/huggingface/datasets/issues/30/events
https://github.com/huggingface/datasets/pull/30
610,549,072
MDExOlB1bGxSZXF1ZXN0NDExOTY4Mzk3
30
add metrics which require downloading files from github
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
[]
closed
false
null
[]
null
[]
"2020-05-01T04:13:22Z"
"2022-10-04T09:31:58Z"
"2020-05-11T08:19:54Z"
CONTRIBUTOR
null
To download files from github, I copied `load_dataset_module` and its dependencies (without the builder) from `load.py` to `metrics/metric_utils.py`. I made the following changes: - copy the needed files into a folder `<metric_name>` - delete all other files that are not needed For metrics that require an external import, I first create a `<metric_name>_imports.py` file which contains all the external urls. Then I create a `<metric_name>.py` in which I load the external files using `<metric_name>_imports.py`
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/30/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/30/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/30.diff", "html_url": "https://github.com/huggingface/datasets/pull/30", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/30.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/30" }
true
https://api.github.com/repos/huggingface/datasets/issues/29
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/29/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/29/comments
https://api.github.com/repos/huggingface/datasets/issues/29/events
https://github.com/huggingface/datasets/pull/29
610,243,997
MDExOlB1bGxSZXF1ZXN0NDExNzIwODMx
29
Hf_api small changes
{ "avatar_url": "https://avatars.githubusercontent.com/u/326577?v=4", "events_url": "https://api.github.com/users/julien-c/events{/privacy}", "followers_url": "https://api.github.com/users/julien-c/followers", "following_url": "https://api.github.com/users/julien-c/following{/other_user}", "gists_url": "https://api.github.com/users/julien-c/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/julien-c", "id": 326577, "login": "julien-c", "node_id": "MDQ6VXNlcjMyNjU3Nw==", "organizations_url": "https://api.github.com/users/julien-c/orgs", "received_events_url": "https://api.github.com/users/julien-c/received_events", "repos_url": "https://api.github.com/users/julien-c/repos", "site_admin": false, "starred_url": "https://api.github.com/users/julien-c/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/julien-c/subscriptions", "type": "User", "url": "https://api.github.com/users/julien-c" }
[]
closed
false
null
[]
null
[]
"2020-04-30T17:06:43Z"
"2020-04-30T19:51:45Z"
"2020-04-30T19:51:44Z"
MEMBER
null
From Patrick: ```python from nlp import hf_api api = hf_api.HfApi() api.dataset_list() ``` works :-)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/29/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/29/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/29.diff", "html_url": "https://github.com/huggingface/datasets/pull/29", "merged_at": "2020-04-30T19:51:44Z", "patch_url": "https://github.com/huggingface/datasets/pull/29.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/29" }
true
https://api.github.com/repos/huggingface/datasets/issues/28
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/28/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/28/comments
https://api.github.com/repos/huggingface/datasets/issues/28/events
https://github.com/huggingface/datasets/pull/28
610,241,907
MDExOlB1bGxSZXF1ZXN0NDExNzE5MTQy
28
[Circle ci] Adds circle ci config
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-30T17:03:35Z"
"2020-04-30T19:51:09Z"
"2020-04-30T19:51:08Z"
CONTRIBUTOR
null
@thomwolf can you take a look and set up circle ci on: https://app.circleci.com/projects/project-dashboard/github/huggingface I think for `nlp` only admins can set it up, which I guess is you :-)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/28/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/28/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/28.diff", "html_url": "https://github.com/huggingface/datasets/pull/28", "merged_at": "2020-04-30T19:51:08Z", "patch_url": "https://github.com/huggingface/datasets/pull/28.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/28" }
true
https://api.github.com/repos/huggingface/datasets/issues/27
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/27/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/27/comments
https://api.github.com/repos/huggingface/datasets/issues/27/events
https://github.com/huggingface/datasets/pull/27
610,230,476
MDExOlB1bGxSZXF1ZXN0NDExNzA5OTc0
27
[Cleanup] Removes all files in testing except test_dataset_common
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-30T16:45:21Z"
"2020-04-30T17:39:25Z"
"2020-04-30T17:39:23Z"
CONTRIBUTOR
null
As far as I know, all files in `tests` were old `tfds` test files, so I removed them. We can still look them up in the other library.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/27/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/27/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/27.diff", "html_url": "https://github.com/huggingface/datasets/pull/27", "merged_at": "2020-04-30T17:39:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/27.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/27" }
true
https://api.github.com/repos/huggingface/datasets/issues/26
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/26/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/26/comments
https://api.github.com/repos/huggingface/datasets/issues/26/events
https://github.com/huggingface/datasets/pull/26
610,226,047
MDExOlB1bGxSZXF1ZXN0NDExNzA2NjA2
26
[Tests] Clean tests
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-30T16:38:29Z"
"2020-04-30T20:12:04Z"
"2020-04-30T20:12:03Z"
CONTRIBUTOR
null
The abseil testing library (https://abseil.io/docs/python/quickstart.html) is better than the one I had before, so I decided to switch to it and changed the `setup.py` config file. I think Abseil has more support and a cleaner API for parametrized testing. I added a list of all dataset scripts that are currently on AWS, but will replace that once the API is integrated into this lib. One can now easily test just a single function for a single dataset with: `tests/test_dataset_common.py::DatasetTest::test_load_dataset_wikipedia` NOTE: This PR is rebased on PR #29 so it should be merged after it.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/26/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/26/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/26.diff", "html_url": "https://github.com/huggingface/datasets/pull/26", "merged_at": "2020-04-30T20:12:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/26.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/26" }
true
https://api.github.com/repos/huggingface/datasets/issues/25
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/25/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/25/comments
https://api.github.com/repos/huggingface/datasets/issues/25/events
https://github.com/huggingface/datasets/pull/25
609,708,863
MDExOlB1bGxSZXF1ZXN0NDExMjQ4Nzg2
25
Add script csv datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-30T08:28:08Z"
"2022-10-04T09:32:13Z"
"2020-05-07T21:14:49Z"
CONTRIBUTOR
null
This is a PR allowing the creation of datasets from local CSV files. A usage might be: ```python import nlp ds = nlp.load( path="csv", name="bbc", dataset_files={ nlp.Split.TRAIN: ["datasets/dummy_data/csv/train.csv"], nlp.Split.TEST: ["datasets/dummy_data/csv/test.csv"] }, csv_kwargs={ "skip_rows": 0, "delimiter": ",", "quote_char": "\"", "header_as_column_names": True } ) ``` ``` Downloading and preparing dataset bbc/1.0.0 (download: Unknown size, generated: Unknown size, total: Unknown size) to /home/jplu/.cache/huggingface/datasets/bbc/1.0.0... Dataset bbc downloaded and prepared to /home/jplu/.cache/huggingface/datasets/bbc/1.0.0. Subsequent calls will reuse this data. {'test': Dataset(schema: {'category': 'string', 'text': 'string'}, num_rows: 49), 'train': Dataset(schema: {'category': 'string', 'text': 'string'}, num_rows: 99), 'validation': Dataset(schema: {'category': 'string', 'text': 'string'}, num_rows: 0)} ``` How it is read: - `path`: the `csv` word means "I want to create a CSV dataset" - `name`: the name of this dataset is `bbc` - `dataset_files`: a dictionary where each split key maps to the list of files for that split - `csv_kwargs`: the keyword arguments that explain how to read the CSV files * `skip_rows`: number of rows to skip, starting from the beginning of the file * `delimiter`: which delimiter is used to separate the columns * `quote_char`: which quote character is used to wrap a column value in which the delimiter appears * `header_as_column_names`: will use the first row (header) of the file as names for the features. Otherwise the names will be automatically generated as `f1`, `f2`, etc. It is applied after the `skip_rows` parameter. **TODO**: for now the `csv.py` script is copied each time we create a new dataset as `ds_name.py`; this behavior will be modified so that the `csv.py` script is copied only once and not for every CSV dataset.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/25/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/25/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/25.diff", "html_url": "https://github.com/huggingface/datasets/pull/25", "merged_at": "2020-05-07T21:14:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/25.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/25" }
true
https://api.github.com/repos/huggingface/datasets/issues/24
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/24/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/24/comments
https://api.github.com/repos/huggingface/datasets/issues/24/events
https://github.com/huggingface/datasets/pull/24
609,064,987
MDExOlB1bGxSZXF1ZXN0NDEwNzE5MTU0
24
Add checksums
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-04-29T13:37:29Z"
"2020-04-30T19:52:50Z"
"2020-04-30T19:52:49Z"
MEMBER
null
### Checksums files They are stored next to the dataset script in `urls_checksums/checksums.txt`. They are used to check the integrity of the datasets' downloaded files. I kept the same format as tensorflow-datasets. There is one checksums file for all configs. ### Load a dataset When you do `load("squad")`, it will also download the checksums file and put it next to the script in nlp/datasets/hash/urls_checksums/checksums.txt. It also verifies that the downloaded files' checksums match the expected ones. You can ignore checksum tests with `load("squad", ignore_checksums=True)` (under the hood it just adds `ignore_checksums=True` in the `DownloadConfig`). ### Test a dataset There is a new command `nlp-cli test squad` that runs `download_and_prepare` to see if it runs ok, and that verifies that all the checksums match. Allowed arguments are `--name`, `--all_configs`, `--ignore_checksums` and `--register_checksums`. ### Register checksums 1. If the dataset has external dataset files The command `nlp-cli test squad --register_checksums --all_configs` runs `download_and_prepare` on all configs to see if it runs ok, and it creates the checksums file. You can also register one config at a time using `--name` instead; the checksums file will be completed and not overwritten. If the script is a local script, the checksums file is moved to urls_checksums/checksums.txt next to the local script, to enable the user to upload both the script and the checksums file afterwards with `nlp-cli upload squad`. 2. If the dataset files are all inside the directory of the dataset script The user can directly do `nlp-cli upload squad --register_checksums`, as there is no need to download anything. In this case however, all of the dataset must be uploaded at once. -- PS: it doesn't allow registering checksums for canonical datasets; the file has to be added manually on S3 for now (I guess?). Also, I feel like we must be sure that this process would not constrain users too much when uploading their datasets. Let me know what you think :)
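For reference, a minimal sketch of the verification step described above (assuming the tensorflow-datasets-style `checksums.txt` format with one `<url> <size_bytes> <sha256>` line per downloaded file; the helper names are illustrative, not the PR's exact API):

```python
import hashlib

def load_expected_checksums(checksums_file):
    """Parse a tfds-style checksums.txt: one '<url> <size_bytes> <sha256>' per line."""
    expected = {}
    with open(checksums_file) as f:
        for line in f:
            url, size, sha256 = line.strip().split()
            expected[url] = (int(size), sha256)
    return expected

def verify_download(url, local_path, expected):
    """Raise if the downloaded file does not match the registered size/checksum."""
    size, sha256 = expected[url]
    with open(local_path, "rb") as f:
        data = f.read()
    if len(data) != size or hashlib.sha256(data).hexdigest() != sha256:
        raise ValueError(f"Checksum verification failed for {url}")
```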
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/24/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/24/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/24.diff", "html_url": "https://github.com/huggingface/datasets/pull/24", "merged_at": "2020-04-30T19:52:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/24.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/24" }
true
https://api.github.com/repos/huggingface/datasets/issues/23
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/23/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/23/comments
https://api.github.com/repos/huggingface/datasets/issues/23/events
https://github.com/huggingface/datasets/pull/23
608,508,706
MDExOlB1bGxSZXF1ZXN0NDEwMjczOTU2
23
Add metrics
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
[]
closed
false
null
[]
null
[]
"2020-04-28T18:02:05Z"
"2022-10-04T09:31:56Z"
"2020-05-11T08:19:38Z"
CONTRIBUTOR
null
This PR is a draft for adding metrics (sacrebleu and seqeval are added). Use case examples: `import nlp` **sacrebleu:** ``` refs = [['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'], ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.']] sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.'] sacrebleu = nlp.load_metrics('sacrebleu') print(sacrebleu.score) ``` **seqeval:** ``` y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']] seqeval = nlp.load_metrics('seqeval') print(seqeval.accuracy_score(y_true, y_pred)) print(seqeval.f1_score(y_true, y_pred)) ``` _Examples are taken from the corresponding web pages._ Your comments and suggestions are more than welcome
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/23/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/23/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/23.diff", "html_url": "https://github.com/huggingface/datasets/pull/23", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/23.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/23" }
true
https://api.github.com/repos/huggingface/datasets/issues/22
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/22/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/22/comments
https://api.github.com/repos/huggingface/datasets/issues/22/events
https://github.com/huggingface/datasets/pull/22
608,298,586
MDExOlB1bGxSZXF1ZXN0NDEwMTAyMjU3
22
adding bleu score code
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
[]
closed
false
null
[]
null
[]
"2020-04-28T13:00:50Z"
"2020-04-28T17:48:20Z"
"2020-04-28T17:48:08Z"
CONTRIBUTOR
null
This PR adds the BLEU score metric to the lib. It can be tested by running the following code: ``` from nlp.metrics import bleu hyp1 = "It is a guide to action which ensures that the military always obeys the commands of the party" ref1a = "It is a guide to action that ensures that the military forces always being under the commands of the party " ref1b = "It is the guiding principle which guarantees the military force always being under the command of the Party" ref1c = "It is the practical guide for the army always to heed the directions of the party" list_of_references = [[ref1a, ref1b, ref1c]] hypotheses = [hyp1] bleu_score = bleu.bleu_score(list_of_references, hypotheses, 4, smooth=True) print(bleu_score) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/22/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/22/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/22.diff", "html_url": "https://github.com/huggingface/datasets/pull/22", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/22.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/22" }
true
https://api.github.com/repos/huggingface/datasets/issues/21
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/21/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/21/comments
https://api.github.com/repos/huggingface/datasets/issues/21/events
https://github.com/huggingface/datasets/pull/21
607,914,185
MDExOlB1bGxSZXF1ZXN0NDA5Nzk2MTM4
21
Cleanup Features - Updating convert command - Fix Download manager
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-04-27T23:16:55Z"
"2020-05-01T09:29:47Z"
"2020-05-01T09:29:46Z"
MEMBER
null
This PR makes a number of changes: # Updating `Features` Features are a complex mechanism provided in `tfds` to be able to modify a dataset on-the-fly when serializing to disk and when loading from disk. We don't really need this because (1) it hides too much from the user and (2) our data types can be directly mapped to Arrow tables on disk, so we usually don't need to change the format before/after serialization. This PR extracts and refactors these features in a single `features.py` file. It still keeps a number of feature classes for easy compatibility with tfds, namely the `Sequence`, `Tensor`, `ClassLabel` and `Translation` features. Some more complex features involving on-the-fly pre-processing during serialization are kept: - `ClassLabel`, which is able to convert from label strings to integers, - `Translation`, which does some checks on the languages. # Updating the `convert` command We do a few updates here - following the simplification of the `features` (cf above), conversions are updated - we also make it simpler to convert a single file - some code needs to be fixed manually after conversion (e.g. to remove some encoding processing in former tfds `Text` features). We highlight this code with a "git merge conflict" style syntax for easy manual fixing. # Fix download manager iterator You kept me up quite late on Tuesday night with this `os.scandir` change @lhoestq ;-)
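To illustrate the kind of on-the-fly processing `ClassLabel` keeps, here is a minimal sketch (the label names are made up, and the `str2int`/`int2str` helper names are an assumption about the refactored `features.py`):

```python
from nlp.features import ClassLabel  # assumed import path after the refactor

# ClassLabel converts label strings to integer ids during serialization.
label = ClassLabel(names=["negative", "positive"])

print(label.str2int("positive"))  # -> 1
print(label.int2str(0))           # -> "negative"
```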
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/21/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/21/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/21.diff", "html_url": "https://github.com/huggingface/datasets/pull/21", "merged_at": "2020-05-01T09:29:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/21.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/21" }
true
https://api.github.com/repos/huggingface/datasets/issues/20
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/20/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/20/comments
https://api.github.com/repos/huggingface/datasets/issues/20/events
https://github.com/huggingface/datasets/pull/20
607,313,557
MDExOlB1bGxSZXF1ZXN0NDA5MzEyMDI1
20
remove boto3 and promise dependencies
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-04-27T07:39:45Z"
"2020-04-27T16:04:17Z"
"2020-04-27T14:15:45Z"
MEMBER
null
With the new download manager, we don't need `promise` anymore. I also removed `boto3` as in [this pr](https://github.com/huggingface/transformers/pull/3968)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/20/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/20/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/20.diff", "html_url": "https://github.com/huggingface/datasets/pull/20", "merged_at": "2020-04-27T14:15:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/20.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/20" }
true
https://api.github.com/repos/huggingface/datasets/issues/19
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/19/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/19/comments
https://api.github.com/repos/huggingface/datasets/issues/19/events
https://github.com/huggingface/datasets/pull/19
606,400,645
MDExOlB1bGxSZXF1ZXN0NDA4NjIwMjUw
19
Replace tf.constant for TF
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-24T15:32:06Z"
"2020-04-29T09:27:08Z"
"2020-04-25T21:18:45Z"
CONTRIBUTOR
null
Replace the simple `tf.constant` type of Tensor with `tf.ragged.constant`, which allows having examples of different sizes in a `tf.data.Dataset`. Now the training works with TF. Here is the same example as for PT in colab: ```python import tensorflow as tf import nlp from transformers import BertTokenizerFast, TFBertForQuestionAnswering # Load our training dataset and tokenizer train_dataset = nlp.load('squad', split="train[:1%]") tokenizer = BertTokenizerFast.from_pretrained('bert-base-cased') def get_correct_alignement(context, answer): start_idx = answer['answer_start'][0] text = answer['text'][0] end_idx = start_idx + len(text) if context[start_idx:end_idx] == text: return start_idx, end_idx # When the gold label position is good elif context[start_idx-1:end_idx-1] == text: return start_idx-1, end_idx-1 # When the gold label is off by one character elif context[start_idx-2:end_idx-2] == text: return start_idx-2, end_idx-2 # When the gold label is off by two characters else: raise ValueError() # Tokenize our training dataset def convert_to_features(example_batch): # Tokenize contexts and questions (as pairs of inputs) input_pairs = list(zip(example_batch['context'], example_batch['question'])) encodings = tokenizer.batch_encode_plus(input_pairs, pad_to_max_length=True) # Compute start and end tokens for labels using Transformers' fast tokenizers' alignment methods. start_positions, end_positions = [], [] for i, (context, answer) in enumerate(zip(example_batch['context'], example_batch['answers'])): start_idx, end_idx = get_correct_alignement(context, answer) start_positions.append([encodings.char_to_token(i, start_idx)]) end_positions.append([encodings.char_to_token(i, end_idx-1)]) if start_positions and end_positions: encodings.update({'start_positions': start_positions, 'end_positions': end_positions}) return encodings train_dataset = train_dataset.map(convert_to_features, batched=True) columns = ['input_ids', 'token_type_ids', 'attention_mask', 'start_positions', 'end_positions'] train_dataset.set_format(type='tensorflow', columns=columns) features = {x: train_dataset[x] for x in columns[:3]} labels = {"output_1": train_dataset["start_positions"]} labels["output_2"] = train_dataset["end_positions"] tfdataset = tf.data.Dataset.from_tensor_slices((features, labels)).batch(8) model = TFBertForQuestionAnswering.from_pretrained("bert-base-cased") loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.NONE, from_logits=True) opt = tf.keras.optimizers.Adam(learning_rate=3e-5) model.compile(optimizer=opt, loss={'output_1': loss_fn, 'output_2': loss_fn}, loss_weights={'output_1': 1., 'output_2': 1.}, metrics=['accuracy']) model.fit(tfdataset, epochs=1, steps_per_epoch=3) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/19/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/19/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/19.diff", "html_url": "https://github.com/huggingface/datasets/pull/19", "merged_at": "2020-04-25T21:18:45Z", "patch_url": "https://github.com/huggingface/datasets/pull/19.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/19" }
true
https://api.github.com/repos/huggingface/datasets/issues/18
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/18/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/18/comments
https://api.github.com/repos/huggingface/datasets/issues/18/events
https://github.com/huggingface/datasets/pull/18
606,109,196
MDExOlB1bGxSZXF1ZXN0NDA4Mzg0MTc3
18
Updating caching mechanism - Allow dependency in dataset processing scripts - Fix style and quality in the repo
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-04-24T07:39:48Z"
"2020-04-29T15:27:28Z"
"2020-04-28T16:06:28Z"
MEMBER
null
This PR has a lot of content (might be hard to review, sorry, in particular because I fixed the style in the repo at the same time). # Style & quality: You can now install the style and quality tools with `pip install -e .[quality]`. This will install black, the compatible version of isort, and flake8. You can then clean the style and check the quality before merging your PR with: ```bash make style make quality ``` # Allow dependencies in dataset processing scripts We can now allow (some level of) imports in dataset processing scripts (in addition to PyPI imports). Namely, you can do the following two things: Import from a relative path to a file in the same folder as the dataset processing script: ```python import .c4_utils ``` Or import from a relative path to a file in a folder/archive/github repo to which you provide a URL after the import statement with `# From: [URL]`: ```python import .clicr.dataset_code.build_json_dataset # From: https://github.com/clips/clicr ``` In both these cases, after downloading the main dataset processing script, we will identify the location of these dependencies, download them and copy them into the dataset processing script folder. Note that only direct imports in the dataset processing script will be handled. We don't recursively explore the additional imports to download further files. Also, when we download from an additional directory (in the second case above), we recursively add `__init__.py` to all the sub-folders so you can import from them. This part is still untested for now. If you've seen datasets which required external utilities, tell me and I can test it. # Update the cache to have a better local structure The local structure in the `src/datasets` folder is now: `src/datasets/DATASET_NAME/DATASET_HASH/*` The hash is computed from the full code of the dataset processing script as well as all the local and downloaded dependencies as mentioned above. This way, if you change some code in a utility related to your dataset, a new hash will be computed.
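A minimal sketch of how such a content hash could be computed (the helper below is hypothetical; it only illustrates the idea of hashing the script together with its resolved dependencies):

```python
import hashlib
from pathlib import Path

def compute_dataset_hash(script_path, dependency_paths):
    """Hash the dataset script plus all its local/downloaded dependencies,
    so that editing any of them produces a new cache folder."""
    sha = hashlib.sha256()
    for path in [script_path] + sorted(dependency_paths):
        sha.update(Path(path).read_bytes())
    return sha.hexdigest()

# Resulting cache layout: src/datasets/DATASET_NAME/DATASET_HASH/*
```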
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/18/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/18/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/18.diff", "html_url": "https://github.com/huggingface/datasets/pull/18", "merged_at": "2020-04-28T16:06:28Z", "patch_url": "https://github.com/huggingface/datasets/pull/18.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/18" }
true
https://api.github.com/repos/huggingface/datasets/issues/17
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/17/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/17/comments
https://api.github.com/repos/huggingface/datasets/issues/17/events
https://github.com/huggingface/datasets/pull/17
605,753,027
MDExOlB1bGxSZXF1ZXN0NDA4MDk3NjM0
17
Add Pandas as format type
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-23T18:20:14Z"
"2020-04-27T18:07:50Z"
"2020-04-27T18:07:48Z"
CONTRIBUTOR
null
As detailed in the title ^^
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/17/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/17/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/17.diff", "html_url": "https://github.com/huggingface/datasets/pull/17", "merged_at": "2020-04-27T18:07:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/17.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/17" }
true
https://api.github.com/repos/huggingface/datasets/issues/16
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/16/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/16/comments
https://api.github.com/repos/huggingface/datasets/issues/16/events
https://github.com/huggingface/datasets/pull/16
605,661,462
MDExOlB1bGxSZXF1ZXN0NDA4MDIyMTUz
16
create our own DownloadManager
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-04-23T16:08:07Z"
"2021-05-05T18:25:24Z"
"2020-04-25T21:25:10Z"
MEMBER
null
I tried to create our own - and way simpler - download manager, by replacing all the complicated stuff with our own `cached_path` solution. With this implementation, I tried `dataset = nlp.load('squad')` and it seems to work fine. For the implementation, here is what I did exactly: - I copied the old download manager - I removed all the dependencies on the old `download` files - I replaced all the download + extract calls with calls to `cached_path` - I removed unused parameters (extract_dir, compute_stats) (maybe compute_stats could be re-added later if we want to compute stats...) - I left some functions unimplemented for now. We will probably have to implement them because they are used by some dataset scripts (download_kaggle_data, iter_archive) or because we may need them at some point (download_checksums, _record_sizes_checksums) Let me know if you think this is going in the right direction or if you have remarks. Note: I didn't write any tests yet as I wanted to read your remarks first
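A minimal sketch of the simplified manager described above (assuming a `cached_path` helper that downloads a URL into the local cache and returns the local path; the class and method names here are illustrative, not the PR's exact signatures):

```python
from nlp.utils import cached_path  # assumed location of the caching helper

class SimpleDownloadManager:
    """Resolve URLs (or nested lists/dicts of URLs) to local cached paths."""

    def download_and_extract(self, url_or_urls):
        if isinstance(url_or_urls, dict):
            return {k: self.download_and_extract(v) for k, v in url_or_urls.items()}
        if isinstance(url_or_urls, (list, tuple)):
            return [self.download_and_extract(u) for u in url_or_urls]
        # cached_path downloads (and extracts) once, then reuses the cache
        return cached_path(url_or_urls)
```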
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/16/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/16/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/16.diff", "html_url": "https://github.com/huggingface/datasets/pull/16", "merged_at": "2020-04-25T21:25:10Z", "patch_url": "https://github.com/huggingface/datasets/pull/16.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/16" }
true
https://api.github.com/repos/huggingface/datasets/issues/15
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/15/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/15/comments
https://api.github.com/repos/huggingface/datasets/issues/15/events
https://github.com/huggingface/datasets/pull/15
604,906,708
MDExOlB1bGxSZXF1ZXN0NDA3NDEwOTk3
15
[Tests] General Test Design for all dataset scripts
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-22T16:46:01Z"
"2022-10-04T09:31:54Z"
"2020-04-27T14:48:02Z"
CONTRIBUTOR
null
The general idea is similar to how testing is done in `transformers`. There is one general `test_dataset_common.py` file which has a `DatasetTesterMixin` class. This class implements all of the logic that can be used in a generic way for all dataset classes. The idea is to keep each individual dataset test file as minimal as possible. In order to test whether the specific dataset class can download the data and generate the examples **without** downloading the actual data all the time, a `MockDataLoaderManager` class is used, which receives a `mock_folder_structure_fn` function from each individual dataset test file that creates "fake" data and returns the same folder structure that would have been created when using the real data downloader.
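A minimal sketch of that mocking idea (the names mirror the description above, but the exact signatures are assumptions):

```python
class MockDataLoaderManager:
    """Stands in for the real download manager during tests: instead of
    downloading, it asks the dataset's test file to write fake data."""

    def __init__(self, mock_folder_structure_fn):
        self.mock_folder_structure_fn = mock_folder_structure_fn

    def download_and_extract(self, *args, **kwargs):
        # Create the fake files and return the same folder structure
        # the real downloader would have produced.
        return self.mock_folder_structure_fn()
```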
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/15/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/15/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/15.diff", "html_url": "https://github.com/huggingface/datasets/pull/15", "merged_at": "2020-04-27T14:48:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/15.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/15" }
true
https://api.github.com/repos/huggingface/datasets/issues/14
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/14/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/14/comments
https://api.github.com/repos/huggingface/datasets/issues/14/events
https://github.com/huggingface/datasets/pull/14
604,761,315
MDExOlB1bGxSZXF1ZXN0NDA3MjkzNjU5
14
[Download] Only create dir if not already exist
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-22T13:32:51Z"
"2022-10-04T09:31:50Z"
"2020-04-23T08:27:33Z"
CONTRIBUTOR
null
This was quite annoying to find out :D. Some datasets save files in the same directory, so we should only create a new directory if it doesn't already exist.
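In Python this boils down to guarding the directory creation, e.g. (a generic sketch, not the PR's exact diff):

```python
import os

download_dir = "/tmp/nlp_downloads"  # illustrative path

# Only create the directory if it doesn't already exist;
# exist_ok avoids failing when another dataset created it first.
os.makedirs(download_dir, exist_ok=True)
```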
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/14/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/14/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/14.diff", "html_url": "https://github.com/huggingface/datasets/pull/14", "merged_at": "2020-04-23T08:27:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/14.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/14" }
true
https://api.github.com/repos/huggingface/datasets/issues/13
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/13/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/13/comments
https://api.github.com/repos/huggingface/datasets/issues/13/events
https://github.com/huggingface/datasets/pull/13
604,547,951
MDExOlB1bGxSZXF1ZXN0NDA3MTIxMjkw
13
[Make style]
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-22T08:10:06Z"
"2022-10-04T09:31:51Z"
"2020-04-23T13:02:22Z"
CONTRIBUTOR
null
Added a Makefile and applied `make style` to everything. `make style` runs the following code: ``` style: black --line-length 119 --target-version py35 src isort --recursive src ``` It's the same code that is run in `transformers`.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/13/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/13/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/13.diff", "html_url": "https://github.com/huggingface/datasets/pull/13", "merged_at": "2020-04-23T13:02:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/13.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/13" }
true
https://api.github.com/repos/huggingface/datasets/issues/12
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/12/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/12/comments
https://api.github.com/repos/huggingface/datasets/issues/12/events
https://github.com/huggingface/datasets/pull/12
604,518,583
MDExOlB1bGxSZXF1ZXN0NDA3MDk3MzA4
12
[Map Function] add assert statement if map function does not return dict or None
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-22T07:21:24Z"
"2022-10-04T09:31:53Z"
"2020-04-24T06:29:03Z"
CONTRIBUTOR
null
IMO, if a function is provided that is neither a print-like statement (-> returns a variable of type `None`) nor a function that updates the dataset (-> returns a variable of type `dict`), then a `TypeError` should be raised. Not sure whether you had cases in mind where the user should do something else @thomwolf, but I think a lot of silent errors can be avoided with this assert statement.
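A minimal sketch of the check described above (placed inside a hypothetical `map` implementation; the names are illustrative):

```python
def check_map_function_output(processed):
    """None means the function was only used for side effects (e.g. printing);
    a dict means it updates the example. Anything else is a silent mistake."""
    if processed is not None and not isinstance(processed, dict):
        raise TypeError(
            "The provided `function` returned a variable of type "
            f"{type(processed)}. Make sure it returns a `dict` to update "
            "the examples, or `None` for side effects only."
        )
```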
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/12/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/12/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/12.diff", "html_url": "https://github.com/huggingface/datasets/pull/12", "merged_at": "2020-04-24T06:29:03Z", "patch_url": "https://github.com/huggingface/datasets/pull/12.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/12" }
true
https://api.github.com/repos/huggingface/datasets/issues/11
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/11/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/11/comments
https://api.github.com/repos/huggingface/datasets/issues/11/events
https://github.com/huggingface/datasets/pull/11
603,921,624
MDExOlB1bGxSZXF1ZXN0NDA2NjExODk2
11
[Convert TFDS to HFDS] Extend script to also allow just converting a single file
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-21T11:25:33Z"
"2022-10-04T09:31:46Z"
"2020-04-21T20:47:00Z"
CONTRIBUTOR
null
Adds another argument to be able to convert only a single file
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/11/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/11/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/11.diff", "html_url": "https://github.com/huggingface/datasets/pull/11", "merged_at": "2020-04-21T20:47:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/11.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/11" }
true
https://api.github.com/repos/huggingface/datasets/issues/10
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/10/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/10/comments
https://api.github.com/repos/huggingface/datasets/issues/10/events
https://github.com/huggingface/datasets/pull/10
603,909,327
MDExOlB1bGxSZXF1ZXN0NDA2NjAxNzQ2
10
Name json file "squad.json" instead of "squad.py.json"
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-21T11:04:28Z"
"2022-10-04T09:31:44Z"
"2020-04-21T20:48:06Z"
CONTRIBUTOR
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/10/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/10/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/10.diff", "html_url": "https://github.com/huggingface/datasets/pull/10", "merged_at": "2020-04-21T20:48:06Z", "patch_url": "https://github.com/huggingface/datasets/pull/10.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/10" }
true
https://api.github.com/repos/huggingface/datasets/issues/9
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/9/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/9/comments
https://api.github.com/repos/huggingface/datasets/issues/9/events
https://github.com/huggingface/datasets/pull/9
603,894,874
MDExOlB1bGxSZXF1ZXN0NDA2NTkwMDQw
9
[Clean up] Datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-04-21T10:39:56Z"
"2022-10-04T09:31:42Z"
"2020-04-21T20:49:58Z"
CONTRIBUTOR
null
Clean up the `nlp/datasets` folder. As I understood it, the `nlp/datasets` folder shall eventually not exist anymore at all. The folder `nlp/datasets/nlp` is kept for the moment, but won't be needed in the future, since it will live on S3 (actually it already does) at: `https://s3.console.aws.amazon.com/s3/buckets/datasets.huggingface.co/nlp/?region=us-east-1` and the different dataset downloader scripts will be added to `nlp/src/nlp` when downloaded by the user. The folder `nlp/datasets/checksums` is kept for now, but won't be needed anymore in the future. The remaining folders/files are leftovers from tensorflow-datasets and are not needed. They can be looked up in the private tensorflow-datasets repo.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/9/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/9/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/9.diff", "html_url": "https://github.com/huggingface/datasets/pull/9", "merged_at": "2020-04-21T20:49:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/9.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/9" }
true
https://api.github.com/repos/huggingface/datasets/issues/8
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/8/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/8/comments
https://api.github.com/repos/huggingface/datasets/issues/8/events
https://github.com/huggingface/datasets/pull/8
601,783,243
MDExOlB1bGxSZXF1ZXN0NDA0OTg0NDUz
8
Fix issue 6: error when the citation is missing in the DatasetInfo
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-17T08:04:26Z"
"2020-04-29T09:27:11Z"
"2020-04-20T13:24:12Z"
CONTRIBUTOR
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/8/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/8/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/8.diff", "html_url": "https://github.com/huggingface/datasets/pull/8", "merged_at": "2020-04-20T13:24:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/8.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/8" }
true
https://api.github.com/repos/huggingface/datasets/issues/7
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/7/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/7/comments
https://api.github.com/repos/huggingface/datasets/issues/7/events
https://github.com/huggingface/datasets/pull/7
601,780,534
MDExOlB1bGxSZXF1ZXN0NDA0OTgyMzA2
7
Fix issue 5: allow empty datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-17T07:59:56Z"
"2020-04-29T09:27:13Z"
"2020-04-20T13:23:48Z"
CONTRIBUTOR
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/7/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/7/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/7.diff", "html_url": "https://github.com/huggingface/datasets/pull/7", "merged_at": "2020-04-20T13:23:47Z", "patch_url": "https://github.com/huggingface/datasets/pull/7.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/7" }
true
https://api.github.com/repos/huggingface/datasets/issues/6
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6/comments
https://api.github.com/repos/huggingface/datasets/issues/6/events
https://github.com/huggingface/datasets/issues/6
600,330,836
MDU6SXNzdWU2MDAzMzA4MzY=
6
Error when citation is not given in the DatasetInfo
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-15T14:14:54Z"
"2020-04-29T09:23:22Z"
"2020-04-29T09:23:22Z"
CONTRIBUTOR
null
The following error is raised when the `citation` parameter is missing when we instantiate a `DatasetInfo`: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jplu/dev/jplu/datasets/src/nlp/info.py", line 338, in __repr__ citation_pprint = _indent('"""{}"""'.format(self.citation.strip())) AttributeError: 'NoneType' object has no attribute 'strip' ``` I propose the following change in the `info.py` file. The method: ```python def __repr__(self): splits_pprint = _indent("\n".join(["{"] + [ " '{}': {},".format(k, split.num_examples) for k, split in sorted(self.splits.items()) ] + ["}"])) features_pprint = _indent(repr(self.features)) citation_pprint = _indent('"""{}"""'.format(self.citation.strip())) return INFO_STR.format( name=self.name, version=self.version, description=self.description, total_num_examples=self.splits.total_num_examples, features=features_pprint, splits=splits_pprint, citation=citation_pprint, homepage=self.homepage, supervised_keys=self.supervised_keys, # Proto add a \n that we strip. license=str(self.license).strip()) ``` Becomes: ```python def __repr__(self): splits_pprint = _indent("\n".join(["{"] + [ " '{}': {},".format(k, split.num_examples) for k, split in sorted(self.splits.items()) ] + ["}"])) features_pprint = _indent(repr(self.features)) ## the strip is done only if the citation is given citation_pprint = self.citation if self.citation: citation_pprint = _indent('"""{}"""'.format(self.citation.strip())) return INFO_STR.format( name=self.name, version=self.version, description=self.description, total_num_examples=self.splits.total_num_examples, features=features_pprint, splits=splits_pprint, citation=citation_pprint, homepage=self.homepage, supervised_keys=self.supervised_keys, # Proto add a \n that we strip. license=str(self.license).strip()) ``` And now it works as expected. @thomwolf are you ok with this fix?
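A more compact, equivalent guard, in case brevity is preferred (illustrative only, not part of the patch proposed above):

```python
# Same behavior as the proposed fix, written as a conditional expression:
# strip and indent the citation only when one was provided.
citation_pprint = (
    _indent('"""{}"""'.format(self.citation.strip())) if self.citation else self.citation
)
```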
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5/comments
https://api.github.com/repos/huggingface/datasets/issues/5/events
https://github.com/huggingface/datasets/issues/5
600,295,889
MDU6SXNzdWU2MDAyOTU4ODk=
5
ValueError when a split is empty
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-15T13:25:13Z"
"2020-04-29T09:23:05Z"
"2020-04-29T09:23:05Z"
CONTRIBUTOR
null
When a split (either TEST, VALIDATION, or TRAIN) is empty, I get the following error: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/jplu/dev/jplu/datasets/src/nlp/load.py", line 295, in load ds = dbuilder.as_dataset(**as_dataset_kwargs) File "/home/jplu/dev/jplu/datasets/src/nlp/builder.py", line 587, in as_dataset datasets = utils.map_nested(build_single_dataset, split, map_tuple=True) File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 158, in map_nested for k, v in data_struct.items() File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 158, in <dictcomp> for k, v in data_struct.items() File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 172, in map_nested return function(data_struct) File "/home/jplu/dev/jplu/datasets/src/nlp/builder.py", line 601, in _build_single_dataset split=split, File "/home/jplu/dev/jplu/datasets/src/nlp/builder.py", line 625, in _as_dataset split_infos=self.info.splits.values(), File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 200, in read return py_utils.map_nested(_read_instruction_to_ds, instructions) File "/home/jplu/dev/jplu/datasets/src/nlp/utils/py_utils.py", line 172, in map_nested return function(data_struct) File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 191, in _read_instruction_to_ds file_instructions = make_file_instructions(name, split_infos, instruction) File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 104, in make_file_instructions absolute_instructions=absolute_instructions, File "/home/jplu/dev/jplu/datasets/src/nlp/arrow_reader.py", line 122, in _make_file_instructions_from_absolutes 'Split empty. This might means that dataset hasn\'t been generated ' ValueError: Split empty. This might means that dataset hasn't been generated yet and info not restored from GCS, or that legacy dataset is used. ``` How to reproduce: ```python import csv import nlp class Bbc(nlp.GeneratorBasedBuilder): VERSION = nlp.Version("1.0.0") def __init__(self, **config): self.train = config.pop("train", None) self.validation = config.pop("validation", None) super(Bbc, self).__init__(**config) def _info(self): return nlp.DatasetInfo(builder=self, description="bla", features=nlp.features.FeaturesDict({"id": nlp.int32, "text": nlp.string, "label": nlp.string})) def _split_generators(self, dl_manager): return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": self.train}), nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": self.validation}), nlp.SplitGenerator(name=nlp.Split.TEST, gen_kwargs={"filepath": None})] def _generate_examples(self, filepath): if not filepath: return None, {} with open(filepath) as f: reader = csv.reader(f, delimiter=',', quotechar="\"") lines = list(reader)[1:] for idx, line in enumerate(lines): yield idx, {"id": idx, "text": line[1], "label": line[0]} ``` ```python import nlp dataset = nlp.load("bbc", builder_kwargs={"train": "bbc/data/train.csv", "validation": "bbc/data/test.csv"}) ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4/comments
https://api.github.com/repos/huggingface/datasets/issues/4/events
https://github.com/huggingface/datasets/issues/4
600,185,417
MDU6SXNzdWU2MDAxODU0MTc=
4
[Feature] Keep the list of labels of a dataset as metadata
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-15T10:17:10Z"
"2020-07-08T16:59:46Z"
"2020-05-04T06:11:57Z"
CONTRIBUTOR
null
It would be useful to keep the list of labels of a dataset as metadata, either directly in the `DatasetInfo` or in the Arrow metadata.
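A minimal sketch of what this could look like, assuming the `ClassLabel` feature type that the released `datasets` library (the successor of `nlp`) ended up providing for this purpose; the label names and example rows below are illustrative, not from this issue:

```python
# Hedged sketch: carrying the label list in the dataset's features via
# ClassLabel, so it is stored alongside the DatasetInfo / Arrow metadata.
from datasets import ClassLabel, Dataset, Features, Value

features = Features({
    "text": Value("string"),
    "label": ClassLabel(names=["business", "entertainment", "politics", "sport", "tech"]),
})

ds = Dataset.from_dict(
    {"text": ["stocks rally", "new album out"], "label": [0, 1]},
    features=features,
)

# The label list is now recoverable from the dataset itself:
print(ds.features["label"].names)       # ['business', 'entertainment', ...]
print(ds.features["label"].int2str(1))  # 'entertainment'
```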
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/3
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3/comments
https://api.github.com/repos/huggingface/datasets/issues/3/events
https://github.com/huggingface/datasets/issues/3
600,180,050
MDU6SXNzdWU2MDAxODAwNTA=
3
[Feature] More dataset outputs
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-15T10:08:14Z"
"2020-05-04T06:12:27Z"
"2020-05-04T06:12:27Z"
CONTRIBUTOR
null
Add the following dataset outputs: - Spark - Pandas
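As context for the pandas half of this request, a sketch of the conversion methods the `datasets` library later shipped (`to_pandas` and the pandas output format). No Spark output landed in the core library, so that part is omitted; treat the snippet as illustrative rather than the API proposed in this issue:

```python
# Illustrative only: pandas interop as it later appeared in `datasets`,
# not necessarily the output API envisioned here.
from datasets import Dataset

ds = Dataset.from_dict({"id": [0, 1], "text": ["a", "b"]})

# Whole-dataset conversion to a pandas DataFrame:
df = ds.to_pandas()
print(type(df))  # <class 'pandas.core.frame.DataFrame'>

# Or switch the output format so slicing returns pandas objects:
ds.set_format(type="pandas")
print(ds[:])     # DataFrame with columns 'id' and 'text'
```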
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2/comments
https://api.github.com/repos/huggingface/datasets/issues/2/events
https://github.com/huggingface/datasets/issues/2
599,767,671
MDU6SXNzdWU1OTk3Njc2NzE=
2
Issue to read a local dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/959590?v=4", "events_url": "https://api.github.com/users/jplu/events{/privacy}", "followers_url": "https://api.github.com/users/jplu/followers", "following_url": "https://api.github.com/users/jplu/following{/other_user}", "gists_url": "https://api.github.com/users/jplu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jplu", "id": 959590, "login": "jplu", "node_id": "MDQ6VXNlcjk1OTU5MA==", "organizations_url": "https://api.github.com/users/jplu/orgs", "received_events_url": "https://api.github.com/users/jplu/received_events", "repos_url": "https://api.github.com/users/jplu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jplu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jplu/subscriptions", "type": "User", "url": "https://api.github.com/users/jplu" }
[]
closed
false
null
[]
null
[]
"2020-04-14T18:18:51Z"
"2020-05-11T18:55:23Z"
"2020-05-11T18:55:22Z"
CONTRIBUTOR
null
Hello, As proposed by @thomwolf, I'm opening an issue to explain what I'm trying to do without success. What I want to do is to create and load a local dataset; the script I wrote is the following: ```python import os import csv import nlp class BbcConfig(nlp.BuilderConfig): def __init__(self, **kwargs): super(BbcConfig, self).__init__(**kwargs) class Bbc(nlp.GeneratorBasedBuilder): _DIR = "./data" _DEV_FILE = "test.csv" _TRAINING_FILE = "train.csv" BUILDER_CONFIGS = [BbcConfig(name="bbc", version=nlp.Version("1.0.0"))] def _info(self): return nlp.DatasetInfo(builder=self, features=nlp.features.FeaturesDict({"id": nlp.string, "text": nlp.string, "label": nlp.string})) def _split_generators(self, dl_manager): files = {"train": os.path.join(self._DIR, self._TRAINING_FILE), "dev": os.path.join(self._DIR, self._DEV_FILE)} return [nlp.SplitGenerator(name=nlp.Split.TRAIN, gen_kwargs={"filepath": files["train"]}), nlp.SplitGenerator(name=nlp.Split.VALIDATION, gen_kwargs={"filepath": files["dev"]})] def _generate_examples(self, filepath): with open(filepath) as f: reader = csv.reader(f, delimiter=',', quotechar="\"") lines = list(reader)[1:] for idx, line in enumerate(lines): yield idx, {"idx": idx, "text": line[1], "label": line[0]} ``` The dataset is attached to this issue as well: [data.zip](https://github.com/huggingface/datasets/files/4476928/data.zip) Now the steps to reproduce what I would like to do: 1. unzip the data locally (I know the nlp lib can detect and extract archives, but I want to make the reproduction as simple as possible) 2. create the `bbc.py` script as above at the same location as the unzipped `data` folder. Now I try to load the dataset in three different ways and none works. The first one uses the name of the dataset, like I would do with TFDS: ```python import nlp from bbc import Bbc dataset = nlp.load("bbc") ``` I get: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 280, in load dbuilder: DatasetBuilder = builder(path, name, data_dir=data_dir, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 166, in builder builder_cls = load_dataset(path, name=name, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 88, in load_dataset local_files_only=local_files_only, File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/utils/file_utils.py", line 214, in cached_path if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): File "/opt/anaconda3/envs/transformers/lib/python3.7/zipfile.py", line 203, in is_zipfile with open(filename, "rb") as fp: TypeError: expected str, bytes or os.PathLike object, not NoneType ``` But @thomwolf told me that there is no need to import the script, just to give its path, so I tried three different ways: ```python import nlp dataset = nlp.load("bbc.py") ``` And ```python import nlp dataset = nlp.load("./bbc.py") ``` And ```python import nlp dataset = nlp.load("/absolute/path/to/bbc.py") ``` These three ways give me: ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 280, in load dbuilder: DatasetBuilder = builder(path, name, data_dir=data_dir, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 166, in builder builder_cls = load_dataset(path, name=name, **builder_kwargs) File "/opt/anaconda3/envs/transformers/lib/python3.7/site-packages/nlp/load.py", line 124, in load_dataset dataset_module = importlib.import_module(module_path) File "/opt/anaconda3/envs/transformers/lib/python3.7/importlib/__init__.py", line 127, in import_module return _bootstrap._gcd_import(name[level:], package, level) File "<frozen importlib._bootstrap>", line 1006, in _gcd_import File "<frozen importlib._bootstrap>", line 983, in _find_and_load File "<frozen importlib._bootstrap>", line 965, in _find_and_load_unlocked ModuleNotFoundError: No module named 'nlp.datasets.2fd72627d92c328b3e9c4a3bf7ec932c48083caca09230cebe4c618da6e93688.bbc' ``` Any idea of what I'm missing? Or I might have spotted a bug :)
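For readers who hit the same error: in the released `datasets` library (the successor of `nlp`), pointing the loader at a local script path did become the supported route. A hedged sketch, using the `bbc.py` file name from this issue; the `load_dataset` signature shown is the later API, not the `nlp.load` used above:

```python
# Hedged sketch: loading a local dataset script by path, as supported in
# the released `datasets` library; `bbc.py` is the script from this issue.
from datasets import load_dataset

dataset = load_dataset("./bbc.py")  # the path is resolved as a local loading script
print(dataset)
```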
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2/timeline
null
completed
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/1
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1/comments
https://api.github.com/repos/huggingface/datasets/issues/1/events
https://github.com/huggingface/datasets/pull/1
599,457,467
MDExOlB1bGxSZXF1ZXN0NDAzMDk1NDYw
1
changing nlp.bool to nlp.bool_
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
[]
closed
false
null
[]
null
[]
"2020-04-14T10:18:02Z"
"2022-10-04T09:31:40Z"
"2020-04-14T12:01:40Z"
CONTRIBUTOR
null
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1/timeline
null
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1.diff", "html_url": "https://github.com/huggingface/datasets/pull/1", "merged_at": "2020-04-14T12:01:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/1.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1" }
true