| Column | Type | Min / classes | Max |
|---|---|---|---|
| url | stringlengths | 58 | 61 |
| repository_url | stringclasses | 1 value | |
| labels_url | stringlengths | 72 | 75 |
| comments_url | stringlengths | 67 | 70 |
| events_url | stringlengths | 65 | 68 |
| html_url | stringlengths | 48 | 51 |
| id | int64 | 600M | 1.08B |
| node_id | stringlengths | 18 | 24 |
| number | int64 | 2 | 3.45k |
| title | stringlengths | 1 | 276 |
| user | dict | | |
| labels | list | | |
| state | stringclasses | 2 values | |
| locked | bool | 1 class | |
| assignee | dict | | |
| assignees | list | | |
| milestone | dict | | |
| comments | sequence | | |
| created_at | int64 | 1,587B | 1,640B |
| updated_at | int64 | 1,588B | 1,640B |
| closed_at | int64 | 1,588B | 1,640B |
| author_association | stringclasses | 3 values | |
| active_lock_reason | null | | |
| body | stringlengths | 0 | 228k |
| reactions | dict | | |
| timeline_url | stringlengths | 67 | 70 |
| performed_via_github_app | null | | |
| draft | null | | |
| pull_request | null | | |
| is_pull_request | bool | 1 class | |
https://api.github.com/repos/huggingface/datasets/issues/2648
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2648/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2648/comments
https://api.github.com/repos/huggingface/datasets/issues/2648/events
https://github.com/huggingface/datasets/issues/2648
944,484,522
MDU6SXNzdWU5NDQ0ODQ1MjI=
2,648
Add web_split dataset for Paraphase and Rephrase benchmark
{ "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false }
[ { "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false } ]
null
[ "#take" ]
1,626,272,676,000
1,626,272,772,000
null
CONTRIBUTOR
null
## Describe: For getting simple sentences from a complex sentence, there is already a dataset and task, wiki_split, available in Hugging Face datasets. web_split is a very similar dataset. Some research papers state that training the model on the two datasets combined yields better results on both test sets. This dataset is made from WebNLG data. All the dataset-related details are provided in the repository below. Github link: https://github.com/shashiongithub/Split-and-Rephrase
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2648/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2648/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2646
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2646/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2646/comments
https://api.github.com/repos/huggingface/datasets/issues/2646/events
https://github.com/huggingface/datasets/issues/2646
944,379,954
MDU6SXNzdWU5NDQzNzk5NTQ=
2,646
downloading of yahoo_answers_topics dataset failed
{ "login": "vikrant7k", "id": 66781249, "node_id": "MDQ6VXNlcjY2NzgxMjQ5", "avatar_url": "https://avatars.githubusercontent.com/u/66781249?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vikrant7k", "html_url": "https://github.com/vikrant7k", "followers_url": "https://api.github.com/users/vikrant7k/followers", "following_url": "https://api.github.com/users/vikrant7k/following{/other_user}", "gists_url": "https://api.github.com/users/vikrant7k/gists{/gist_id}", "starred_url": "https://api.github.com/users/vikrant7k/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vikrant7k/subscriptions", "organizations_url": "https://api.github.com/users/vikrant7k/orgs", "repos_url": "https://api.github.com/users/vikrant7k/repos", "events_url": "https://api.github.com/users/vikrant7k/events{/privacy}", "received_events_url": "https://api.github.com/users/vikrant7k/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi ! I just tested and it worked fine today for me.\r\n\r\nI think this is because the dataset is stored on Google Drive which has a quota limit for the number of downloads per day, see this similar issue https://github.com/huggingface/datasets/issues/996 \r\n\r\nFeel free to try again today, now that the quota was reset" ]
1,626,265,865,000
1,626,340,516,000
null
NONE
null
## Describe the bug I get an error datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files when I try to download the yahoo_answers_topics dataset ## Steps to reproduce the bug self.dataset = load_dataset( 'yahoo_answers_topics', cache_dir=self.config['yahoo_cache_dir'], split='train[:90%]') # Sample code to reproduce the bug self.dataset = load_dataset( 'yahoo_answers_topics', cache_dir=self.config['yahoo_cache_dir'], split='train[:90%]') ## Expected results A clear and concise description of the expected results. ## Actual results Specify the actual results or traceback. datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2646/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2646/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2645
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2645/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2645/comments
https://api.github.com/repos/huggingface/datasets/issues/2645/events
https://github.com/huggingface/datasets/issues/2645
944,374,284
MDU6SXNzdWU5NDQzNzQyODQ=
2,645
load_dataset processing failed with OS error after downloading a dataset
{ "login": "fake-warrior8", "id": 40395156, "node_id": "MDQ6VXNlcjQwMzk1MTU2", "avatar_url": "https://avatars.githubusercontent.com/u/40395156?v=4", "gravatar_id": "", "url": "https://api.github.com/users/fake-warrior8", "html_url": "https://github.com/fake-warrior8", "followers_url": "https://api.github.com/users/fake-warrior8/followers", "following_url": "https://api.github.com/users/fake-warrior8/following{/other_user}", "gists_url": "https://api.github.com/users/fake-warrior8/gists{/gist_id}", "starred_url": "https://api.github.com/users/fake-warrior8/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/fake-warrior8/subscriptions", "organizations_url": "https://api.github.com/users/fake-warrior8/orgs", "repos_url": "https://api.github.com/users/fake-warrior8/repos", "events_url": "https://api.github.com/users/fake-warrior8/events{/privacy}", "received_events_url": "https://api.github.com/users/fake-warrior8/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! It looks like an issue with pytorch.\r\n\r\nCould you try to run `import torch` and see if it raises an error ?", "> Hi ! It looks like an issue with pytorch.\r\n> \r\n> Could you try to run `import torch` and see if it raises an error ?\r\n\r\nIt works. Thank you!" ]
1,626,265,433,000
1,626,341,642,000
1,626,341,642,000
NONE
null
## Describe the bug After downloading a dataset like opus100, there is a bug that OSError: Cannot find data file. Original error: dlopen: cannot load any more object with static TLS ## Steps to reproduce the bug ```python from datasets import load_dataset this_dataset = load_dataset('opus100', 'af-en') ``` ## Expected results there is no error when running load_dataset. ## Actual results Specify the actual results or traceback. Traceback (most recent call last): File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 652, in _download_and_prep self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 989, in _prepare_split example = self.info.features.encode_example(record) File "/home/anaconda3/lib/python3.6/site-packages/datasets/features.py", line 952, in encode_example example = cast_to_python_objects(example) File "/home/anaconda3/lib/python3.6/site-packages/datasets/features.py", line 219, in cast_to_python_ob return _cast_to_python_objects(obj)[0] File "/home/anaconda3/lib/python3.6/site-packages/datasets/features.py", line 165, in _cast_to_python_o import torch File "/home/anaconda3/lib/python3.6/site-packages/torch/__init__.py", line 188, in <module> _load_global_deps() File "/home/anaconda3/lib/python3.6/site-packages/torch/__init__.py", line 141, in _load_global_deps ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL) File "/home/anaconda3/lib/python3.6/ctypes/__init__.py", line 348, in __init__ self._handle = _dlopen(self._name, mode) OSError: dlopen: cannot load any more object with static TLS During handling of the above exception, another exception occurred: Traceback (most recent call last): File "download_hub_opus100.py", line 9, in <module> this_dataset = load_dataset('opus100', language_pair) File "/home/anaconda3/lib/python3.6/site-packages/datasets/load.py", line 748, in load_dataset use_auth_token=use_auth_token, File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 575, in download_and_prepa dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/anaconda3/lib/python3.6/site-packages/datasets/builder.py", line 658, in _download_and_prep + str(e) OSError: Cannot find data file. Original error: dlopen: cannot load any more object with static TLS ## Environment info - `datasets` version: 1.8.0 - Platform: Linux-3.13.0-32-generic-x86_64-with-debian-jessie-sid - Python version: 3.6.6 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2645/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2645/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2644
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2644/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2644/comments
https://api.github.com/repos/huggingface/datasets/issues/2644/events
https://github.com/huggingface/datasets/issues/2644
944,254,748
MDU6SXNzdWU5NDQyNTQ3NDg=
2,644
Batched `map` not allowed to return 0 items
{ "login": "pcuenca", "id": 1177582, "node_id": "MDQ6VXNlcjExNzc1ODI=", "avatar_url": "https://avatars.githubusercontent.com/u/1177582?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pcuenca", "html_url": "https://github.com/pcuenca", "followers_url": "https://api.github.com/users/pcuenca/followers", "following_url": "https://api.github.com/users/pcuenca/following{/other_user}", "gists_url": "https://api.github.com/users/pcuenca/gists{/gist_id}", "starred_url": "https://api.github.com/users/pcuenca/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pcuenca/subscriptions", "organizations_url": "https://api.github.com/users/pcuenca/orgs", "repos_url": "https://api.github.com/users/pcuenca/repos", "events_url": "https://api.github.com/users/pcuenca/events{/privacy}", "received_events_url": "https://api.github.com/users/pcuenca/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! Thanks for reporting. Indeed it looks like type inference makes it fail. We should probably just ignore this step until a non-empty batch is passed.", "Sounds good! Do you want me to propose a PR? I'm quite busy right now, but if it's not too urgent I could take a look next week.", "Sure if you're interested feel free to open a PR :)\r\n\r\nYou can also ping me anytime if you have questions or if I can help !", "Sorry to ping you, @lhoestq, did you have a chance to take a look at the proposed PR? Thank you!", "Yes and it's all good, thank you :)\r\n\r\nFeel free to close this issue if it's good for you", "Everything's good, thanks!" ]
1,626,256,699,000
1,627,311,315,000
1,627,311,315,000
CONTRIBUTOR
null
## Describe the bug I'm trying to use `map` to filter a large dataset by selecting rows that match an expensive condition (files referenced by one of the columns need to exist in the filesystem, so we have to `stat` them). According to [the documentation](https://huggingface.co/docs/datasets/processing.html#augmenting-the-dataset), `a batch mapped function can take as input a batch of size N and return a batch of size M where M can be greater or less than N and can even be zero`. However, when the returned batch has a size of zero (neither item in the batch fulfilled the condition), we get an `index out of bounds` error. I think that `arrow_writer.py` is [trying to infer the returned types using the first element returned](https://github.com/huggingface/datasets/blob/master/src/datasets/arrow_writer.py#L100), but no elements were returned in this case. For this error to happen, I'm returning a dictionary that contains empty lists for the keys I want to keep, see below. If I return an empty dictionary instead (no keys), then a different error eventually occurs. ## Steps to reproduce the bug ```python def select_rows(examples): # `key` is a column name that exists in the original dataset # The following line simulates no matches found, so we return an empty batch result = {'key': []} return result filtered_dataset = dataset.map( select_rows, remove_columns = dataset.column_names, batched = True, num_proc = 1, desc = "Selecting rows with images that exist" ) ``` The code above immediately triggers the exception. If we use the following instead: ```python def select_rows(examples): # `key` is a column name that exists in the original dataset result = {'key': []} # or defaultdict or whatever # code to check for condition and append elements to result # some_items_found will be set to True if there were any matching elements in the batch return result if some_items_found else {} ``` Then it _seems_ to work, but it eventually fails with some sort of schema error. I believe it may happen when an empty batch is followed by a non-empty one, but haven't set up a test to verify it. In my opinion, returning a dictionary with empty lists and valid column names should be accepted as a valid result with zero items. ## Expected results The dataset would be filtered and only the matching fields would be returned. ## Actual results An exception is encountered, as described. Using a workaround makes it fail further along the line. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.1.dev0 - Platform: Linux-5.4.0-53-generic-x86_64-with-glibc2.17 - Python version: 3.8.10 - PyArrow version: 4.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2644/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2644/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2643
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2643/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2643/comments
https://api.github.com/repos/huggingface/datasets/issues/2643/events
https://github.com/huggingface/datasets/issues/2643
944,220,273
MDU6SXNzdWU5NDQyMjAyNzM=
2,643
Enum used in map functions will raise a RecursionError with dill.
{ "login": "jorgeecardona", "id": 100702, "node_id": "MDQ6VXNlcjEwMDcwMg==", "avatar_url": "https://avatars.githubusercontent.com/u/100702?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jorgeecardona", "html_url": "https://github.com/jorgeecardona", "followers_url": "https://api.github.com/users/jorgeecardona/followers", "following_url": "https://api.github.com/users/jorgeecardona/following{/other_user}", "gists_url": "https://api.github.com/users/jorgeecardona/gists{/gist_id}", "starred_url": "https://api.github.com/users/jorgeecardona/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jorgeecardona/subscriptions", "organizations_url": "https://api.github.com/users/jorgeecardona/orgs", "repos_url": "https://api.github.com/users/jorgeecardona/repos", "events_url": "https://api.github.com/users/jorgeecardona/events{/privacy}", "received_events_url": "https://api.github.com/users/jorgeecardona/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "I'm running into this as well. (Thank you so much for reporting @jorgeecardona — was staring at this massive stack trace and unsure what exactly was wrong!)", "Hi ! Thanks for reporting :)\r\n\r\nUntil this is fixed on `dill`'s side, we could implement a custom saving in our Pickler indefined in utils.py_utils.py\r\nThere is already a suggestion in this message about how to do it:\r\nhttps://github.com/uqfoundation/dill/issues/250#issuecomment-852566284\r\n\r\nLet me know if such a workaround could help, and feel free to open a PR if you want to contribute !", "I have the same bug.\r\nthe code is as follows:\r\n![image](https://user-images.githubusercontent.com/84262181/139785849-620dd4ac-86ce-4212-8163-942bbca305aa.png)\r\nthe error is: \r\n![image](https://user-images.githubusercontent.com/84262181/139785899-88a9bd75-c60b-45a5-b819-830c7c096f3d.png)\r\n\r\nLook for the solution for this bug.", "Hi ! I think your RecursionError comes from a different issue @BitcoinNLPer , could you open a separate issue please ?\r\n\r\nAlso which dataset are you using ? I tried loading `CodedotAI/code_clippy` but I get a different error\r\n```python\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/Users/quentinlhoest/Desktop/hf/datasets/src/datasets/load.py\", line 1615, in load_dataset\r\n **config_kwargs,\r\n File \"/Users/quentinlhoest/Desktop/hf/datasets/src/datasets/load.py\", line 1446, in load_dataset_builder\r\n builder_cls = import_main_class(dataset_module.module_path)\r\n File \"/Users/quentinlhoest/Desktop/hf/datasets/src/datasets/load.py\", line 101, in import_main_class\r\n module = importlib.import_module(module_path)\r\n File \"/Users/quentinlhoest/.virtualenvs/hf-datasets/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\n File \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\n File \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\n File \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\n File \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\n File \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\n File \"/Users/quentinlhoest/.cache/huggingface/modules/datasets_modules/datasets/CodedotAI___code_clippy/d332f69d036e8c80f47bc9a96d676c3fa30cb50af7bb81e2d4d12e80b83efc4d/code_clippy.py\", line 66, in <module>\r\n url_elements = results.find_all(\"a\")\r\nAttributeError: 'NoneType' object has no attribute 'find_all'\r\n```" ]
1,626,254,168,000
1,635,846,671,000
null
NONE
null
## Describe the bug Enums used in functions pass to `map` will fail at pickling with a maximum recursion exception as described here: https://github.com/uqfoundation/dill/issues/250#issuecomment-852566284 In my particular case, I use an enum to define an argument with fixed options using the `TraininigArguments` dataclass as base class and the `HfArgumentParser`. In the same file I use a `ds.map` that tries to pickle the content of the module including the definition of the enum that runs into the dill bug described above. ## Steps to reproduce the bug ```python from datasets import load_dataset from enum import Enum class A(Enum): a = 'a' def main(): a = A.a def f(x): return {} if a == a.a else x ds = load_dataset('cnn_dailymail', '3.0.0')['test'] ds = ds.map(f, num_proc=15) if __name__ == "__main__": main() ``` ## Expected results The known problem with dill could be prevented as explained in the link above (workaround.) Since `HFArgumentParser` nicely uses the enum class for choices it makes sense to also deal with this bug under the hood. ## Actual results ```python File "/home/xxxx/miniconda3/lib/python3.8/site-packages/dill/_dill.py", line 1373, in save_type pickler.save_reduce(_create_type, (type(obj), obj.__name__, File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 690, in save_reduce save(args) File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 558, in save f(self, obj) # Call unbound method with explicit self File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 899, in save_tuple save(element) File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 534, in save self.framer.commit_frame() File "/home/xxxx/miniconda3/lib/python3.8/pickle.py", line 220, in commit_frame if f.tell() >= self._FRAME_SIZE_TARGET or force: RecursionError: maximum recursion depth exceeded while calling a Python object ``` ## Environment info - `datasets` version: 1.8.0 - Platform: Linux-5.9.0-4-amd64-x86_64-with-glibc2.10 - Python version: 3.8.5 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2643/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2643/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2642
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2642/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2642/comments
https://api.github.com/repos/huggingface/datasets/issues/2642/events
https://github.com/huggingface/datasets/issues/2642
944,175,697
MDU6SXNzdWU5NDQxNzU2OTc=
2,642
Support multi-worker with streaming dataset (IterableDataset).
{ "login": "cccntu", "id": 31893406, "node_id": "MDQ6VXNlcjMxODkzNDA2", "avatar_url": "https://avatars.githubusercontent.com/u/31893406?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cccntu", "html_url": "https://github.com/cccntu", "followers_url": "https://api.github.com/users/cccntu/followers", "following_url": "https://api.github.com/users/cccntu/following{/other_user}", "gists_url": "https://api.github.com/users/cccntu/gists{/gist_id}", "starred_url": "https://api.github.com/users/cccntu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cccntu/subscriptions", "organizations_url": "https://api.github.com/users/cccntu/orgs", "repos_url": "https://api.github.com/users/cccntu/repos", "events_url": "https://api.github.com/users/cccntu/events{/privacy}", "received_events_url": "https://api.github.com/users/cccntu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Hi ! This is a great idea :)\r\nI think we could have something similar to what we have in `datasets.Dataset.map`, i.e. a `num_proc` parameter that tells how many processes to spawn to parallelize the data processing. \r\n\r\nRegarding AUTOTUNE, this could be a nice feature as well, we could see how to add it in a second step" ]
1,626,250,978,000
1,626,341,854,000
null
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.** The current `.map` does not support multi-process, CPU can become bottleneck if the pre-processing is complex (e.g. t5 span masking). **Describe the solution you'd like** Ideally `.map` should support multi-worker like tfds, with `AUTOTUNE`. **Describe alternatives you've considered** A simpler solution is to shard the dataset and process it in parallel with pytorch dataloader. The shard does not need to be of equal size. * https://pytorch.org/docs/stable/data.html#torch.utils.data.IterableDataset **Additional context**
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2642/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2642/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2641
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2641/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2641/comments
https://api.github.com/repos/huggingface/datasets/issues/2641/events
https://github.com/huggingface/datasets/issues/2641
943,838,085
MDU6SXNzdWU5NDM4MzgwODU=
2,641
load_dataset("financial_phrasebank") NonMatchingChecksumError
{ "login": "courtmckay", "id": 13956255, "node_id": "MDQ6VXNlcjEzOTU2MjU1", "avatar_url": "https://avatars.githubusercontent.com/u/13956255?v=4", "gravatar_id": "", "url": "https://api.github.com/users/courtmckay", "html_url": "https://github.com/courtmckay", "followers_url": "https://api.github.com/users/courtmckay/followers", "following_url": "https://api.github.com/users/courtmckay/following{/other_user}", "gists_url": "https://api.github.com/users/courtmckay/gists{/gist_id}", "starred_url": "https://api.github.com/users/courtmckay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/courtmckay/subscriptions", "organizations_url": "https://api.github.com/users/courtmckay/orgs", "repos_url": "https://api.github.com/users/courtmckay/repos", "events_url": "https://api.github.com/users/courtmckay/events{/privacy}", "received_events_url": "https://api.github.com/users/courtmckay/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi! It's probably because this dataset is stored on google drive and it has a per day quota limit. It should work if you retry, I was able to initiate the download.\r\n\r\nSimilar issue [here](https://github.com/huggingface/datasets/issues/2646)", "Hi ! Loading the dataset works on my side as well.\r\nFeel free to try again and let us know if it works for you know", "Thank you! I've been trying periodically for the past month, and no luck yet with this particular dataset. Just tried again and still hitting the checksum error.\r\n\r\nCode:\r\n\r\n`dataset = load_dataset(\"financial_phrasebank\", \"sentences_allagree\") `\r\n\r\nTraceback:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nNonMatchingChecksumError Traceback (most recent call last)\r\n<ipython-input-2-55cc2144f31e> in <module>\r\n----> 1 dataset = load_dataset(\"financial_phrasebank\", \"sentences_allagree\")\r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, streaming, **config_kwargs)\r\n 859 ignore_verifications=ignore_verifications,\r\n 860 try_from_hf_gcs=try_from_hf_gcs,\r\n--> 861 use_auth_token=use_auth_token,\r\n 862 )\r\n 863 \r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs)\r\n 582 if not downloaded_from_gcs:\r\n 583 self._download_and_prepare(\r\n--> 584 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs\r\n 585 )\r\n 586 # Sync info\r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs)\r\n 642 if verify_infos:\r\n 643 verify_checksums(\r\n--> 644 self.info.download_checksums, dl_manager.get_recorded_sizes_checksums(), \"dataset source files\"\r\n 645 )\r\n 646 \r\n\r\n/opt/conda/lib/python3.7/site-packages/datasets/utils/info_utils.py in verify_checksums(expected_checksums, recorded_checksums, verification_name)\r\n 38 if len(bad_urls) > 0:\r\n 39 error_msg = \"Checksums didn't match\" + for_verification_name + \":\\n\"\r\n---> 40 raise NonMatchingChecksumError(error_msg + str(bad_urls))\r\n 41 logger.info(\"All the checksums matched successfully\" + for_verification_name)\r\n 42 \r\n\r\nNonMatchingChecksumError: Checksums didn't match for dataset source files:\r\n['https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip']\r\n```" ]
1,626,211,309,000
1,626,701,170,000
null
NONE
null
## Describe the bug Attempting to download the financial_phrasebank dataset results in a NonMatchingChecksumError ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("financial_phrasebank", 'sentences_allagree') ``` ## Expected results I expect to see the financial_phrasebank dataset downloaded successfully ## Actual results NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://www.researchgate.net/profile/Pekka_Malo/publication/251231364_FinancialPhraseBank-v10/data/0c96051eee4fb1d56e000000/FinancialPhraseBank-v10.zip'] ## Environment info - `datasets` version: 1.9.0 - Platform: Linux-4.14.232-177.418.amzn2.x86_64-x86_64-with-debian-10.6 - Python version: 3.7.10 - PyArrow version: 4.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2641/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2641/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2637
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2637/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2637/comments
https://api.github.com/repos/huggingface/datasets/issues/2637/events
https://github.com/huggingface/datasets/issues/2637
943,290,736
MDU6SXNzdWU5NDMyOTA3MzY=
2,637
Add the CIDEr metric?
{ "login": "zuujhyt", "id": 75845952, "node_id": "MDQ6VXNlcjc1ODQ1OTUy", "avatar_url": "https://avatars.githubusercontent.com/u/75845952?v=4", "gravatar_id": "", "url": "https://api.github.com/users/zuujhyt", "html_url": "https://github.com/zuujhyt", "followers_url": "https://api.github.com/users/zuujhyt/followers", "following_url": "https://api.github.com/users/zuujhyt/following{/other_user}", "gists_url": "https://api.github.com/users/zuujhyt/gists{/gist_id}", "starred_url": "https://api.github.com/users/zuujhyt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/zuujhyt/subscriptions", "organizations_url": "https://api.github.com/users/zuujhyt/orgs", "repos_url": "https://api.github.com/users/zuujhyt/repos", "events_url": "https://api.github.com/users/zuujhyt/events{/privacy}", "received_events_url": "https://api.github.com/users/zuujhyt/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "same" ]
1,626,178,971,000
1,632,729,425,000
null
NONE
null
Hi, I find the api in https://huggingface.co/metrics quite useful. I am playing around with video/image captioning task, where CIDEr is a popular metric. Do you plan to add this into the HF ```datasets``` library? Thanks.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2637/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2637/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2630
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2630/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2630/comments
https://api.github.com/repos/huggingface/datasets/issues/2630/events
https://github.com/huggingface/datasets/issues/2630
942,102,956
MDU6SXNzdWU5NDIxMDI5NTY=
2,630
Progress bars are not properly rendered in Jupyter notebook
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "To add my experience when trying to debug this issue:\r\n\r\nSeems like previously the workaround given [here](https://github.com/tqdm/tqdm/issues/485#issuecomment-473338308) worked around this issue. But with the latest version of jupyter/tqdm I still get terminal warnings that IPython tried to send a message from a forked process.", "Hi @mludv, thanks for the hint!!! :) \r\n\r\nWe will definitely take it into account to try to fix this issue... It seems somehow related to `multiprocessing` and `tqdm`..." ]
1,626,098,833,000
1,626,160,832,000
null
MEMBER
null
## Describe the bug The progress bars are not Jupyter widgets; regular progress bars appear (like in a terminal). ## Steps to reproduce the bug ```python ds.map(tokenize, num_proc=10) ``` ## Expected results Jupyter widgets displaying the progress bars. ## Actual results Simple plane progress bars. cc: Reported by @thomwolf
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2630/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2630/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2629
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2629/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2629/comments
https://api.github.com/repos/huggingface/datasets/issues/2629/events
https://github.com/huggingface/datasets/issues/2629
941,819,205
MDU6SXNzdWU5NDE4MTkyMDU=
2,629
Load datasets from the Hub without requiring a dataset script
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "This is so cool, let us know if we can help with anything on the hub side (@Pierrci @elishowk) 🎉 " ]
1,626,079,517,000
1,629,901,088,000
1,629,901,088,000
MEMBER
null
As a user I would like to be able to upload my csv/json/text/parquet/etc. files in a dataset repository on the Hugging Face Hub and be able to load this dataset with `load_dataset` without having to implement a dataset script. Moreover I would like to be able to specify which file goes into which split using the `data_files` argument. This feature should be compatible with private repositories and dataset streaming. This can be implemented by checking the extension of the files in the dataset repository and then by using the right dataset builder that is already packaged in the library (csv/json/text/parquet/etc.)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2629/reactions", "total_count": 11, "+1": 0, "-1": 0, "laugh": 0, "hooray": 2, "confused": 0, "heart": 7, "rocket": 2, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2629/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2625
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2625/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2625/comments
https://api.github.com/repos/huggingface/datasets/issues/2625/events
https://github.com/huggingface/datasets/issues/2625
941,439,922
MDU6SXNzdWU5NDE0Mzk5MjI=
2,625
⚛️😇⚙️🔑
{ "login": "hustlen0mics", "id": 50596661, "node_id": "MDQ6VXNlcjUwNTk2NjYx", "avatar_url": "https://avatars.githubusercontent.com/u/50596661?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hustlen0mics", "html_url": "https://github.com/hustlen0mics", "followers_url": "https://api.github.com/users/hustlen0mics/followers", "following_url": "https://api.github.com/users/hustlen0mics/following{/other_user}", "gists_url": "https://api.github.com/users/hustlen0mics/gists{/gist_id}", "starred_url": "https://api.github.com/users/hustlen0mics/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hustlen0mics/subscriptions", "organizations_url": "https://api.github.com/users/hustlen0mics/orgs", "repos_url": "https://api.github.com/users/hustlen0mics/repos", "events_url": "https://api.github.com/users/hustlen0mics/events{/privacy}", "received_events_url": "https://api.github.com/users/hustlen0mics/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
1,626,005,674,000
1,626,069,359,000
1,626,069,359,000
NONE
null
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2625/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2625/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2624
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2624/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2624/comments
https://api.github.com/repos/huggingface/datasets/issues/2624/events
https://github.com/huggingface/datasets/issues/2624
941,318,247
MDU6SXNzdWU5NDEzMTgyNDc=
2,624
can't set verbosity for `metric.py`
{ "login": "thomas-happify", "id": 66082334, "node_id": "MDQ6VXNlcjY2MDgyMzM0", "avatar_url": "https://avatars.githubusercontent.com/u/66082334?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thomas-happify", "html_url": "https://github.com/thomas-happify", "followers_url": "https://api.github.com/users/thomas-happify/followers", "following_url": "https://api.github.com/users/thomas-happify/following{/other_user}", "gists_url": "https://api.github.com/users/thomas-happify/gists{/gist_id}", "starred_url": "https://api.github.com/users/thomas-happify/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomas-happify/subscriptions", "organizations_url": "https://api.github.com/users/thomas-happify/orgs", "repos_url": "https://api.github.com/users/thomas-happify/repos", "events_url": "https://api.github.com/users/thomas-happify/events{/privacy}", "received_events_url": "https://api.github.com/users/thomas-happify/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Thanks @thomas-happify for reporting and thanks @mariosasko for the fix." ]
1,625,948,625,000
1,626,069,269,000
1,626,069,269,000
NONE
null
## Describe the bug ``` [2021-07-10 20:13:11,528][datasets.utils.filelock][INFO] - Lock 139705371374976 acquired on /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow.lock [2021-07-10 20:13:11,529][datasets.arrow_writer][INFO] - Done writing 32 examples in 6100 bytes /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow. [2021-07-10 20:13:11,531][datasets.arrow_dataset][INFO] - Set __getitem__(key) output type to python objects for no columns (when key is int or slice) and don't output other (un-formatted) columns. [2021-07-10 20:13:11,543][/conda/envs/myenv/lib/python3.8/site-packages/datasets/metric.py][INFO] - Removing /root/.cache/huggingface/metrics/seqeval/default/default_experiment-1-0.arrow ``` As you can see, `datasets` logging come from different places. `filelock`, `arrow_writer` & `arrow_dataset` comes from `datasets.*` which are expected However, `metric.py` logging comes from `/conda/envs/myenv/lib/python3.8/site-packages/datasets/` So when setting `datasets.utils.logging.set_verbosity_error()`, it still logs the last message which is annoying during evaluation. I had to do ``` logging.getLogger("/conda/envs/myenv/lib/python3.8/site-packages/datasets/metric").setLevel(logging.ERROR) ``` to fully mute these messages ## Expected results it shouldn't log these messages when setting `datasets.utils.logging.set_verbosity_error()` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: tried both 1.8.0 & 1.9.0 - Platform: Ubuntu 18.04.5 LTS - Python version: 3.8.10 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2624/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2624/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2622
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2622/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2622/comments
https://api.github.com/repos/huggingface/datasets/issues/2622/events
https://github.com/huggingface/datasets/issues/2622
941,127,785
MDU6SXNzdWU5NDExMjc3ODU=
2,622
Integration with AugLy
{ "login": "Darktex", "id": 890615, "node_id": "MDQ6VXNlcjg5MDYxNQ==", "avatar_url": "https://avatars.githubusercontent.com/u/890615?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Darktex", "html_url": "https://github.com/Darktex", "followers_url": "https://api.github.com/users/Darktex/followers", "following_url": "https://api.github.com/users/Darktex/following{/other_user}", "gists_url": "https://api.github.com/users/Darktex/gists{/gist_id}", "starred_url": "https://api.github.com/users/Darktex/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Darktex/subscriptions", "organizations_url": "https://api.github.com/users/Darktex/orgs", "repos_url": "https://api.github.com/users/Darktex/repos", "events_url": "https://api.github.com/users/Darktex/events{/privacy}", "received_events_url": "https://api.github.com/users/Darktex/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Hi,\r\n\r\nyou can define your own custom formatting with `Dataset.set_transform()` and then run the tokenizer with the batches of augmented data as follows:\r\n```python\r\ndset = load_dataset(\"imdb\", split=\"train\") # Let's say we are working with the IMDB dataset\r\ndset.set_transform(lambda ex: {\"text\": augly_text_augmentation(ex[\"text\"])}, columns=\"text\", output_all_columns=True)\r\ndataloader = torch.utils.data.DataLoader(dset, batch_size=32)\r\nfor epoch in range(5):\r\n for batch in dataloader:\r\n tokenizer_output = tokenizer(batch.pop(\"text\"), padding=True, truncation=True, return_tensors=\"pt\")\r\n batch.update(tokenizer_output)\r\n output = model(**batch)\r\n ...\r\n```" ]
1,625,875,389,000
1,626,023,291,000
null
NONE
null
**Is your feature request related to a problem? Please describe.** Facebook recently launched a library, [AugLy](https://github.com/facebookresearch/AugLy) , that has a unified API for augmentations for image, video and text. It would be pretty exciting to have it hooked up to HF libraries so that we can make NLP models robust to misspellings or to punctuation, or emojis etc. Plus, with Transformers supporting more CV use cases, having augmentations support becomes crucial. **Describe the solution you'd like** The biggest difference between augmentations and preprocessing is that preprocessing happens only once, but you are running augmentations once per epoch. AugLy operates on text directly, so this breaks the typical workflow where we would run the tokenizer once, set format to pt tensors and be ready for the Dataloader. **Describe alternatives you've considered** One possible way of implementing these is to make a custom Dataset class where getitem(i) runs the augmentation and the tokenizer every time, though this would slow training down considerably given we wouldn't even run the tokenizer in batches.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2622/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2622/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2618
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2618/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2618/comments
https://api.github.com/repos/huggingface/datasets/issues/2618/events
https://github.com/huggingface/datasets/issues/2618
940,852,640
MDU6SXNzdWU5NDA4NTI2NDA=
2,618
`filelock.py` Error
{ "login": "liyucheng09", "id": 27999909, "node_id": "MDQ6VXNlcjI3OTk5OTA5", "avatar_url": "https://avatars.githubusercontent.com/u/27999909?v=4", "gravatar_id": "", "url": "https://api.github.com/users/liyucheng09", "html_url": "https://github.com/liyucheng09", "followers_url": "https://api.github.com/users/liyucheng09/followers", "following_url": "https://api.github.com/users/liyucheng09/following{/other_user}", "gists_url": "https://api.github.com/users/liyucheng09/gists{/gist_id}", "starred_url": "https://api.github.com/users/liyucheng09/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/liyucheng09/subscriptions", "organizations_url": "https://api.github.com/users/liyucheng09/orgs", "repos_url": "https://api.github.com/users/liyucheng09/repos", "events_url": "https://api.github.com/users/liyucheng09/events{/privacy}", "received_events_url": "https://api.github.com/users/liyucheng09/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi @liyucheng09, thanks for reporting.\r\n\r\nApparently this issue has to do with your environment setup. One question: is your data in an NFS share? Some people have reported this error when using `fcntl` to write to an NFS share... If this is the case, then it might be that your NFS just may not be set up to provide file locks. You should ask your system administrator, or try these commands in the terminal:\r\n```shell\r\nsudo systemctl enable rpc-statd\r\nsudo systemctl start rpc-statd\r\n```" ]
1,625,843,569,000
1,626,070,830,000
null
NONE
null
## Describe the bug It seems that the `filelock.py` went error. ``` >>> ds=load_dataset('xsum') ^CTraceback (most recent call last): File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) OSError: [Errno 37] No locks available ``` According to error log, it is OSError, but there is an `except` in the `_acquire` function. ``` def _acquire(self): open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC try: fd = os.open(self._lock_file, open_mode) except (IOError, OSError): pass else: self._lock_file_fd = fd return None ``` I don't know why it stucked rather than `pass` directly. I am not quite familiar with filelock operation, so any help is highly appriciated. ## Steps to reproduce the bug ```python ds = load_dataset('xsum') ``` ## Expected results A clear and concise description of the expected results. ## Actual results ``` >>> ds=load_dataset('xsum') ^CTraceback (most recent call last): File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) OSError: [Errno 37] No locks available During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/load.py", line 818, in load_dataset use_auth_token=use_auth_token, File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/load.py", line 470, in prepare_module with FileLock(lock_path): File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 323, in __enter__ self.acquire() File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 272, in acquire self._acquire() File "/user/HS502/yl02706/.conda/envs/lyc/lib/python3.6/site-packages/datasets/utils/filelock.py", line 402, in _acquire fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) KeyboardInterrupt ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.0 - Platform: Linux-4.15.0-135-generic-x86_64-with-debian-buster-sid - Python version: 3.6.13 - PyArrow version: 4.0.1
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2618/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2618/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2615
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2615/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2615/comments
https://api.github.com/repos/huggingface/datasets/issues/2615/events
https://github.com/huggingface/datasets/issues/2615
940,794,339
MDU6SXNzdWU5NDA3OTQzMzk=
2,615
Jsonlines export error
{ "login": "TevenLeScao", "id": 26709476, "node_id": "MDQ6VXNlcjI2NzA5NDc2", "avatar_url": "https://avatars.githubusercontent.com/u/26709476?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TevenLeScao", "html_url": "https://github.com/TevenLeScao", "followers_url": "https://api.github.com/users/TevenLeScao/followers", "following_url": "https://api.github.com/users/TevenLeScao/following{/other_user}", "gists_url": "https://api.github.com/users/TevenLeScao/gists{/gist_id}", "starred_url": "https://api.github.com/users/TevenLeScao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TevenLeScao/subscriptions", "organizations_url": "https://api.github.com/users/TevenLeScao/orgs", "repos_url": "https://api.github.com/users/TevenLeScao/repos", "events_url": "https://api.github.com/users/TevenLeScao/events{/privacy}", "received_events_url": "https://api.github.com/users/TevenLeScao/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for reporting @TevenLeScao! I'm having a look...", "(not sure what just happened on the assignations sorry)", "For some reason this happens (both `datasets` version are on master) only on Python 3.6 and not Python 3.8.", "@TevenLeScao we are using `pandas` to serialize the dataset to JSON Lines. So it must be due to pandas. Could you please check the pandas version causing the issue?", "@TevenLeScao I have just checked it: this was a bug in `pandas` and it was fixed in version 1.2: https://github.com/pandas-dev/pandas/pull/36898", "Thanks ! I'm creating a PR", "Well I though it was me who has taken on this issue... 😅 ", "Sorry, I was also talking to teven offline so I already had the PR ready before noticing x)", "I was also already working in my PR... Nevermind. Next time we should pay attention if there is somebody (self-)assigned to an issue and if he/she is still working on it before overtaking it... 😄 ", "The fix is available on `master` @TevenLeScao , thanks for reporting" ]
1,625,839,325,000
1,625,844,547,000
1,625,844,513,000
MEMBER
null
## Describe the bug When exporting large datasets in jsonlines (c4 in my case) the created file has an error every 9999 lines: the 9999th and 10000th are concatenated, thus breaking the jsonlines format. This sounds like it is related to batching, which is 10,000 by default. ## Steps to reproduce the bug This is what I'm running: in Python: ``` from datasets import load_dataset ptb = load_dataset("ptb_text_only") ptb["train"].to_json("ptb.jsonl") ``` then out of Python: ``` head -10000 ptb.jsonl ``` ## Expected results Properly separated lines ## Actual results The last line is a concatenation of two lines ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.1.dev0 - Platform: Linux-5.4.0-1046-gcp-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.6.9 - PyArrow version: 4.0.1
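The comments above trace this to a pandas bug fixed in pandas 1.2, since `to_json` is backed by pandas serialization; a quick way to check whether an exported file is affected is to parse it line by line (the file name matches the snippet above):

```python
# Minimal check: every line of a well-formed JSON Lines file should parse on its own.
import json

with open("ptb.jsonl", encoding="utf-8") as f:
    for line_no, line in enumerate(f, start=1):
        try:
            json.loads(line)
        except json.JSONDecodeError:
            print(f"Malformed record at line {line_no}")
            break
    else:
        print("All lines parsed as standalone JSON objects")
```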
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2615/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2615/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2607
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2607/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2607/comments
https://api.github.com/repos/huggingface/datasets/issues/2607/events
https://github.com/huggingface/datasets/issues/2607
938,796,902
MDU6SXNzdWU5Mzg3OTY5MDI=
2,607
Streaming local gzip compressed JSON line files is not working
{ "login": "thomwolf", "id": 7353373, "node_id": "MDQ6VXNlcjczNTMzNzM=", "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thomwolf", "html_url": "https://github.com/thomwolf", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "repos_url": "https://api.github.com/users/thomwolf/repos", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Updating to pyarrow-4.0.1 didn't fix the issue", "Here is an exemple dataset with 2 of these compressed JSON files: https://huggingface.co/datasets/thomwolf/github-python", "Hi @thomwolf, thanks for reporting.\r\n\r\nIt seems this might be due to the fact that the JSON Dataset builder uses `pyarrow.json` (`paj.read_json`) to read the data without using the Python standard `open(file,...` (which is the one patched with `xopen` to work in streaming mode).\r\n\r\nThis has to be fixed.", "Sorry for reopening this, but I'm having the same issue as @thomwolf when streaming a gzipped JSON Lines file from the hub. Or is that just not possible by definition?\r\nI installed `datasets`in editable mode from source (so probably includes the fix from #2608 ?): \r\n```\r\n>>> datasets.__version__\r\n'1.9.1.dev0'\r\n```\r\n\r\n```\r\n>>> msmarco = datasets.load_dataset(\"webis/msmarco\", \"corpus\", streaming=True)\r\nUsing custom data configuration corpus-174d3b7155eb68db\r\n>>> msmarco_iter = iter(msmarco['train'])\r\n>>> print(next(msmarco_iter))\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/media/ssd/TREC/msmarco/datasets/src/datasets/iterable_dataset.py\", line 338, in __iter__\r\n for key, example in self._iter():\r\n File \"/media/ssd/TREC/msmarco/datasets/src/datasets/iterable_dataset.py\", line 335, in _iter\r\n yield from ex_iterable\r\n File \"/media/ssd/TREC/msmarco/datasets/src/datasets/iterable_dataset.py\", line 78, in __iter__\r\n for key, example in self.generate_examples_fn(**self.kwargs):\r\n File \"/home/christopher/.cache/huggingface/modules/datasets_modules/datasets/msmarco/eb63dff8d83107168e973c7a655a6082d37e08d71b4ac39a0afada479c138745/msmarco.py\", line 96, in _generate_examples\r\n with gzip.open(file, \"rt\", encoding=\"utf-8\") as f:\r\n File \"/usr/lib/python3.6/gzip.py\", line 53, in open\r\n binary_file = GzipFile(filename, gz_mode, compresslevel)\r\n File \"/usr/lib/python3.6/gzip.py\", line 163, in __init__\r\n fileobj = self.myfileobj = builtins.open(filename, mode or 'rb')\r\nFileNotFoundError: [Errno 2] No such file or directory: 'https://huggingface.co/datasets/webis/msmarco/resolve/main/msmarco_doc_00.gz'\r\n```\r\n\r\nLoading the dataset without streaming set to True, works fine.", "Hi ! To make the streaming work, we extend `open` in the dataset builder to work with urls.\r\n\r\nTherefore you just need to use `open` before using `gzip.open`:\r\n```diff\r\n- with gzip.open(file, \"rt\", encoding=\"utf-8\") as f:\r\n+ with gzip.open(open(file, \"rb\"), \"rt\", encoding=\"utf-8\") as f:\r\n```\r\n\r\nYou can see that it is the case for oscar.py and c4.py for example:\r\n\r\nhttps://github.com/huggingface/datasets/blob/8814b393984c1c2e1800ba370de2a9f7c8644908/datasets/oscar/oscar.py#L358-L358\r\n\r\nhttps://github.com/huggingface/datasets/blob/8814b393984c1c2e1800ba370de2a9f7c8644908/datasets/c4/c4.py#L88-L88\r\n\r\n", "@lhoestq Sorry I missed that. Thank you Quentin!" ]
1,625,657,793,000
1,626,774,619,000
1,625,760,521,000
MEMBER
null
## Describe the bug Using streaming to iterate on local gzip compressed JSON files raise a file not exist error ## Steps to reproduce the bug ```python from datasets import load_dataset streamed_dataset = load_dataset('json', split='train', data_files=data_files, streaming=True) next(iter(streamed_dataset)) ``` ## Actual results ``` FileNotFoundError Traceback (most recent call last) <ipython-input-6-27a664e29784> in <module> ----> 1 next(iter(streamed_dataset)) ~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in __iter__(self) 336 337 def __iter__(self): --> 338 for key, example in self._iter(): 339 if self.features: 340 # we encode the example for ClassLabel feature types for example ~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in _iter(self) 333 else: 334 ex_iterable = self._ex_iterable --> 335 yield from ex_iterable 336 337 def __iter__(self): ~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in __iter__(self) 76 77 def __iter__(self): ---> 78 for key, example in self.generate_examples_fn(**self.kwargs): 79 yield key, example 80 ~/Documents/GitHub/datasets/src/datasets/iterable_dataset.py in wrapper(**kwargs) 282 def wrapper(**kwargs): 283 python_formatter = PythonFormatter() --> 284 for key, table in generate_tables_fn(**kwargs): 285 batch = python_formatter.format_batch(table) 286 for i, example in enumerate(_batch_to_examples(batch)): ~/Documents/GitHub/datasets/src/datasets/packaged_modules/json/json.py in _generate_tables(self, files, original_files) 85 file, 86 read_options=self.config.pa_read_options, ---> 87 parse_options=self.config.pa_parse_options, 88 ) 89 except pa.ArrowInvalid as err: ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/_json.pyx in pyarrow._json.read_json() ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/_json.pyx in pyarrow._json._get_reader() ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.get_input_stream() ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.get_native_file() ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile.__cinit__() ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/io.pxi in pyarrow.lib.OSFile._open_readable() ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status() ~/miniconda2/envs/datasets/lib/python3.7/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status() FileNotFoundError: [Errno 2] Failed to open local file 'gzip://file-000000000000.json::/Users/thomwolf/github-dataset/file-000000000000.json.gz'. Detail: [errno 2] No such file or directory ``` ## Environment info - `datasets` version: 1.9.1.dev0 - Platform: Darwin-19.6.0-x86_64-i386-64bit - Python version: 3.7.7 - PyArrow version: 1.0.0
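The comments above explain that dataset scripts gain streaming support because the builtin `open` is extended to handle URLs, so compressed files should be opened through it. A minimal sketch of that pattern for a hypothetical JSON Lines loading script (file handling and field layout are illustrative only):

```python
# Pattern from the comments above: wrap the (patched) builtin `open` with gzip.open so the
# script works both on local files and on URLs in streaming mode. Names are illustrative.
import gzip
import json

def _generate_examples(files):
    key = 0
    for path in files:
        with gzip.open(open(path, "rb"), "rt", encoding="utf-8") as f:
            for line in f:
                yield key, json.loads(line)
                key += 1
```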
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2607/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2607/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2606
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2606/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2606/comments
https://api.github.com/repos/huggingface/datasets/issues/2606/events
https://github.com/huggingface/datasets/issues/2606
938,763,684
MDU6SXNzdWU5Mzg3NjM2ODQ=
2,606
[Metrics] addition of wiki_split metrics
{ "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" }, { "id": 2459308248, "node_id": "MDU6TGFiZWwyNDU5MzA4MjQ4", "url": "https://api.github.com/repos/huggingface/datasets/labels/metric%20request", "name": "metric request", "color": "d4c5f9", "default": false, "description": "Requesting to add a new metric" } ]
closed
false
{ "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false }
[ { "login": "bhadreshpsavani", "id": 26653468, "node_id": "MDQ6VXNlcjI2NjUzNDY4", "avatar_url": "https://avatars.githubusercontent.com/u/26653468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bhadreshpsavani", "html_url": "https://github.com/bhadreshpsavani", "followers_url": "https://api.github.com/users/bhadreshpsavani/followers", "following_url": "https://api.github.com/users/bhadreshpsavani/following{/other_user}", "gists_url": "https://api.github.com/users/bhadreshpsavani/gists{/gist_id}", "starred_url": "https://api.github.com/users/bhadreshpsavani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhadreshpsavani/subscriptions", "organizations_url": "https://api.github.com/users/bhadreshpsavani/orgs", "repos_url": "https://api.github.com/users/bhadreshpsavani/repos", "events_url": "https://api.github.com/users/bhadreshpsavani/events{/privacy}", "received_events_url": "https://api.github.com/users/bhadreshpsavani/received_events", "type": "User", "site_admin": false } ]
null
[ "#take" ]
1,625,655,364,000
1,626,129,271,000
1,626,129,271,000
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.** While training a model on the sentence-splitting task in English, we need to evaluate the trained model on `Exact Match`, `SARI` and `BLEU` scores like this ![image](https://user-images.githubusercontent.com/26653468/124746876-ff5a3380-df3e-11eb-9a01-4b48db7a6694.png) While training we need a metric that can return all of these outputs. Currently, we don't have an exact match metric for text-normalized data. **Describe the solution you'd like** A custom metric for wiki_split that can calculate these three values and provide them in the form of a single dictionary. For exact match, we can refer to [this](https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py) **Describe alternatives you've considered** Two metrics are already present; one more can be added for exact match, and then we can run all three metrics in the training script. #self-assign
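A rough sketch of what such a combined metric could return, assuming the `sari` and `sacrebleu` metrics are available through `load_metric`; the exact-match normalization here is a simplified placeholder rather than the SQuAD one referenced above:

```python
# Rough sketch of a combined wiki_split-style metric; assumes the "sari" and "sacrebleu"
# metrics exist, and uses a simplified exact-match normalization.
from datasets import load_metric

def wiki_split_scores(sources, predictions, references):
    sari = load_metric("sari").compute(sources=sources, predictions=predictions, references=references)
    bleu = load_metric("sacrebleu").compute(predictions=predictions, references=references)
    exact = sum(p.strip().lower() == refs[0].strip().lower() for p, refs in zip(predictions, references)) / len(predictions)
    return {"sari": sari["sari"], "sacrebleu": bleu["score"], "exact": 100 * exact}
```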
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2606/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2606/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2604
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2604/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2604/comments
https://api.github.com/repos/huggingface/datasets/issues/2604/events
https://github.com/huggingface/datasets/issues/2604
938,602,237
MDU6SXNzdWU5Mzg2MDIyMzc=
2,604
Add option to delete temporary files (e.g. extracted files) when loading dataset
{ "login": "thomwolf", "id": 7353373, "node_id": "MDQ6VXNlcjczNTMzNzM=", "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thomwolf", "html_url": "https://github.com/thomwolf", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "repos_url": "https://api.github.com/users/thomwolf/repos", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[ "Hi !\r\nIf we want something more general, we could either\r\n1. delete the extracted files after the arrow data generation automatically, or \r\n2. delete each extracted file during the arrow generation right after it has been closed.\r\n\r\nSolution 2 is better to save disk space during the arrow generation. Is it what you had in mind ?\r\n\r\nThe API could look like\r\n```python\r\nload_dataset(..., delete_extracted_files_after_usage=True)\r\n```\r\n\r\nIn terms of implementation, here are some directions we could take for each solution:\r\n1. get the list of the extracted files from the DownloadManager and then delete them after the dataset is processed. This can be implemented in `download_and_prepare` I guess\r\n2. maybe wrap and mock `open` in the builder to make it delete the file when the file is closed.", "Also, if I delete the extracted files they need to be re-extracted again instead of loading from the Arrow cache files", "I think we already opened an issue about this topic (suggested by @stas00): duplicated of #2481?\r\n\r\nThis is in our TODO list... 😅 ", "I think the deletion of each extracted file could be implemented in our CacheManager and ExtractManager (once merged to master: #2295, #2277). 😉 ", "Oh yes sorry, I didn't check if this was a duplicate", "Nevermind @thomwolf, I just mentioned the other issue so that both appear linked in GitHub and we do not forget to close both once we make the corresponding Pull Request... That was the main reason! 😄 ", "Ok yes. I think this is an important feature to be able to use large datasets which are pretty much always compressed files.\r\n\r\nIn particular now this requires to keep the extracted file on the drive if you want to avoid reprocessing the dataset so in my case, this require using always ~400GB of drive instead of just 200GB (which is already significant). \r\n\r\nTwo nice features would be to:\r\n- allow to delete the extracted files without loosing the ability to load the dataset from the cached arrow-file\r\n- streamlined decompression when only the currently read file is extracted - this might require to read the list of files from the extracted archives before processing them?", "Here is a sample dataset with 2 such large compressed JSON files for debugging: https://huggingface.co/datasets/thomwolf/github-python", "Note that I'm confirming that with the current master branch of dataset, deleting extracted files (without deleting the arrow cache file) lead to **re-extracting** these files when reloading the dataset instead of directly loading the arrow cache file.", "Hi ! That's weird, it doesn't do that on my side (tested on master on my laptop by deleting the `extracted` folder in the download cache directory). You tested with one of the files at https://huggingface.co/datasets/thomwolf/github-python that you have locally ?", "Yes it’s when I load local compressed JSON line files with load_dataset(‘json’, data_files=…) ", "@thomwolf I'm sorry but I can't reproduce this problem. 
I'm also using: \r\n```python\r\nds = load_dataset(\"json\", split=\"train\", data_files=data_files, cache_dir=cache_dir)\r\n```\r\nafter having removed the extracted files:\r\n```python\r\nassert sorted((cache_dir / \"downloads\" / \"extracted\").iterdir()) == []\r\n```\r\n\r\nI get the logging message:\r\n```shell\r\nWARNING datasets.builder:builder.py:531 Reusing dataset json ...\r\n```", "Do you confirm the extracted folder stays empty after reloading?", "> \r\n> \r\n> Do you confirm the extracted folder stays empty after reloading?\r\n\r\nYes, I have the above mentioned assertion on the emptiness of the extracted folder:\r\n```python\r\nassert sorted((cache_dir / \"downloads\" / \"extracted\").iterdir()) == []\r\n```\r\n" ]
1,625,644,576,000
1,626,685,698,000
1,626,685,698,000
MEMBER
null
I'm loading a dataset made up of 44 GB of compressed JSON files. When loading the dataset with the JSON script, extracting the files creates about 200 GB of uncompressed files before creating the 180 GB of Arrow cache tables. Having a simple way to delete the extracted files after usage (or even better, to stream extraction/deletion) would be nice to avoid disk clutter. I can maybe tackle this one in the JSON script unless you want a more general solution.
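Until such an option exists, a manual stopgap is to remove the decompressed files once the Arrow cache tables have been written; a sketch assuming the default cache layout (note this removes extracted files for all cached datasets, and older versions may re-extract them on reload, as discussed above):

```python
# Manual stopgap, assuming the default cache layout (~/.cache/huggingface/datasets): once
# the Arrow cache tables exist, the decompressed files can be deleted to reclaim space.
import shutil
from pathlib import Path

extracted_dir = Path.home() / ".cache" / "huggingface" / "datasets" / "downloads" / "extracted"
if extracted_dir.exists():
    shutil.rmtree(extracted_dir)  # removes extracted files for every cached dataset
```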
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2604/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2604/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2600
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2600/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2600/comments
https://api.github.com/repos/huggingface/datasets/issues/2600/events
https://github.com/huggingface/datasets/issues/2600
938,086,745
MDU6SXNzdWU5MzgwODY3NDU=
2,600
Crash when using multiprocessing (`num_proc` > 1) on `filter` and all samples are discarded
{ "login": "mxschmdt", "id": 4904985, "node_id": "MDQ6VXNlcjQ5MDQ5ODU=", "avatar_url": "https://avatars.githubusercontent.com/u/4904985?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mxschmdt", "html_url": "https://github.com/mxschmdt", "followers_url": "https://api.github.com/users/mxschmdt/followers", "following_url": "https://api.github.com/users/mxschmdt/following{/other_user}", "gists_url": "https://api.github.com/users/mxschmdt/gists{/gist_id}", "starred_url": "https://api.github.com/users/mxschmdt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mxschmdt/subscriptions", "organizations_url": "https://api.github.com/users/mxschmdt/orgs", "repos_url": "https://api.github.com/users/mxschmdt/repos", "events_url": "https://api.github.com/users/mxschmdt/events{/privacy}", "received_events_url": "https://api.github.com/users/mxschmdt/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[]
1,625,590,405,000
1,625,662,231,000
1,625,662,231,000
CONTRIBUTOR
null
## Describe the bug If `filter` is applied to a dataset using multiprocessing (`num_proc` > 1) and all sharded datasets are empty afterwards (due to all samples being discarded), the program crashes. ## Steps to reproduce the bug ```python from datasets import Dataset data = Dataset.from_dict({'id': [0,1]}) data.filter(lambda x: False, num_proc=2) ``` ## Expected results An empty table should be returned without crashing. ## Actual results ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 185, in wrapper out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) File "/home/user/venv/lib/python3.8/site-packages/datasets/fingerprint.py", line 397, in wrapper out = func(self, *args, **kwargs) File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 2143, in filter return self.map( File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 1738, in map result = concatenate_datasets(transformed_shards) File "/home/user/venv/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 3267, in concatenate_datasets table = concat_tables(tables_to_concat, axis=axis) File "/home/user/venv/lib/python3.8/site-packages/datasets/table.py", line 853, in concat_tables return ConcatenationTable.from_tables(tables, axis=axis) File "/home/user/venv/lib/python3.8/site-packages/datasets/table.py", line 713, in from_tables blocks = to_blocks(tables[0]) IndexError: list index out of range ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.9.0 - Platform: Linux-5.12.11-300.fc34.x86_64-x86_64-with-glibc2.2.5 - Python version: 3.8.10 - PyArrow version: 3.0.0
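Until the shard-concatenation path handles empty results, one possible workaround is to avoid multiprocessing for filters that may discard every sample; a small sketch under that assumption:

```python
# Possible workaround: run the filter in a single process when the result may be empty.
from datasets import Dataset

data = Dataset.from_dict({"id": [0, 1]})
filtered = data.filter(lambda x: False, num_proc=1)  # returns an empty dataset instead of crashing
print(len(filtered))  # 0
```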
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2600/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2600/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2598
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2598/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2598/comments
https://api.github.com/repos/huggingface/datasets/issues/2598/events
https://github.com/huggingface/datasets/issues/2598
937,930,632
MDU6SXNzdWU5Mzc5MzA2MzI=
2,598
Unable to download omp dataset
{ "login": "erikadistefano", "id": 25797960, "node_id": "MDQ6VXNlcjI1Nzk3OTYw", "avatar_url": "https://avatars.githubusercontent.com/u/25797960?v=4", "gravatar_id": "", "url": "https://api.github.com/users/erikadistefano", "html_url": "https://github.com/erikadistefano", "followers_url": "https://api.github.com/users/erikadistefano/followers", "following_url": "https://api.github.com/users/erikadistefano/following{/other_user}", "gists_url": "https://api.github.com/users/erikadistefano/gists{/gist_id}", "starred_url": "https://api.github.com/users/erikadistefano/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/erikadistefano/subscriptions", "organizations_url": "https://api.github.com/users/erikadistefano/orgs", "repos_url": "https://api.github.com/users/erikadistefano/repos", "events_url": "https://api.github.com/users/erikadistefano/events{/privacy}", "received_events_url": "https://api.github.com/users/erikadistefano/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @erikadistefano , thanks for reporting the issue.\r\n\r\nI have created a Pull Request that should fix it. \r\n\r\nOnce merged into master, feel free to update your installed `datasets` library (either by installing it from our GitHub master branch or waiting until our next release) to be able to load omp dataset." ]
1,625,580,052,000
1,625,662,595,000
1,625,662,595,000
NONE
null
## Describe the bug The omp dataset cannot be downloaded because of a DuplicatedKeysError ## Steps to reproduce the bug from datasets import load_dataset omp = load_dataset('omp', 'posts_labeled') print(omp) ## Expected results This code should download the omp dataset and print the dictionary ## Actual results Downloading and preparing dataset omp/posts_labeled (download: 1.27 MiB, generated: 13.31 MiB, post-processed: Unknown size, total: 14.58 MiB) to /home/erika_distefano/.cache/huggingface/datasets/omp/posts_labeled/1.1.0/2fe5b067be3bff1d4588d5b0cbb9b5b22ae1b9d5b026a8ff572cd389f862735b... 0 examples [00:00, ? examples/s]2021-07-06 09:43:55.868815: I tensorflow/stream_executor/platform/default/dso_loader.cc:48] Successfully opened dynamic library libcudart.so.11.0 Traceback (most recent call last): File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 990, in _prepare_split writer.write(example, key) File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 338, in write self.check_duplicate_keys() File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 349, in check_duplicate_keys raise DuplicatedKeysError(key) datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET ! Found duplicate Key: 3326 Keys should be unique and deterministic in nature During handling of the above exception, another exception occurred: Traceback (most recent call last): File "hf_datasets.py", line 32, in <module> omp = load_dataset('omp', 'posts_labeled') File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/load.py", line 748, in load_dataset use_auth_token=use_auth_token, File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 575, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 652, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/builder.py", line 992, in _prepare_split num_examples, num_bytes = writer.finalize() File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 409, in finalize self.check_duplicate_keys() File "/home/erika_distefano/.local/lib/python3.6/site-packages/datasets/arrow_writer.py", line 349, in check_duplicate_keys raise DuplicatedKeysError(key) datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET ! Found duplicate Key: 3326 Keys should be unique and deterministic in nature ## Environment info - `datasets` version: 1.8.0 - Platform: Ubuntu 18.04.4 LTS - Python version: 3.6.9 - PyArrow version: 3.0.0
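The error above is raised because the loading script yields the same key twice; keys passed to the writer must be unique and deterministic. A generic illustration of the expected pattern (not the actual omp script; field names are made up):

```python
# Generic illustration of unique, deterministic keys in a loading script (not the omp script).
def _generate_examples(rows):
    for idx, row in enumerate(rows):
        yield idx, {"text": row["text"], "label": row["label"]}  # hypothetical field names
```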
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2598/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2598/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2596
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2596/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2596/comments
https://api.github.com/repos/huggingface/datasets/issues/2596/events
https://github.com/huggingface/datasets/issues/2596
937,598,914
MDU6SXNzdWU5Mzc1OTg5MTQ=
2,596
Transformer Class on dataset
{ "login": "arita37", "id": 18707623, "node_id": "MDQ6VXNlcjE4NzA3NjIz", "avatar_url": "https://avatars.githubusercontent.com/u/18707623?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arita37", "html_url": "https://github.com/arita37", "followers_url": "https://api.github.com/users/arita37/followers", "following_url": "https://api.github.com/users/arita37/following{/other_user}", "gists_url": "https://api.github.com/users/arita37/gists{/gist_id}", "starred_url": "https://api.github.com/users/arita37/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arita37/subscriptions", "organizations_url": "https://api.github.com/users/arita37/orgs", "repos_url": "https://api.github.com/users/arita37/repos", "events_url": "https://api.github.com/users/arita37/events{/privacy}", "received_events_url": "https://api.github.com/users/arita37/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Hi ! Do you have an example in mind that shows how this could be useful ?", "Example:\n\nMerge 2 datasets into one datasets\n\nLabel extraction from dataset\n\ndataset(text, label)\n —> dataset(text, newlabel)\n\nTextCleaning.\n\n\nFor image dataset, \nTransformation are easier (ie linear algebra).\n\n\n\n\n\n\n> On Jul 6, 2021, at 17:39, Quentin Lhoest ***@***.***> wrote:\n> \n> \n> Hi ! Do you have an example in mind that shows how this could be useful ?\n> \n> —\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n", "There are already a few transformations that you can apply on a dataset using methods like `dataset.map()`.\r\nYou can find examples in the documentation here:\r\nhttps://huggingface.co/docs/datasets/processing.html\r\n\r\nYou can merge two datasets with `concatenate_datasets()` or do label extraction with `dataset.map()` for example", "Ok, sure.\n\nThanks for pointing on functional part.\nMy question is more\n“Philosophical”/Design perspective.\n\nThere are 2 perspetive:\n Add transformation methods to \n Dataset Class\n\n\n OR Create a Transformer Class\n which operates on Dataset Class.\n\nT(Dataset) —> Dataset\n\ndatasetnew = MyTransform.transform(dataset)\ndatasetNew.save(path)\n\n\nWhat would be the difficulty\nof implementing a Transformer Class\noperating at dataset level ?\n\n\nthanks\n\n\n\n\n\n\n\n\n\n> On Jul 6, 2021, at 22:00, Quentin Lhoest ***@***.***> wrote:\n> \n> \n> There are already a few transformations that you can apply on a dataset using methods like dataset.map().\n> You can find examples in the documentation here:\n> https://huggingface.co/docs/datasets/processing.html\n> \n> You can merge two datasets with concatenate_datasets() or do label extraction with dataset.map() for example\n> \n> —\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n", "I can imagine that this would be a useful API to implement processing pipelines as transforms. They could be used to perform higher level transforms compared to the atomic transforms allowed by methods like map, filter, etc.\r\n\r\nI guess if you find any transform that could be useful for text dataset processing, image dataset processing etc. we could definitely start having such transforms :)", "Thanks for reply.\n\nWhat would be the constraints\nto have\nDataset —> Dataset consistency ?\n\nMain issue would be\nlarger than memory dataset and\nserialization on disk.\n\nTechnically,\none still process at atomic level\nand try to wrap the full results\ninto Dataset…. (!)\n\nWhat would you think ?\n\n\n\n\n\n\n\n\n> On Jul 7, 2021, at 16:51, Quentin Lhoest ***@***.***> wrote:\n> \n> \n> I can imagine that this would be a useful API to implement processing pipelines as transforms. They could be used to perform higher level transforms compared to the atomic transforms allowed by methods like map, filter, etc.\n> \n> I guess if you find any transform that could be useful for text dataset processing, image dataset processing etc. we could definitely start having such transforms :)\n> \n> —\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n", "We can be pretty flexible and not impose any constraints for transforms.\r\n\r\nMoreover, this library is designed to support datasets bigger than memory. The datasets are loaded from the disk via memory mapping, without filling up RAM. 
Even processing functions like `map` work in a batched fashion to not fill up your RAM. So this shouldn't be an issue", "Ok thanks.\n\nBut, Dataset has various flavors.\nIn current design of Dataset,\n how the serialization on disk is done (?)\n\n\nThe main issue is serialization \nof newdataset= Transform(Dataset)\n (ie thats why am referring to Out Of memory dataset…):\n\n Should be part of Transform or part of dataset ?\n\n\n\n\nMaybe, not, since the output is aimed to feed model in memory (?)\n\n\n\n\n\n\n\n\n> On Jul 7, 2021, at 18:04, Quentin Lhoest ***@***.***> wrote:\n> \n> \n> We can be pretty flexible and not impose any constraints for transforms.\n> \n> Moreover, this library is designed to support datasets bigger than memory. The datasets are loaded from the disk via memory mapping, without filling up RAM. Even processing functions like map work in a batched fashion to not fill up your RAM. So this shouldn't be an issue\n> \n> —\n> You are receiving this because you authored the thread.\n> Reply to this email directly, view it on GitHub, or unsubscribe.\n", "I'm not sure I understand, could you elaborate a bit more please ?\r\n\r\nEach dataset is a wrapper of a PyArrow Table that contains all the data. The table is loaded from an arrow file on the disk.\r\nWe have an ArrowWriter and ArrowReader class to write/read arrow tables on disk or in in-memory buffers." ]
1,625,556,435,000
1,625,732,525,000
null
NONE
null
Just wondering if you intend to create a Transformer class: dataset --> dataset, which makes deterministic transformations (i.e. not fit).
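A minimal sketch of what such a deterministic dataset --> dataset transform could look like when built on the existing `map()` API mentioned in the comments; the class and column names are made up for illustration:

```python
# Sketch of a deterministic Dataset -> Dataset transform layered on top of map().
from datasets import Dataset

class LowercaseText:
    def __init__(self, column: str):
        self.column = column

    def transform(self, dataset: Dataset) -> Dataset:
        return dataset.map(lambda example: {self.column: example[self.column].lower()})

ds = Dataset.from_dict({"text": ["Hello World", "Datasets"]})
new_ds = LowercaseText("text").transform(ds)
print(new_ds[0])  # {'text': 'hello world'}
```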
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2596/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2596/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2595
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2595/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2595/comments
https://api.github.com/repos/huggingface/datasets/issues/2595/events
https://github.com/huggingface/datasets/issues/2595
937,483,120
MDU6SXNzdWU5Mzc0ODMxMjA=
2,595
ModuleNotFoundError: No module named 'datasets.tasks' while importing common voice datasets
{ "login": "profsatwinder", "id": 41314912, "node_id": "MDQ6VXNlcjQxMzE0OTEy", "avatar_url": "https://avatars.githubusercontent.com/u/41314912?v=4", "gravatar_id": "", "url": "https://api.github.com/users/profsatwinder", "html_url": "https://github.com/profsatwinder", "followers_url": "https://api.github.com/users/profsatwinder/followers", "following_url": "https://api.github.com/users/profsatwinder/following{/other_user}", "gists_url": "https://api.github.com/users/profsatwinder/gists{/gist_id}", "starred_url": "https://api.github.com/users/profsatwinder/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/profsatwinder/subscriptions", "organizations_url": "https://api.github.com/users/profsatwinder/orgs", "repos_url": "https://api.github.com/users/profsatwinder/repos", "events_url": "https://api.github.com/users/profsatwinder/events{/privacy}", "received_events_url": "https://api.github.com/users/profsatwinder/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @profsatwinder.\r\n\r\nIt looks like you are using an old version of `datasets`. Please update it with `pip install -U datasets` and indicate if the problem persists.", "@albertvillanova Thanks for the information. I updated it to 1.9.0 and the issue is resolved. Thanks again. " ]
1,625,541,655,000
1,625,551,189,000
1,625,551,189,000
NONE
null
Error traceback: --------------------------------------------------------------------------- ModuleNotFoundError Traceback (most recent call last) <ipython-input-8-a7b592d3bca0> in <module>() 1 from datasets import load_dataset, load_metric 2 ----> 3 common_voice_train = load_dataset("common_voice", "pa-IN", split="train+validation") 4 common_voice_test = load_dataset("common_voice", "pa-IN", split="test") 9 frames /root/.cache/huggingface/modules/datasets_modules/datasets/common_voice/078d412587e9efeb0ae2e574da99c31e18844c496008d53dc5c60f4159ed639b/common_voice.py in <module>() 19 20 import datasets ---> 21 from datasets.tasks import AutomaticSpeechRecognition 22 23 ModuleNotFoundError: No module named 'datasets.tasks'
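As the comments note, `datasets.tasks` only exists in recent releases, so checking the installed version is a quick way to confirm the cause before upgrading with `pip install -U datasets`; a minimal check:

```python
# Version check; the reporter above resolved the error by updating to datasets 1.9.0.
import datasets

print(datasets.__version__)
```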
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2595/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2595/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2591
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2591/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2591/comments
https://api.github.com/repos/huggingface/datasets/issues/2591/events
https://github.com/huggingface/datasets/issues/2591
936,957,975
MDU6SXNzdWU5MzY5NTc5NzU=
2,591
Cached dataset overflowing disk space
{ "login": "BirgerMoell", "id": 1704131, "node_id": "MDQ6VXNlcjE3MDQxMzE=", "avatar_url": "https://avatars.githubusercontent.com/u/1704131?v=4", "gravatar_id": "", "url": "https://api.github.com/users/BirgerMoell", "html_url": "https://github.com/BirgerMoell", "followers_url": "https://api.github.com/users/BirgerMoell/followers", "following_url": "https://api.github.com/users/BirgerMoell/following{/other_user}", "gists_url": "https://api.github.com/users/BirgerMoell/gists{/gist_id}", "starred_url": "https://api.github.com/users/BirgerMoell/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/BirgerMoell/subscriptions", "organizations_url": "https://api.github.com/users/BirgerMoell/orgs", "repos_url": "https://api.github.com/users/BirgerMoell/repos", "events_url": "https://api.github.com/users/BirgerMoell/events{/privacy}", "received_events_url": "https://api.github.com/users/BirgerMoell/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi! I'm transferring this issue over to `datasets`", "I'm using the datasets concatenate dataset to combine the datasets and then train.\r\ntrain_dataset = concatenate_datasets([dataset1, dataset2, common_voice_train])\r\n\r\n", "Hi @BirgerMoell.\r\n\r\nYou have several options:\r\n- to set caching to be stored on a different path location, other than the default one (`~/.cache/huggingface/datasets`):\r\n - either setting the environment variable `HF_DATASETS_CACHE` with the path to the new cache location\r\n - or by passing it with the parameter `cache_dir` when loading each of the datasets: `dataset = load_dataset(..., cache_dir=your_new_location)`\r\n\r\n You can get all the information in the docs: https://huggingface.co/docs/datasets/loading_datasets.html#cache-directory\r\n- I wouldn't recommend disabling caching, because current implementation generates cache files anyway, although in a temporary directory and they are deleted when the session closes. See details here: https://huggingface.co/docs/datasets/processing.html#enable-or-disable-caching\r\n- You could alternatively load the datasets in streaming mode. This is a new feature which allows loading the datasets without downloading the entire files. More information here: https://huggingface.co/docs/datasets/dataset_streaming.html", "Hi @BirgerMoell,\r\n\r\nWe are planning to add a new feature to datasets, which could be interesting in your case: Add the option to delete temporary files (decompressed files) from the cache directory (see: #2481, #2604).\r\n\r\nWe will ping you once this feature is implemented, so that the size of your cache directory will be considerably reduced." ]
1,625,481,799,000
1,626,685,699,000
1,626,685,699,000
CONTRIBUTOR
null
I'm training a Swedish Wav2vec2 model on a Linux GPU and having issues that the Hugging Face cached dataset folder is completely filling up my disk space (I'm training on a dataset of around 500 GB). The cache folder is 500 GB (and now my disk space is full). Is there a way to toggle caching, or to set the caching to be stored on a different device? (I have another drive with 4 TB that could hold the caching files.) This might not technically be a bug, but I was unsure and I felt that the bug was the closest one.

Traceback (most recent call last):
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/multiprocess/pool.py", line 121, in worker
    result = (True, func(*args, **kwds))
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 186, in wrapper
    out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs)
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/fingerprint.py", line 397, in wrapper
    out = func(self, *args, **kwargs)
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/arrow_dataset.py", line 1983, in _map_single
    writer.finalize()
  File "/home/birger/miniconda3/envs/wav2vec2/lib/python3.7/site-packages/datasets/arrow_writer.py", line 418, in finalize
    self.pa_writer.close()
  File "pyarrow/ipc.pxi", line 402, in pyarrow.lib._CRecordBatchWriter.close
  File "pyarrow/error.pxi", line 97, in pyarrow.lib.check_status
OSError: [Errno 28] Error writing bytes to file. Detail: [errno 28] No space left on device
"""

The above exception was the direct cause of the following exception:
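A minimal sketch of the workaround discussed in the comments above: redirect the `datasets` cache to a larger drive, either through the `HF_DATASETS_CACHE` environment variable or the `cache_dir` argument of `load_dataset`. The mount point and the Swedish Common Voice config name are assumptions for illustration.

```python
import os

# Assumption: /mnt/bigdrive is the larger 4 TB drive mentioned above.
os.environ["HF_DATASETS_CACHE"] = "/mnt/bigdrive/hf_datasets_cache"

from datasets import load_dataset

# Or pass the cache location per call instead of via the environment variable.
common_voice_train = load_dataset(
    "common_voice", "sv-SE", split="train+validation",
    cache_dir="/mnt/bigdrive/hf_datasets_cache",
)
```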
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2591/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2591/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2585
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2585/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2585/comments
https://api.github.com/repos/huggingface/datasets/issues/2585/events
https://github.com/huggingface/datasets/issues/2585
936,484,419
MDU6SXNzdWU5MzY0ODQ0MTk=
2,585
sqaud_v2 dataset contains misalignment between the answer text and the context value at the answer index
{ "login": "mmajurski", "id": 9354454, "node_id": "MDQ6VXNlcjkzNTQ0NTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/9354454?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mmajurski", "html_url": "https://github.com/mmajurski", "followers_url": "https://api.github.com/users/mmajurski/followers", "following_url": "https://api.github.com/users/mmajurski/following{/other_user}", "gists_url": "https://api.github.com/users/mmajurski/gists{/gist_id}", "starred_url": "https://api.github.com/users/mmajurski/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mmajurski/subscriptions", "organizations_url": "https://api.github.com/users/mmajurski/orgs", "repos_url": "https://api.github.com/users/mmajurski/repos", "events_url": "https://api.github.com/users/mmajurski/events{/privacy}", "received_events_url": "https://api.github.com/users/mmajurski/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @mmajurski, thanks for reporting this issue.\r\n\r\nIndeed this misalignment arises because the source dataset context field contains leading blank spaces (and these are counted within the answer_start), while our datasets loading script removes these leading blank spaces.\r\n\r\nI'm going to fix our script so that all leading blank spaces in the source dataset are kept, and there is no misalignment between the answer text and the answer_start within the context.", "If you are going to be altering the data cleaning from the source Squad dataset, here is one thing to consider.\r\nThere are occasional double spaces separating words which it might be nice to get rid of. \r\n\r\nEither way, thank you." ]
1,625,413,189,000
1,625,663,931,000
1,625,663,931,000
NONE
null
## Describe the bug

The built-in huggingface squad_v2 dataset that you can access via datasets.load_dataset contains mis-alignment between answers['text'] and the characters in the context at the location specified by answers['answer_start'].

For example:
id = '56d1f453e7d4791d009025bd'
answers = {'text': ['Pure Land'], 'answer_start': [146]}

However, the actual text in the context at location 146 is 'ure Land,', which is an off-by-one error from the correct answer.

## Steps to reproduce the bug
```python
import datasets


def check_context_answer_alignment(example):
    for a_idx in range(len(example['answers']['text'])):
        # check raw dataset for answer consistency between context and answer
        answer_text = example['answers']['text'][a_idx]
        a_st_idx = example['answers']['answer_start'][a_idx]
        a_end_idx = a_st_idx + len(example['answers']['text'][a_idx])
        answer_text_from_context = example['context'][a_st_idx:a_end_idx]
        if answer_text != answer_text_from_context:
            # print(example['id'])
            return False
    return True


dataset = datasets.load_dataset('squad_v2', split='train', keep_in_memory=True)
start_len = len(dataset)
dataset = dataset.filter(check_context_answer_alignment, num_proc=1, keep_in_memory=True)
end_len = len(dataset)
print('{} instances contain mis-alignment between the answer text and answer index.'.format(start_len - end_len))
```

## Expected results
This code should result in 0 rows being filtered out from the dataset.

## Actual results
This filter command results in 258 rows being flagged as containing a discrepancy between the text contained within answers['text'] and the text in example['context'] at the answers['answer_start'] location. This code will reproduce the problem and produce the following count: "258 instances contain mis-alignment between the answer text and answer index."
## Environment info Steps to rebuilt the Conda environment: ``` # create a virtual environment to stuff all these packages into conda create -n round8 python=3.8 -y # activate the virtual environment conda activate round8 # install pytorch (best done through conda to handle cuda dependencies) conda install pytorch torchvision torchtext cudatoolkit=11.1 -c pytorch-lts -c nvidia pip install jsonpickle transformers datasets matplotlib ``` OS: Ubuntu 20.04 Python 3.8 Result of `conda env export`: ``` name: round8 channels: - pytorch-lts - nvidia - defaults dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=4.5=1_gnu - blas=1.0=mkl - brotlipy=0.7.0=py38h27cfd23_1003 - bzip2=1.0.8=h7b6447c_0 - ca-certificates=2021.5.25=h06a4308_1 - certifi=2021.5.30=py38h06a4308_0 - cffi=1.14.5=py38h261ae71_0 - chardet=4.0.0=py38h06a4308_1003 - cryptography=3.4.7=py38hd23ed53_0 - cudatoolkit=11.1.74=h6bb024c_0 - ffmpeg=4.2.2=h20bf706_0 - freetype=2.10.4=h5ab3b9f_0 - gmp=6.2.1=h2531618_2 - gnutls=3.6.15=he1e5248_0 - idna=2.10=pyhd3eb1b0_0 - intel-openmp=2021.2.0=h06a4308_610 - jpeg=9b=h024ee3a_2 - lame=3.100=h7b6447c_0 - lcms2=2.12=h3be6417_0 - ld_impl_linux-64=2.35.1=h7274673_9 - libffi=3.3=he6710b0_2 - libgcc-ng=9.3.0=h5101ec6_17 - libgomp=9.3.0=h5101ec6_17 - libidn2=2.3.1=h27cfd23_0 - libopus=1.3.1=h7b6447c_0 - libpng=1.6.37=hbc83047_0 - libstdcxx-ng=9.3.0=hd4cf53a_17 - libtasn1=4.16.0=h27cfd23_0 - libtiff=4.2.0=h85742a9_0 - libunistring=0.9.10=h27cfd23_0 - libuv=1.40.0=h7b6447c_0 - libvpx=1.7.0=h439df22_0 - libwebp-base=1.2.0=h27cfd23_0 - lz4-c=1.9.3=h2531618_0 - mkl=2021.2.0=h06a4308_296 - mkl-service=2.3.0=py38h27cfd23_1 - mkl_fft=1.3.0=py38h42c9631_2 - mkl_random=1.2.1=py38ha9443f7_2 - ncurses=6.2=he6710b0_1 - nettle=3.7.3=hbbd107a_1 - ninja=1.10.2=hff7bd54_1 - numpy=1.20.2=py38h2d18471_0 - numpy-base=1.20.2=py38hfae3a4d_0 - olefile=0.46=py_0 - openh264=2.1.0=hd408876_0 - openssl=1.1.1k=h27cfd23_0 - pillow=8.2.0=py38he98fc37_0 - pip=21.1.2=py38h06a4308_0 - pycparser=2.20=py_2 - pyopenssl=20.0.1=pyhd3eb1b0_1 - pysocks=1.7.1=py38h06a4308_0 - python=3.8.10=h12debd9_8 - pytorch=1.8.1=py3.8_cuda11.1_cudnn8.0.5_0 - readline=8.1=h27cfd23_0 - requests=2.25.1=pyhd3eb1b0_0 - setuptools=52.0.0=py38h06a4308_0 - six=1.16.0=pyhd3eb1b0_0 - sqlite=3.35.4=hdfb4753_0 - tk=8.6.10=hbc83047_0 - torchtext=0.9.1=py38 - torchvision=0.9.1=py38_cu111 - typing_extensions=3.7.4.3=pyha847dfd_0 - urllib3=1.26.4=pyhd3eb1b0_0 - wheel=0.36.2=pyhd3eb1b0_0 - x264=1!157.20191217=h7b6447c_0 - xz=5.2.5=h7b6447c_0 - zlib=1.2.11=h7b6447c_3 - zstd=1.4.9=haebb681_0 - pip: - click==8.0.1 - cycler==0.10.0 - datasets==1.8.0 - dill==0.3.4 - filelock==3.0.12 - fsspec==2021.6.0 - huggingface-hub==0.0.8 - joblib==1.0.1 - jsonpickle==2.0.0 - kiwisolver==1.3.1 - matplotlib==3.4.2 - multiprocess==0.70.12.2 - packaging==20.9 - pandas==1.2.4 - pyarrow==3.0.0 - pyparsing==2.4.7 - python-dateutil==2.8.1 - pytz==2021.1 - regex==2021.4.4 - sacremoses==0.0.45 - tokenizers==0.10.3 - tqdm==4.49.0 - transformers==4.6.1 - xxhash==2.0.2 prefix: /home/mmajurski/anaconda3/envs/round8 ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2585/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2585/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2583
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2583/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2583/comments
https://api.github.com/repos/huggingface/datasets/issues/2583/events
https://github.com/huggingface/datasets/issues/2583
936,034,976
MDU6SXNzdWU5MzYwMzQ5NzY=
2,583
Error iteration over IterableDataset using Torch DataLoader
{ "login": "LeenaShekhar", "id": 12227436, "node_id": "MDQ6VXNlcjEyMjI3NDM2", "avatar_url": "https://avatars.githubusercontent.com/u/12227436?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LeenaShekhar", "html_url": "https://github.com/LeenaShekhar", "followers_url": "https://api.github.com/users/LeenaShekhar/followers", "following_url": "https://api.github.com/users/LeenaShekhar/following{/other_user}", "gists_url": "https://api.github.com/users/LeenaShekhar/gists{/gist_id}", "starred_url": "https://api.github.com/users/LeenaShekhar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LeenaShekhar/subscriptions", "organizations_url": "https://api.github.com/users/LeenaShekhar/orgs", "repos_url": "https://api.github.com/users/LeenaShekhar/repos", "events_url": "https://api.github.com/users/LeenaShekhar/events{/privacy}", "received_events_url": "https://api.github.com/users/LeenaShekhar/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! This is because you first need to format the dataset for pytorch:\r\n\r\n```python\r\n>>> import torch\r\n>>> from datasets import load_dataset\r\n>>> dataset = load_dataset('oscar', \"unshuffled_deduplicated_en\", split='train', streaming=True)\r\n>>> torch_iterable_dataset = dataset.with_format(\"torch\")\r\n>>> assert isinstance(torch_iterable_dataset, torch.utils.data.IterableDataset)\r\n>>> dataloader = torch.utils.data.DataLoader(torch_iterable_dataset, batch_size=4)\r\n>>> next(iter(dataloader))\r\n{'id': tensor([0, 1, 2, 3]), 'text': ['Mtendere Village was inspired...]}\r\n```\r\n\r\nThis is because the pytorch dataloader expects a subclass of `torch.utils.data.IterableDataset`. Since you can't pass an arbitrary iterable to a pytorch dataloader, you first need to build an object that inherits from `torch.utils.data.IterableDataset` using `with_format(\"torch\")` for example.\r\n", "Thank you for that and the example! \r\n\r\nWhat you said makes total sense; I just somehow missed that and assumed HF IterableDataset was a subclass of Torch IterableDataset. " ]
1,625,255,758,000
1,626,771,885,000
1,625,528,903,000
NONE
null
## Describe the bug

I have an IterableDataset (created using streaming=True) and I am trying to create batches by passing this IterableDataset to the Torch DataLoader class. This throws the error pasted below. I can do the same with a Torch IterableDataset. One thing I noticed is that in the former case, when I look at the dataloader.sampler class, I get torch.utils.data.sampler.SequentialSampler, while the latter one gives torch.utils.data.dataloader._InfiniteConstantSampler. I am not sure if this is how it is meant to be used, but that's what seemed reasonable to me.

## Steps to reproduce the bug

1. Does not work.
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset('oscar', "unshuffled_deduplicated_en", split='train', streaming=True)
>>> dataloader = torch.utils.data.DataLoader(dataset, batch_size=4)
>>> dataloader.sampler
<torch.utils.data.sampler.SequentialSampler object at 0x7f245a510208>
>>> for batch in dataloader:
...     print(batch)
```

2. Works.
```python
import torch
from torch.utils.data import Dataset, IterableDataset, DataLoader


class CustomIterableDataset(IterableDataset):
    'Characterizes a dataset for PyTorch'

    def __init__(self, data):
        'Initialization'
        self.data = data

    def __iter__(self):
        return iter(self.data)


data = list(range(12))
dataset = CustomIterableDataset(data)
dataloader = DataLoader(dataset, batch_size=4)
print("dataloader: ", dataloader.sampler)
for batch in dataloader:
    print(batch)
```

## Expected results

To get batches of data with the batch size as 4. Output from the latter one (2), though the data source is different here, so the actual data is different.

dataloader:  <torch.utils.data.dataloader._InfiniteConstantSampler object at 0x7f1cc29e2c50>
tensor([0, 1, 2, 3])
tensor([4, 5, 6, 7])
tensor([ 8, 9, 10, 11])

## Actual results

<torch.utils.data.sampler.SequentialSampler object at 0x7f245a510208>
...
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
  File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 435, in __next__
    data = self._next_data()
  File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 474, in _next_data
    index = self._next_index()  # may raise StopIteration
  File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/dataloader.py", line 427, in _next_index
    return next(self._sampler_iter)  # may raise StopIteration
  File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/sampler.py", line 227, in __iter__
    for idx in self.sampler:
  File "/data/leshekha/lib/HFDatasets/lib/python3.6/site-packages/torch/utils/data/sampler.py", line 67, in __iter__
    return iter(range(len(self.data_source)))
TypeError: object of type 'IterableDataset' has no len()

## Environment info

<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: '1.8.1.dev0'
- Platform: Linux
- Python version: Python 3.6.8
- PyArrow version: '3.0.0'
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2583/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2583/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2573
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2573/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2573/comments
https://api.github.com/repos/huggingface/datasets/issues/2573/events
https://github.com/huggingface/datasets/issues/2573
934,584,745
MDU6SXNzdWU5MzQ1ODQ3NDU=
2,573
Finding right block-size with JSON loading difficult for user
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "This was actually a second error arising from a too small block-size in the json reader.\r\n\r\nFinding the right block size is difficult for the layman user" ]
1,625,129,315,000
1,625,166,653,000
null
MEMBER
null
As reported by @thomwolf, while loading a JSON Lines file with the "json" loading script, he gets:

> json.decoder.JSONDecodeError: Extra data: line 2 column 1 (char 383)
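For context, a rough sketch of the knob involved: the JSON loader ultimately relies on `pyarrow.json.read_json`, whose `ReadOptions.block_size` controls how much data is parsed per chunk, and a block that is too small for a single JSON line can surface errors like the one above. The file name and block size below are illustrative assumptions only.

```python
import pyarrow.json as paj

# Assumption: "data.jsonl" is a local JSON Lines file; 16 MiB is an arbitrary block size.
read_options = paj.ReadOptions(block_size=16 << 20)
table = paj.read_json("data.jsonl", read_options=read_options)
print(table.num_rows)
```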
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2573/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2573/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2572
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2572/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2572/comments
https://api.github.com/repos/huggingface/datasets/issues/2572/events
https://github.com/huggingface/datasets/issues/2572
934,573,767
MDU6SXNzdWU5MzQ1NzM3Njc=
2,572
Support Zstandard compressed files
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
1,625,128,624,000
1,625,482,227,000
1,625,482,227,000
MEMBER
null
Add support for Zstandard compressed files: https://facebook.github.io/zstd/
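As a hedged illustration of what reading such files involves (independent of how `datasets` would wire it in), the `zstandard` package can stream-decompress a `.zst` file; the file name below is hypothetical.

```python
import io
import zstandard as zstd

# Assumption: "data.jsonl.zst" is a zstd-compressed JSON Lines file.
with open("data.jsonl.zst", "rb") as fh:
    dctx = zstd.ZstdDecompressor()
    with dctx.stream_reader(fh) as reader:
        text = io.TextIOWrapper(reader, encoding="utf-8")
        for line in text:
            print(line.rstrip())
```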
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2572/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2572/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2569
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2569/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2569/comments
https://api.github.com/repos/huggingface/datasets/issues/2569/events
https://github.com/huggingface/datasets/issues/2569
933,015,797
MDU6SXNzdWU5MzMwMTU3OTc=
2,569
Weights of model checkpoint not initialized for RobertaModel for Bertscore
{ "login": "suzyahyah", "id": 2980993, "node_id": "MDQ6VXNlcjI5ODA5OTM=", "avatar_url": "https://avatars.githubusercontent.com/u/2980993?v=4", "gravatar_id": "", "url": "https://api.github.com/users/suzyahyah", "html_url": "https://github.com/suzyahyah", "followers_url": "https://api.github.com/users/suzyahyah/followers", "following_url": "https://api.github.com/users/suzyahyah/following{/other_user}", "gists_url": "https://api.github.com/users/suzyahyah/gists{/gist_id}", "starred_url": "https://api.github.com/users/suzyahyah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/suzyahyah/subscriptions", "organizations_url": "https://api.github.com/users/suzyahyah/orgs", "repos_url": "https://api.github.com/users/suzyahyah/repos", "events_url": "https://api.github.com/users/suzyahyah/events{/privacy}", "received_events_url": "https://api.github.com/users/suzyahyah/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @suzyahyah, thanks for reporting.\r\n\r\nThe message you get is indeed not an error message, but a warning coming from Hugging Face `transformers`. The complete warning message is:\r\n```\r\nSome weights of the model checkpoint at roberta-large were not used when initializing RobertaModel: ['lm_head.decoder.weight', 'lm_head.dense.weight', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.bias', 'lm_head.layer_norm.weight']\r\n- This IS expected if you are initializing RobertaModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\r\n- This IS NOT expected if you are initializing RobertaModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\r\n```\r\n\r\nIn this case, this behavior IS expected and you can safely ignore the warning message.\r\n\r\nThe reason is that you are just using RoBERTa to get the contextual embeddings of the input sentences/tokens, thus leaving away its head layer, whose weights are ignored.\r\n\r\nFeel free to reopen this issue if you need further explanations.", "Hi @suzyahyah, I have created a Pull Request to filter out that warning message in this specific case, since the behavior is as expected and the warning message can only cause confusion for users (as in your case)." ]
1,624,992,923,000
1,625,123,339,000
1,625,038,549,000
NONE
null
When applying bertscore out of the box,

```
Some weights of the model checkpoint at roberta-large were not used when initializing RobertaModel: ['lm_head.decoder.weight', 'lm_head.bias', 'lm_head.dense.bias', 'lm_head.layer_norm.bias', 'lm_head.dense.weight', 'lm_head.layer_norm.weight']
```

Following the typical usage from https://huggingface.co/docs/datasets/loading_metrics.html

```
from datasets import load_metric
metric = load_metric('bertscore')

# Example of typical usage
for batch in dataset:
    inputs, references = batch
    predictions = model(inputs)
    metric.add_batch(predictions=predictions, references=references)
score = metric.compute(lang="en")
#score = metric.compute(model_type="roberta-large") # gives the same error
```

I am concerned about this because my usage shouldn't require any further fine-tuning and most people would expect to use BertScore out of the box? I realised the huggingface code is a wrapper around https://github.com/Tiiiger/bert_score, but I think this repo is anyway relying on the model code and weights from huggingface repo....

## Environment info
- `datasets` version: 1.7.0
- Platform: Linux-5.4.0-1041-aws-x86_64-with-glibc2.27
- Python version: 3.9.5
- PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2569/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2569/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2564
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2564/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2564/comments
https://api.github.com/repos/huggingface/datasets/issues/2564/events
https://github.com/huggingface/datasets/issues/2564
932,389,639
MDU6SXNzdWU5MzIzODk2Mzk=
2,564
concatenate_datasets for iterable datasets
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
open
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[]
1,624,957,181,000
1,624,957,181,000
null
MEMBER
null
Currently `concatenate_datasets` only works for map-style `Dataset`. It would be nice to have it work for `IterableDataset` objects as well. It would simply chain the iterables of the iterable datasets.
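A minimal sketch of the chaining idea described above, using `itertools.chain` over plain Python iterables as stand-ins for iterable datasets; the real feature would additionally have to reconcile dataset info and features.

```python
from itertools import chain

# Stand-ins for two iterable datasets that yield example dicts.
dataset_a = ({"text": f"a-{i}"} for i in range(3))
dataset_b = ({"text": f"b-{i}"} for i in range(3))

# Concatenation = exhaust the first iterable, then the second.
for example in chain(dataset_a, dataset_b):
    print(example)
```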
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2564/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2564/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2563
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2563/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2563/comments
https://api.github.com/repos/huggingface/datasets/issues/2563/events
https://github.com/huggingface/datasets/issues/2563
932,387,639
MDU6SXNzdWU5MzIzODc2Mzk=
2,563
interleave_datasets for map-style datasets
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[]
1,624,957,044,000
1,625,132,013,000
1,625,132,013,000
MEMBER
null
Currently the `interleave_datasets` function only works for `IterableDataset`. Let's make it work for map-style `Dataset` objects as well. It would work the same way: either alternate between the datasets in order, or sample from them randomly given probabilities specified by the user.
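A minimal sketch of the alternating/sampling behavior described above, assuming plain Python lists as stand-ins for map-style datasets; this is not the `datasets` implementation.

```python
import random

def interleave(sources, probabilities=None, seed=42):
    """Alternate between sources in order, or sample a source at random per step."""
    rng = random.Random(seed)
    iterators = [iter(s) for s in sources]
    out, step = [], 0
    while True:
        if probabilities is None:
            idx = step % len(iterators)          # round-robin over the sources
        else:
            idx = rng.choices(range(len(iterators)), weights=probabilities, k=1)[0]
        try:
            out.append(next(iterators[idx]))
        except StopIteration:
            break                                # stop once any source is exhausted
        step += 1
    return out

print(interleave([[1, 2, 3], ["a", "b", "c"]]))  # [1, 'a', 2, 'b', 3, 'c']
```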
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2563/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2563/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2561
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2561/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2561/comments
https://api.github.com/repos/huggingface/datasets/issues/2561/events
https://github.com/huggingface/datasets/issues/2561
932,321,725
MDU6SXNzdWU5MzIzMjE3MjU=
2,561
Existing cache for local dataset builder file updates is ignored with `ignore_verifications=True`
{ "login": "apsdehal", "id": 3616806, "node_id": "MDQ6VXNlcjM2MTY4MDY=", "avatar_url": "https://avatars.githubusercontent.com/u/3616806?v=4", "gravatar_id": "", "url": "https://api.github.com/users/apsdehal", "html_url": "https://github.com/apsdehal", "followers_url": "https://api.github.com/users/apsdehal/followers", "following_url": "https://api.github.com/users/apsdehal/following{/other_user}", "gists_url": "https://api.github.com/users/apsdehal/gists{/gist_id}", "starred_url": "https://api.github.com/users/apsdehal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/apsdehal/subscriptions", "organizations_url": "https://api.github.com/users/apsdehal/orgs", "repos_url": "https://api.github.com/users/apsdehal/repos", "events_url": "https://api.github.com/users/apsdehal/events{/privacy}", "received_events_url": "https://api.github.com/users/apsdehal/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi ! I just tried to reproduce what you said:\r\n- create a local builder class\r\n- use `load_dataset`\r\n- update the builder class code\r\n- use `load_dataset` again (with or without `ignore_verifications=True`)\r\nAnd it creates a new cache, as expected.\r\n\r\nWhat modifications did you do to your builder's code ?", "Hi @lhoestq. Thanks for your reply. I just did minor modifications for which it should not regenerate cache (for e.g. Adding a print statement). Overall, regardless of cache miss, there should be an explicit option to allow reuse of existing cache if author knows cache shouldn't be affected.", "The cache is based on the hash of the dataset builder's code, so changing the code makes it recompute the cache.\r\n\r\nYou could still rename the cache directory of your previous computation to the new expected cache directory if you want to avoid having to recompute it and if you're sure that it would generate the exact same result.\r\n\r\nThe verifications are data integrity verifications: it checks the checksums of the downloaded files, as well as the size of the generated splits.", "Hi @apsdehal,\r\n\r\nIf you decide to follow @lhoestq's suggestion to rename the cache directory of your previous computation to the new expected cache directory, you can do the following to get the name of the new expected cache directory once #2500 is merged:\r\n```python\r\nfrom datasets import load_dataset_builder\r\ndataset_builder = load_dataset_builder(\"path/to/your/dataset\")\r\nprint(dataset_builder.cache_dir)\r\n```\r\n\r\nThis way, you don't have to recompute the hash of the dataset script yourself each time you modify the script." ]
1,624,952,583,000
1,625,057,724,000
null
NONE
null
## Describe the bug

If I have a local file defining a dataset builder class and I load it using the `load_dataset` functionality, the existing cache is ignored whenever the file is updated, even with `ignore_verifications=True`. This slows down debugging and cache generation for very large datasets.

## Steps to reproduce the bug

- Create a local dataset builder class
- Load the local builder class file using `load_dataset` and let the cache build
- Update the file's content
- The cache gets rebuilt.

## Expected results

With `ignore_verifications=True`, `load_dataset` should pick up the existing cache.

## Actual results

Creates a new cache.

## Environment info

<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.8.0
- Platform: Linux-5.4.0-52-generic-x86_64-with-debian-bullseye-sid
- Python version: 3.7.7
- PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2561/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2561/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2559
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2559/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2559/comments
https://api.github.com/repos/huggingface/datasets/issues/2559/events
https://github.com/huggingface/datasets/issues/2559
931,849,724
MDU6SXNzdWU5MzE4NDk3MjQ=
2,559
Memory usage consistently increases when processing a dataset with `.map`
{ "login": "apsdehal", "id": 3616806, "node_id": "MDQ6VXNlcjM2MTY4MDY=", "avatar_url": "https://avatars.githubusercontent.com/u/3616806?v=4", "gravatar_id": "", "url": "https://api.github.com/users/apsdehal", "html_url": "https://github.com/apsdehal", "followers_url": "https://api.github.com/users/apsdehal/followers", "following_url": "https://api.github.com/users/apsdehal/following{/other_user}", "gists_url": "https://api.github.com/users/apsdehal/gists{/gist_id}", "starred_url": "https://api.github.com/users/apsdehal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/apsdehal/subscriptions", "organizations_url": "https://api.github.com/users/apsdehal/orgs", "repos_url": "https://api.github.com/users/apsdehal/repos", "events_url": "https://api.github.com/users/apsdehal/events{/privacy}", "received_events_url": "https://api.github.com/users/apsdehal/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi ! Can you share the function you pass to `map` ?\r\nI know you mentioned it would be hard to share some code but this would really help to understand what happened" ]
1,624,905,118,000
1,624,956,180,000
null
NONE
null
## Describe the bug

I have a HF dataset with image paths stored in it and I am trying to load those image paths using `.map` with `num_proc=80`. I am noticing that the memory usage consistently keeps on increasing with time. I tried using `DEFAULT_WRITER_BATCH_SIZE=10` in the builder to decrease the arrow writer's batch size, but that doesn't seem to help.

## Steps to reproduce the bug

Providing the code as it is would be hard. I can provide an MVP if that helps.

## Expected results

Memory usage should become consistent after some time following the launch of processing.

## Actual results

Memory usage keeps on increasing.

## Environment info

- `datasets` version: 1.8.0
- Platform: Linux-5.4.0-52-generic-x86_64-with-debian-bullseye-sid
- Python version: 3.7.7
- PyArrow version: 3.0.0
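A hedged sketch of the kind of minimal reproduction this would need, showing where the memory-related knobs sit; the data file, column name, and processing function are assumptions, while `writer_batch_size`, `keep_in_memory`, and `num_proc` are real `Dataset.map` parameters.

```python
from datasets import load_dataset

# Assumption: a JSON Lines file with an "image_path" column; names are illustrative only.
dataset = load_dataset("json", data_files="metadata.jsonl", split="train")

def process(example):
    # Stand-in for the real image-loading function.
    example["path_length"] = len(example["image_path"])
    return example

processed = dataset.map(
    process,
    num_proc=8,
    writer_batch_size=100,   # flush rows to the on-disk arrow file more often
    keep_in_memory=False,
)
```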
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2559/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2559/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2556
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2556/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2556/comments
https://api.github.com/repos/huggingface/datasets/issues/2556/events
https://github.com/huggingface/datasets/issues/2556
931,595,872
MDU6SXNzdWU5MzE1OTU4NzI=
2,556
Better DuplicateKeysError error to help the user debug the issue
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[]
1,624,888,257,000
1,624,888,257,000
null
MEMBER
null
As mentioned in https://github.com/huggingface/datasets/issues/2552 it would be nice to improve the error message when a dataset fails to build because there are duplicate example keys.

The current one is

```python
datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET !
Found duplicate Key: 48
Keys should be unique and deterministic in nature
```

and we could have something that guides the user to debugging the issue:

```python
DuplicateKeysError: both 42th and 1337th examples have the same keys `48`.
Please fix the dataset script at <path/to/the/dataset/script>
```
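For context, a hedged sketch of the usual root cause and fix on the dataset-script side: the keys yielded by `_generate_examples` must be unique, which adding integer file and row ids does not guarantee, while combining them into a composite string does. The builder fragment and field names below are hypothetical.

```python
# Fragment of a hypothetical GeneratorBasedBuilder; only the key construction matters here.
def _generate_examples(self, filepaths):
    for file_idx, filepath in enumerate(filepaths):
        with open(filepath, encoding="utf-8") as f:
            for row_idx, line in enumerate(f):
                # file_idx + row_idx can collide (e.g. 1 + 2 == 2 + 1); a composite string cannot.
                key = f"{file_idx}_{row_idx}"
                yield key, {"text": line.strip()}
```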
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2556/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2556/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2554
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2554/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2554/comments
https://api.github.com/repos/huggingface/datasets/issues/2554/events
https://github.com/huggingface/datasets/issues/2554
931,453,855
MDU6SXNzdWU5MzE0NTM4NTU=
2,554
Multilabel metrics not supported
{ "login": "GuillemGSubies", "id": 37592763, "node_id": "MDQ6VXNlcjM3NTkyNzYz", "avatar_url": "https://avatars.githubusercontent.com/u/37592763?v=4", "gravatar_id": "", "url": "https://api.github.com/users/GuillemGSubies", "html_url": "https://github.com/GuillemGSubies", "followers_url": "https://api.github.com/users/GuillemGSubies/followers", "following_url": "https://api.github.com/users/GuillemGSubies/following{/other_user}", "gists_url": "https://api.github.com/users/GuillemGSubies/gists{/gist_id}", "starred_url": "https://api.github.com/users/GuillemGSubies/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GuillemGSubies/subscriptions", "organizations_url": "https://api.github.com/users/GuillemGSubies/orgs", "repos_url": "https://api.github.com/users/GuillemGSubies/repos", "events_url": "https://api.github.com/users/GuillemGSubies/events{/privacy}", "received_events_url": "https://api.github.com/users/GuillemGSubies/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @GuillemGSubies, thanks for reporting.\r\n\r\nI have made a PR to fix this issue and allow metrics to be computed also for multilabel classification problems.", "Looks nice, thank you very much! 🚀 ", "Sorry for reopening but I just noticed that the `_compute` method for the F1 metric is still not good enough for multilabel problems:\r\n\r\nhttps://github.com/huggingface/datasets/blob/92a3ee549705aa0a107c9fa5caf463b3b3da2616/metrics/f1/f1.py#L115\r\n\r\nSomehow we should be able to change the parameter `average` at least", "@GuillemGSubies, the parameter `average` passed to `_compute` is then passed to `f1_score`. This is right." ]
1,624,878,586,000
1,634,128,153,000
1,625,733,615,000
NONE
null
When I try to use a metric like F1 macro I get the following error:

```
TypeError: int() argument must be a string, a bytes-like object or a number, not 'list'
```

There is an explicit casting here:

https://github.com/huggingface/datasets/blob/fc79f61cbbcfa0e8c68b28c0a8257f17e768a075/src/datasets/features.py#L274

And looks like this is because here

https://github.com/huggingface/datasets/blob/fc79f61cbbcfa0e8c68b28c0a8257f17e768a075/metrics/f1/f1.py#L88

the features can only be integers, so we cannot use that F1 for multilabel. Instead, if I create the following F1 (ints replaced with sequence of ints), it will work:

```python
class F1(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"],
        )

    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        return {
            "f1": f1_score(
                references,
                predictions,
                labels=labels,
                pos_label=pos_label,
                average=average,
                sample_weight=sample_weight,
            ),
        }
```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2554/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2554/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2553
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2553/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2553/comments
https://api.github.com/repos/huggingface/datasets/issues/2553/events
https://github.com/huggingface/datasets/issues/2553
931,365,926
MDU6SXNzdWU5MzEzNjU5MjY=
2,553
load_dataset("web_nlg") NonMatchingChecksumError
{ "login": "alexandrethm", "id": 33730312, "node_id": "MDQ6VXNlcjMzNzMwMzEy", "avatar_url": "https://avatars.githubusercontent.com/u/33730312?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alexandrethm", "html_url": "https://github.com/alexandrethm", "followers_url": "https://api.github.com/users/alexandrethm/followers", "following_url": "https://api.github.com/users/alexandrethm/following{/other_user}", "gists_url": "https://api.github.com/users/alexandrethm/gists{/gist_id}", "starred_url": "https://api.github.com/users/alexandrethm/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alexandrethm/subscriptions", "organizations_url": "https://api.github.com/users/alexandrethm/orgs", "repos_url": "https://api.github.com/users/alexandrethm/repos", "events_url": "https://api.github.com/users/alexandrethm/events{/privacy}", "received_events_url": "https://api.github.com/users/alexandrethm/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi ! Thanks for reporting. This is due to the WebNLG repository that got updated today.\r\nI just pushed a fix at #2558 - this shouldn't happen anymore in the future.", "This is fixed on `master` now :)\r\nWe'll do a new release soon !" ]
1,624,872,406,000
1,624,901,019,000
1,624,900,996,000
NONE
null
Hi! It seems the WebNLG dataset gives a NonMatchingChecksumError. ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset('web_nlg', name="release_v3.0_en", split="dev") ``` Gives ``` NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://gitlab.com/shimorina/webnlg-dataset/-/archive/master/webnlg-dataset-master.zip'] ``` ## Environment info - `datasets` version: 1.8.0 - Platform: macOS-11.3.1-x86_64-i386-64bit - Python version: 3.9.4 - PyArrow version: 3.0.0 Also tested on Linux, with python 3.6.8
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2553/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2553/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2552
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2552/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2552/comments
https://api.github.com/repos/huggingface/datasets/issues/2552/events
https://github.com/huggingface/datasets/issues/2552
931,354,687
MDU6SXNzdWU5MzEzNTQ2ODc=
2,552
Keys should be unique error on code_search_net
{ "login": "thomwolf", "id": 7353373, "node_id": "MDQ6VXNlcjczNTMzNzM=", "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "gravatar_id": "", "url": "https://api.github.com/users/thomwolf", "html_url": "https://github.com/thomwolf", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "repos_url": "https://api.github.com/users/thomwolf/repos", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Two questions:\r\n- with `datasets-cli env` we don't have any information on the dataset script version used. Should we give access to this somehow? Either as a note in the Error message or as an argument with the name of the dataset to `datasets-cli env`?\r\n- I don't really understand why the id is duplicated in the code of `code_search_net`, how can I debug this actually?", "Thanks for reporting. There was indeed an issue with the keys. The key was the addition of the file id and row id, which resulted in collisions. I just opened a PR to fix this at https://github.com/huggingface/datasets/pull/2555\r\n\r\nTo help users debug this kind of errors we could try to show a message like this\r\n```python\r\nDuplicateKeysError: both 42th and 1337th examples have the same keys `48`.\r\nPlease fix the dataset script at <path/to/the/dataset/script>\r\n```\r\n\r\nThis way users who what to look for if they want to debug this issue. I opened an issue to track this: https://github.com/huggingface/datasets/issues/2556", "and are we sure there are not a lot of datasets which are now broken with this change?", "Thanks to the dummy data, we know for sure that most of them work as expected.\r\n`code_search_net` wasn't caught because the dummy data only have one dummy data file while the dataset script can actually load several of them using `os.listdir`. Let me take a look at all the other datasets that use `os.listdir` to see if the keys are alright", "I found one issue on `fever` (PR here: https://github.com/huggingface/datasets/pull/2557)\r\nAll the other ones seem fine :)", "Hi! Got same error when loading other dataset:\r\n```python3\r\nload_dataset('wikicorpus', 'raw_en')\r\n```\r\n\r\ntb:\r\n```pytb\r\n---------------------------------------------------------------------------\r\nDuplicatedKeysError Traceback (most recent call last)\r\n/opt/conda/lib/python3.8/site-packages/datasets/builder.py in _prepare_split(self, split_generator)\r\n 1109 example = self.info.features.encode_example(record)\r\n-> 1110 writer.write(example, key)\r\n 1111 finally:\r\n\r\n/opt/conda/lib/python3.8/site-packages/datasets/arrow_writer.py in write(self, example, key, writer_batch_size)\r\n 341 if self._check_duplicates:\r\n--> 342 self.check_duplicate_keys()\r\n 343 # Re-intializing to empty list for next batch\r\n\r\n/opt/conda/lib/python3.8/site-packages/datasets/arrow_writer.py in check_duplicate_keys(self)\r\n 352 if hash in tmp_record:\r\n--> 353 raise DuplicatedKeysError(key)\r\n 354 else:\r\n\r\nDuplicatedKeysError: FAILURE TO GENERATE DATASET !\r\nFound duplicate Key: 519\r\nKeys should be unique and deterministic in nature\r\n```\r\n\r\nVersion: datasets==1.11.0", "Fixed by #2555.", "The wikicorpus issue has been fixed by https://github.com/huggingface/datasets/pull/2844\r\n\r\nWe'll do a new release of `datasets` soon :)" ]
1,624,871,720,000
1,630,937,310,000
1,630,571,129,000
MEMBER
null
## Describe the bug Loading `code_search_net` seems not possible at the moment. ## Steps to reproduce the bug ```python >>> load_dataset('code_search_net') Downloading: 8.50kB [00:00, 3.09MB/s] Downloading: 19.1kB [00:00, 10.1MB/s] No config specified, defaulting to: code_search_net/all Downloading and preparing dataset code_search_net/all (download: 4.77 GiB, generated: 5.99 GiB, post-processed: Unknown size, total: 10.76 GiB) to /Users/thomwolf/.cache/huggingface/datasets/code_search_net/all/1.0.0/b3e8278faf5d67da1d06981efbeac3b76a2900693bd2239bbca7a4a3b0d6e52a... Traceback (most recent call last): File "/Users/thomwolf/Documents/GitHub/datasets/src/datasets/builder.py", line 1067, in _prepare_split writer.write(example, key) File "/Users/thomwolf/Documents/GitHub/datasets/src/datasets/arrow_writer.py", line 343, in write self.check_duplicate_keys() File "/Users/thomwolf/Documents/GitHub/datasets/src/datasets/arrow_writer.py", line 354, in check_duplicate_keys raise DuplicatedKeysError(key) datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET ! Found duplicate Key: 48 Keys should be unique and deterministic in nature ``` ## Environment info - `datasets` version: 1.8.1.dev0 - Platform: macOS-10.15.7-x86_64-i386-64bit - Python version: 3.8.5 - PyArrow version: 2.0.0
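For reference, a minimal sketch of how a dataset script's `_generate_examples` can build unique, deterministic keys — combining the file name and row index into one string avoids the additive-key collisions described in the comments (the `text` column and directory layout below are illustrative assumptions, not the actual `code_search_net` schema):

```python
import os


def _generate_examples(data_dir):
    """Yield (key, example) pairs with keys that stay unique across files.

    Building the key from the file name plus the row index (as a string)
    avoids the collisions that an additive key such as file_id + row_id
    can produce when several files are read with os.listdir.
    """
    for file_name in sorted(os.listdir(data_dir)):
        with open(os.path.join(data_dir, file_name), encoding="utf-8") as f:
            for row_id, line in enumerate(f):
                key = f"{file_name}_{row_id}"  # unique and deterministic
                yield key, {"text": line.strip()}
```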
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2552/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2552/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2550
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2550/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2550/comments
https://api.github.com/repos/huggingface/datasets/issues/2550/events
https://github.com/huggingface/datasets/issues/2550
930,951,287
MDU6SXNzdWU5MzA5NTEyODc=
2,550
Allow for incremental cumulative metric updates in a distributed setup
{ "login": "eladsegal", "id": 13485709, "node_id": "MDQ6VXNlcjEzNDg1NzA5", "avatar_url": "https://avatars.githubusercontent.com/u/13485709?v=4", "gravatar_id": "", "url": "https://api.github.com/users/eladsegal", "html_url": "https://github.com/eladsegal", "followers_url": "https://api.github.com/users/eladsegal/followers", "following_url": "https://api.github.com/users/eladsegal/following{/other_user}", "gists_url": "https://api.github.com/users/eladsegal/gists{/gist_id}", "starred_url": "https://api.github.com/users/eladsegal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eladsegal/subscriptions", "organizations_url": "https://api.github.com/users/eladsegal/orgs", "repos_url": "https://api.github.com/users/eladsegal/repos", "events_url": "https://api.github.com/users/eladsegal/events{/privacy}", "received_events_url": "https://api.github.com/users/eladsegal/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[]
1,624,806,058,000
1,632,663,759,000
1,632,663,759,000
CONTRIBUTOR
null
Currently, using a metric allows for one of the following: - Per example/batch metrics - Cumulative metrics over the whole data What I'd like is to have an efficient way to get cumulative metrics over the examples/batches added so far, in order to display it as part of the progress bar during training/evaluation. Since most metrics are just an average of per-example metrics (which aren't?), an efficient calculation can be done as follows: `((score_cumulative * n_cumulative) + (score_new * n_new)) / (n_cumulative+ n_new)` where `n` and `score` refer to number of examples and metric score, `cumulative` refers to the cumulative metric and `new` refers to the addition of new examples. If you don't want to add this capability in the library, a simple solution exists so users can do it themselves: It is easy to implement for a single process setup, but in a distributed one there is no way to get the correct `n_new`. The solution for this is to return the number of examples that was used to compute the metrics in `.compute()` by adding the following line here: https://github.com/huggingface/datasets/blob/5a3221785311d0ce86c2785b765e86bd6997d516/src/datasets/metric.py#L402-L403 ``` output["number_of_examples"] = len(predictions) ``` and also remove the log message here so it won't spam: https://github.com/huggingface/datasets/blob/3db67f5ff6cbf807b129d2b4d1107af27623b608/src/datasets/metric.py#L411 If this change is ok with you, I'll open a pull request.
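A minimal sketch of the running average described above, in plain Python and outside the `datasets.Metric` API (the class name and example numbers are illustrative):

```python
class RunningMetric:
    """Cumulative average of a per-example metric, updated batch by batch."""

    def __init__(self):
        self.score = 0.0  # cumulative score so far
        self.n = 0        # number of examples seen so far

    def update(self, score_new, n_new):
        # ((score_cumulative * n_cumulative) + (score_new * n_new)) / (n_cumulative + n_new)
        total = self.score * self.n + score_new * n_new
        self.n += n_new
        self.score = total / self.n
        return self.score


tracker = RunningMetric()
print(tracker.update(0.80, 32))  # after the first batch: 0.80
print(tracker.update(0.90, 16))  # cumulative: (0.80*32 + 0.90*16) / 48 ≈ 0.833
```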
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2550/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2550/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2549
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2549/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2549/comments
https://api.github.com/repos/huggingface/datasets/issues/2549/events
https://github.com/huggingface/datasets/issues/2549
929,819,093
MDU6SXNzdWU5Mjk4MTkwOTM=
2,549
Handling unlabeled datasets
{ "login": "nelson-liu", "id": 7272031, "node_id": "MDQ6VXNlcjcyNzIwMzE=", "avatar_url": "https://avatars.githubusercontent.com/u/7272031?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nelson-liu", "html_url": "https://github.com/nelson-liu", "followers_url": "https://api.github.com/users/nelson-liu/followers", "following_url": "https://api.github.com/users/nelson-liu/following{/other_user}", "gists_url": "https://api.github.com/users/nelson-liu/gists{/gist_id}", "starred_url": "https://api.github.com/users/nelson-liu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nelson-liu/subscriptions", "organizations_url": "https://api.github.com/users/nelson-liu/orgs", "repos_url": "https://api.github.com/users/nelson-liu/repos", "events_url": "https://api.github.com/users/nelson-liu/events{/privacy}", "received_events_url": "https://api.github.com/users/nelson-liu/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "Hi @nelson-liu,\r\n\r\nYou can pass the parameter `features` to `load_dataset`: https://huggingface.co/docs/datasets/_modules/datasets/load.html#load_dataset\r\n\r\nIf you look at the code of the MNLI script you referred in your question (https://github.com/huggingface/datasets/blob/master/datasets/multi_nli/multi_nli.py#L62-L77), you can see how the Features were originally specified. \r\n\r\nFeel free to use it as a template, customize it and pass it to `load_dataset` using the parameter `features`.", "ah got it, thanks!" ]
1,624,595,543,000
1,624,655,277,000
1,624,655,276,000
NONE
null
Hi! Is there a way for datasets to produce unlabeled instances (e.g., the `ClassLabel` can be nullable). For example, I want to use the MNLI dataset reader ( https://github.com/huggingface/datasets/blob/master/datasets/multi_nli/multi_nli.py ) on a file that doesn't have the `gold_label` field. I tried setting `"label": data.get("gold_label")`, but got the following error: ``` File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/load.py", line 748, in load_dataset use_auth_token=use_auth_token, File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/builder.py", line 575, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/builder.py", line 652, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/builder.py", line 989, in _prepare_split example = self.info.features.encode_example(record) File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 953, in encode_example return encode_nested_example(self, example) File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 848, in encode_nested_example k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj) File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 848, in <dictcomp> k: encode_nested_example(sub_schema, sub_obj) for k, (sub_schema, sub_obj) in utils.zip_dict(schema, obj) File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 875, in encode_nested_example return schema.encode_example(obj) File "/home/nfliu/miniconda3/envs/debias/lib/python3.7/site-packages/datasets/features.py", line 653, in encode_example if not -1 <= example_data < self.num_classes: TypeError: '<=' not supported between instances of 'int' and 'NoneType' ``` What's the proper way to handle reading unlabeled datasets, especially for downstream usage with Transformers?
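A hedged sketch of the workaround suggested in the comments — passing custom `features` to `load_dataset` so the label is a plain nullable string rather than a `ClassLabel` (the column names follow MNLI, but the local file name is a hypothetical placeholder):

```python
from datasets import Features, Value, load_dataset

# Declare the label as a plain string (nullable) instead of a ClassLabel,
# so examples without a gold label do not fail encoding.
features = Features(
    {
        "premise": Value("string"),
        "hypothesis": Value("string"),
        "label": Value("string"),
    }
)

# "unlabeled_mnli.jsonl" is a hypothetical local file with no gold_label field.
dataset = load_dataset("json", data_files="unlabeled_mnli.jsonl", features=features)
```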
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2549/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2549/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2548
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2548/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2548/comments
https://api.github.com/repos/huggingface/datasets/issues/2548/events
https://github.com/huggingface/datasets/issues/2548
929,232,831
MDU6SXNzdWU5MjkyMzI4MzE=
2,548
Field order issue in loading json
{ "login": "luyug", "id": 55288513, "node_id": "MDQ6VXNlcjU1Mjg4NTEz", "avatar_url": "https://avatars.githubusercontent.com/u/55288513?v=4", "gravatar_id": "", "url": "https://api.github.com/users/luyug", "html_url": "https://github.com/luyug", "followers_url": "https://api.github.com/users/luyug/followers", "following_url": "https://api.github.com/users/luyug/following{/other_user}", "gists_url": "https://api.github.com/users/luyug/gists{/gist_id}", "starred_url": "https://api.github.com/users/luyug/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/luyug/subscriptions", "organizations_url": "https://api.github.com/users/luyug/orgs", "repos_url": "https://api.github.com/users/luyug/repos", "events_url": "https://api.github.com/users/luyug/events{/privacy}", "received_events_url": "https://api.github.com/users/luyug/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @luyug, thanks for reporting.\r\n\r\nThe good news is that we fixed this issue only 9 days ago: #2507.\r\n\r\nThe patch is already in the master branch of our repository and it will be included in our next `datasets` release version 1.9.0.\r\n\r\nFeel free to reopen the issue if the problem persists." ]
1,624,541,393,000
1,624,545,403,000
1,624,545,245,000
NONE
null
## Describe the bug The `load_dataset` function expects columns in alphabetical order when loading json files. Similar bug was previously reported for csv in #623 and fixed in #684. ## Steps to reproduce the bug For a json file `j.json`, ``` {"c":321, "a": 1, "b": 2} ``` Running the following, ``` f= datasets.Features({'a': Value('int32'), 'b': Value('int32'), 'c': Value('int32')}) json_data = datasets.load_dataset('json', data_files='j.json', features=f) ``` ## Expected results A successful load. ## Actual results ``` File "pyarrow/table.pxi", line 1409, in pyarrow.lib.Table.cast ValueError: Target schema's field names are not matching the table's field names: ['c', 'a', 'b'], ['a', 'b', 'c'] ``` ## Environment info - `datasets` version: 1.8.0 - Platform: Linux-3.10.0-957.1.3.el7.x86_64-x86_64-with-glibc2.10 - Python version: 3.8.8 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2548/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2548/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2547
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2547/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2547/comments
https://api.github.com/repos/huggingface/datasets/issues/2547/events
https://github.com/huggingface/datasets/issues/2547
929,192,329
MDU6SXNzdWU5MjkxOTIzMjk=
2,547
Dataset load_from_disk is too slow
{ "login": "alexvaca0", "id": 35173563, "node_id": "MDQ6VXNlcjM1MTczNTYz", "avatar_url": "https://avatars.githubusercontent.com/u/35173563?v=4", "gravatar_id": "", "url": "https://api.github.com/users/alexvaca0", "html_url": "https://github.com/alexvaca0", "followers_url": "https://api.github.com/users/alexvaca0/followers", "following_url": "https://api.github.com/users/alexvaca0/following{/other_user}", "gists_url": "https://api.github.com/users/alexvaca0/gists{/gist_id}", "starred_url": "https://api.github.com/users/alexvaca0/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/alexvaca0/subscriptions", "organizations_url": "https://api.github.com/users/alexvaca0/orgs", "repos_url": "https://api.github.com/users/alexvaca0/repos", "events_url": "https://api.github.com/users/alexvaca0/events{/privacy}", "received_events_url": "https://api.github.com/users/alexvaca0/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi ! It looks like an issue with the virtual disk you are using.\r\n\r\nWe load datasets using memory mapping. In general it makes it possible to load very big files instantaneously since it doesn't have to read the file (it just assigns virtual memory to the file on disk).\r\nHowever there happens to be issues with virtual disks (for example on spot instances), for which memory mapping does a pass over the entire file, and this takes a while. We are discussing about this issue here: #2252 \r\n\r\nMemory mapping is something handled by the OS so we can't do much about it, though we're still trying to figure out what's causing this behavior exactly to see what we can do.", "Okay, that's exactly my case, with spot instances... Therefore this isn't something we can change in any way to be able to load the dataset faster? I mean, what do you do internally at huggingface for being able to use spot instances with datasets efficiently?", "There are no solutions yet unfortunately.\r\nWe're still trying to figure out a way to make the loading instantaneous on such disks, I'll keep you posted" ]
1,624,538,744,000
1,624,632,998,000
null
NONE
null
@lhoestq ## Describe the bug It's not normal that I have to wait 7-8 hours for a dataset to be loaded from disk, as there are no preprocessing steps; it's only loading it with load_from_disk. I have 96 CPUs, however only 1 is used for this, which is inefficient. Moreover, its usage is at 1%... This is happening in the context of a language model training, therefore I'm wasting $100 each time I have to load the dataset from disk again (for example, because the spot instance was stopped by AWS and I need to relaunch it). ## Steps to reproduce the bug Just get the OSCAR dataset in Spanish (around 150GB) and try to first save it to disk and then load the processed dataset. It's not dependent on the task you're doing, it just depends on the size of the text dataset. ## Expected results I expect the dataset to be loaded in a reasonable time, using the whole machine for loading it; I mean, if you store the dataset in multiple (.arrow) files and then load it from multiple files, you can use multiprocessing for that and therefore not waste so much time. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.8.0 - Platform: Ubuntu 18 - Python version: 3.8 I've seen you're planning to include a streaming mode for load_dataset, but that only saves the downloading and processing time; that's not the problem for me. You cannot avoid the pure loading-from-disk time, therefore that's not a solution for my use case or for anyone who wants to use your library for training a language model.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2547/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2547/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2543
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2543/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2543/comments
https://api.github.com/repos/huggingface/datasets/issues/2543/events
https://github.com/huggingface/datasets/issues/2543
928,571,915
MDU6SXNzdWU5Mjg1NzE5MTU=
2,543
switching some low-level log.info's to log.debug?
{ "login": "stas00", "id": 10676103, "node_id": "MDQ6VXNlcjEwNjc2MTAz", "avatar_url": "https://avatars.githubusercontent.com/u/10676103?v=4", "gravatar_id": "", "url": "https://api.github.com/users/stas00", "html_url": "https://github.com/stas00", "followers_url": "https://api.github.com/users/stas00/followers", "following_url": "https://api.github.com/users/stas00/following{/other_user}", "gists_url": "https://api.github.com/users/stas00/gists{/gist_id}", "starred_url": "https://api.github.com/users/stas00/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/stas00/subscriptions", "organizations_url": "https://api.github.com/users/stas00/orgs", "repos_url": "https://api.github.com/users/stas00/repos", "events_url": "https://api.github.com/users/stas00/events{/privacy}", "received_events_url": "https://api.github.com/users/stas00/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @stas00, thanks for pointing out this issue with logging.\r\n\r\nI agree that `datasets` can sometimes be too verbose... I can create a PR and we could discuss there the choice of the log levels for different parts of the code." ]
1,624,476,415,000
1,624,628,419,000
1,624,628,419,000
CONTRIBUTOR
null
In https://github.com/huggingface/transformers/pull/12276 we are now changing the examples to have `datasets` on the same log level as `transformers`, so that one setting can do a consistent logging across all involved components. The trouble is that now we get a ton of these: ``` 06/23/2021 12:15:31 - INFO - datasets.utils.filelock - Lock 139627640431136 acquired on /home/stas/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow.lock 06/23/2021 12:15:31 - INFO - datasets.arrow_writer - Done writing 50 examples in 12280 bytes /home/stas/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow. 06/23/2021 12:15:31 - INFO - datasets.arrow_dataset - Set __getitem__(key) output type to python objects for no columns (when key is int or slice) and don't output other (un-formatted) columns. 06/23/2021 12:15:31 - INFO - datasets.utils.filelock - Lock 139627640431136 released on /home/stas/.cache/huggingface/metrics/sacrebleu/default/default_experiment-1-0.arrow.lock ``` May I suggest that these can be `log.debug` as it's no informative to the user. More examples: these are not informative - too much information: ``` 06/23/2021 12:14:26 - INFO - datasets.load - Checking /home/stas/.cache/huggingface/datasets/downloads/459933f1fe47711fad2f6ff8110014ff189120b45ad159ef5b8e90ea43a174fa.e23e7d1259a8c6274a82a42a8936dd1b87225302c6dc9b7261beb3bc2daac640.py for additional imports. 06/23/2021 12:14:27 - INFO - datasets.builder - Constructing Dataset for split train, validation, test, from /home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a ``` While these are: ``` 06/23/2021 12:14:27 - INFO - datasets.info - Loading Dataset Infos from /home/stas/.cache/huggingface/modules/datasets_modules/datasets/wmt16/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a 06/23/2021 12:14:27 - WARNING - datasets.builder - Reusing dataset wmt16 (/home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a) ``` I also realize that `transformers` examples don't have do use `info` for `datasets` to let the default `warning` keep logging to less noisy. But I think currently the log levels are slightly misused and skewed by 1 level. Many `warnings` will better be `info`s and most `info`s be `debug`. e.g.: ``` 06/23/2021 12:14:27 - WARNING - datasets.builder - Reusing dataset wmt16 (/home/stas/.cache/huggingface/datasets/wmt16/ro-en/1.0.0/0d9fb3e814712c785176ad8cdb9f465fbe6479000ee6546725db30ad8a8b5f8a) ``` why is this a warning? it is informing me that the cache is used, there is nothing to be worried about. I'd have it as `info`. Warnings are typically something that's bordering error or the first thing to check when things don't work as expected. infrequent info is there to inform of the different stages or important events. Everything else is debug. At least the way I understand things.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2543/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2543/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2542
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2542/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2542/comments
https://api.github.com/repos/huggingface/datasets/issues/2542/events
https://github.com/huggingface/datasets/issues/2542
928,540,382
MDU6SXNzdWU5Mjg1NDAzODI=
2,542
`datasets.keyhash.DuplicatedKeysError` for `drop` and `adversarial_qa/adversarialQA`
{ "login": "VictorSanh", "id": 16107619, "node_id": "MDQ6VXNlcjE2MTA3NjE5", "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/VictorSanh", "html_url": "https://github.com/VictorSanh", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "repos_url": "https://api.github.com/users/VictorSanh/repos", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "very much related: https://github.com/huggingface/datasets/pull/2333", "Hi @VictorSanh, thank you for reporting this issue with duplicated keys.\r\n\r\n- The issue with \"adversarial_qa\" was fixed 23 days ago: #2433. Current version of `datasets` (1.8.0) includes the patch.\r\n- I am investigating the issue with `drop`. I'll ping you to keep you informed.", "Hi @VictorSanh, the issue is already fixed and merged into master branch and will be included in our next release version 1.9.0.", "thank you!" ]
1,624,473,676,000
1,624,657,805,000
1,624,546,628,000
MEMBER
null
## Describe the bug Failure to generate the datasets (`drop` and subset `adversarialQA` from `adversarial_qa`) because of duplicate keys. ## Steps to reproduce the bug ```python from datasets import load_dataset load_dataset("drop") load_dataset("adversarial_qa", "adversarialQA") ``` ## Expected results The examples keys should be unique. ## Actual results ```bash >>> load_dataset("drop") Using custom data configuration default Downloading and preparing dataset drop/default (download: 7.92 MiB, generated: 111.88 MiB, post-processed: Unknown size, total: 119.80 MiB) to /home/hf/.cache/huggingface/datasets/drop/default/0.1.0/7a94f1e2bb26c4b5c75f89857c06982967d7416e5af935a9374b9bccf5068026... Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/load.py", line 751, in load_dataset use_auth_token=use_auth_token, File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 575, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 652, in _download_and_prepare self._prepare_split(split_generator, **prepare_split_kwargs) File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 992, in _prepare_split num_examples, num_bytes = writer.finalize() File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/arrow_writer.py", line 409, in finalize self.check_duplicate_keys() File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/arrow_writer.py", line 349, in check_duplicate_keys raise DuplicatedKeysError(key) datasets.keyhash.DuplicatedKeysError: FAILURE TO GENERATE DATASET ! Found duplicate Key: 28553293-d719-441b-8f00-ce3dc6df5398 Keys should be unique and deterministic in nature ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.7.0 - Platform: Linux-5.4.0-1044-gcp-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.10 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2542/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2542/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2538
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2538/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2538/comments
https://api.github.com/repos/huggingface/datasets/issues/2538/events
https://github.com/huggingface/datasets/issues/2538
927,940,691
MDU6SXNzdWU5Mjc5NDA2OTE=
2,538
Loading partial dataset when debugging
{ "login": "reachtarunhere", "id": 9061913, "node_id": "MDQ6VXNlcjkwNjE5MTM=", "avatar_url": "https://avatars.githubusercontent.com/u/9061913?v=4", "gravatar_id": "", "url": "https://api.github.com/users/reachtarunhere", "html_url": "https://github.com/reachtarunhere", "followers_url": "https://api.github.com/users/reachtarunhere/followers", "following_url": "https://api.github.com/users/reachtarunhere/following{/other_user}", "gists_url": "https://api.github.com/users/reachtarunhere/gists{/gist_id}", "starred_url": "https://api.github.com/users/reachtarunhere/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/reachtarunhere/subscriptions", "organizations_url": "https://api.github.com/users/reachtarunhere/orgs", "repos_url": "https://api.github.com/users/reachtarunhere/repos", "events_url": "https://api.github.com/users/reachtarunhere/events{/privacy}", "received_events_url": "https://api.github.com/users/reachtarunhere/received_events", "type": "User", "site_admin": false }
[]
open
false
null
[]
null
[ "Hi ! `load_dataset` downloads the full dataset once and caches it, so that subsequent calls to `load_dataset` just reloads the dataset from your disk.\r\nThen when you specify a `split` in `load_dataset`, it will just load the requested split from the disk. If your specified split is a sliced split (e.g. `\"train[:10]\"`), then it will load the 10 first rows of the train split that you have on disk.\r\n\r\nTherefore, as long as you don't delete your cache, all your calls to `load_dataset` will be very fast. Except the first call that downloads the dataset of course ^^", "That’s a use case for the new streaming feature, no?", "Hi @reachtarunhere.\r\n\r\nBesides the above insights provided by @lhoestq and @thomwolf, there is also a Dataset feature in progress (I plan to finish it this week): #2249, which will allow you, when calling `load_dataset`, to pass the option to download/preprocess/cache only some specific split(s), which will definitely speed up your workflow.\r\n\r\nIf this feature is interesting for you, I can ping you once it will be merged into the master branch.", "Thanks all for responding.\r\n\r\nHey @albertvillanova \r\n\r\nThanks. Yes, I would be interested.\r\n\r\n@lhoestq I think even if a small split is specified it loads up the full dataset from the disk (please correct me if this is not the case). Because it does seem to be slow to me even on subsequent calls. There is no repeated downloading so it seems that the cache is working.\r\n\r\nI am not aware of the streaming feature @thomwolf mentioned. So I might need to read up on it.", "@reshinthadithyan I use the .select function to have a fraction of indices." ]
1,624,432,792,000
1,627,567,833,000
null
NONE
null
I am using PyTorch Lightning along with datasets (thanks for so many datasets already prepared and the great splits). Every time I execute load_dataset for the imdb dataset it takes some time, even if I specify a split involving very few samples. I guess this is due to hashing, as per the other issues. Is there a way to only load part of the dataset with load_dataset? This would really speed up my workflow. Something like a debug mode would really help. Thanks!
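For reference, a small sketch of the two existing patterns mentioned in the replies — slicing the split at load time and selecting a subset of indices afterwards (the dataset name and sizes are just examples):

```python
from datasets import load_dataset

# Load only the first 200 examples of the train split. The full dataset is
# still downloaded and cached once, but later calls read from the cache.
small_train = load_dataset("imdb", split="train[:200]")

# Alternatively, load the whole split and keep a fraction of the indices.
full_train = load_dataset("imdb", split="train")
debug_subset = full_train.select(range(200))

print(len(small_train), len(debug_subset))
```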
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2538/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2538/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2536
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2536/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2536/comments
https://api.github.com/repos/huggingface/datasets/issues/2536/events
https://github.com/huggingface/datasets/issues/2536
927,338,639
MDU6SXNzdWU5MjczMzg2Mzk=
2,536
Use `Audio` features for `AutomaticSpeechRecognition` task template
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false } ]
null
[ "I'm just retaking and working on #2324. 😉 " ]
1,624,374,441,000
1,624,375,011,000
null
MEMBER
null
In #2533 we added a task template for speech recognition that relies on the file paths to the audio files. As pointed out by @SBrandeis this is brittle as it doesn't port easily across different OS'. The solution is to use dedicated `Audio` features when casting the dataset. These features are not yet available in `datasets`, but should be included in the `AutomaticSpeechRecognition` template once they are.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2536/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2536/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2532
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2532/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2532/comments
https://api.github.com/repos/huggingface/datasets/issues/2532/events
https://github.com/huggingface/datasets/issues/2532
927,063,196
MDU6SXNzdWU5MjcwNjMxOTY=
2,532
Tokenizer's normalization preprocessor cause misalignment in return_offsets_mapping for tokenizer classification task
{ "login": "jerryIsHere", "id": 50871412, "node_id": "MDQ6VXNlcjUwODcxNDEy", "avatar_url": "https://avatars.githubusercontent.com/u/50871412?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jerryIsHere", "html_url": "https://github.com/jerryIsHere", "followers_url": "https://api.github.com/users/jerryIsHere/followers", "following_url": "https://api.github.com/users/jerryIsHere/following{/other_user}", "gists_url": "https://api.github.com/users/jerryIsHere/gists{/gist_id}", "starred_url": "https://api.github.com/users/jerryIsHere/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jerryIsHere/subscriptions", "organizations_url": "https://api.github.com/users/jerryIsHere/orgs", "repos_url": "https://api.github.com/users/jerryIsHere/repos", "events_url": "https://api.github.com/users/jerryIsHere/events{/privacy}", "received_events_url": "https://api.github.com/users/jerryIsHere/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @jerryIsHere, thanks for reporting the issue. But are you sure this is a bug in HuggingFace **Datasets**?", "> Hi @jerryIsHere, thanks for reporting the issue. But are you sure this is a bug in HuggingFace **Datasets**?\r\n\r\nOh, I am sorry\r\nI would reopen the post on huggingface/transformers" ]
1,624,356,498,000
1,624,425,445,000
1,624,425,445,000
CONTRIBUTOR
null
[This colab notebook](https://colab.research.google.com/drive/151gKyo0YIwnlznrOHst23oYH_a3mAe3Z?usp=sharing) implements a token classification input pipeline extending the logic from [this hugging example](https://huggingface.co/transformers/custom_datasets.html#tok-ner). The pipeline works fine with most instances in different languages, but unfortunately, [the Japanese kana ligature (a form of abbreviation? I don't know Japanese well)](https://en.wikipedia.org/wiki/Kana_ligature) breaks the alignment of `return_offsets_mapping`: ![image](https://user-images.githubusercontent.com/50871412/122904371-db192700-d382-11eb-8917-1775db76db69.png) Without the try/except block, it raises `ValueError: NumPy boolean array indexing assignment cannot assign 88 input values to the 87 output values where the mask is true`; an example is shown here [(another colab notebook)](https://colab.research.google.com/drive/1MmOqf3ppzzdKKyMWkn0bJy6DqzOO0SSm?usp=sharing). It is clear that the normalizer is the step that breaks the alignment, as `tokenizer._tokenizer.normalizer.normalize_str('ヿ')` returns 'コト'. One workaround is to apply `tokenizer._tokenizer.normalizer.normalize_str` before the tokenizer preprocessing pipeline, which is also provided in the [first colab notebook](https://colab.research.google.com/drive/151gKyo0YIwnlznrOHst23oYH_a3mAe3Z?usp=sharing) under the name `udposTestDatasetWorkaround`. I guess similar logic should be included inside the tokenizer and the offsets_mapping generation process so that users don't need to include it in their code. But I don't understand the tokenizer code well enough to do this myself. p.s. **I am using my own dataset building script in the provided example, but the script should be equivalent to the changes made by this [update](https://github.com/huggingface/datasets/pull/2466)**. `get_dataset` is just a simple wrapper around `load_dataset`, and the `tokenizer` is just `XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-large")`
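A rough sketch of that workaround — running the tokenizer's own normalizer over each pre-tokenized word before encoding, so the offsets are computed on already-normalized text (the function name and sample sentence are illustrative, not taken from the notebook):

```python
from transformers import XLMRobertaTokenizerFast

tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-large")


def normalize_words(words):
    # Apply the tokenizer's normalizer to each word first, e.g. the kana
    # ligature 'ヿ' becomes 'コト', so the offsets match the text being tokenized.
    return [tokenizer._tokenizer.normalizer.normalize_str(w) for w in words]


words = ["これ", "は", "ヿ", "です"]
encoding = tokenizer(
    normalize_words(words),
    is_split_into_words=True,
    return_offsets_mapping=True,
)
print(encoding["offset_mapping"])
```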
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2532/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2532/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2528
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2528/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2528/comments
https://api.github.com/repos/huggingface/datasets/issues/2528/events
https://github.com/huggingface/datasets/issues/2528
926,314,656
MDU6SXNzdWU5MjYzMTQ2NTY=
2,528
Logging cannot be set to NOTSET similar to transformers
{ "login": "joshzwiebel", "id": 34662010, "node_id": "MDQ6VXNlcjM0NjYyMDEw", "avatar_url": "https://avatars.githubusercontent.com/u/34662010?v=4", "gravatar_id": "", "url": "https://api.github.com/users/joshzwiebel", "html_url": "https://github.com/joshzwiebel", "followers_url": "https://api.github.com/users/joshzwiebel/followers", "following_url": "https://api.github.com/users/joshzwiebel/following{/other_user}", "gists_url": "https://api.github.com/users/joshzwiebel/gists{/gist_id}", "starred_url": "https://api.github.com/users/joshzwiebel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/joshzwiebel/subscriptions", "organizations_url": "https://api.github.com/users/joshzwiebel/orgs", "repos_url": "https://api.github.com/users/joshzwiebel/repos", "events_url": "https://api.github.com/users/joshzwiebel/events{/privacy}", "received_events_url": "https://api.github.com/users/joshzwiebel/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @joshzwiebel, thanks for reporting. We are going to align with `transformers`." ]
1,624,287,894,000
1,624,545,767,000
1,624,545,767,000
NONE
null
## Describe the bug In the transformers library you can set the verbosity level to logging.NOTSET to work around the usage of tqdm and IPywidgets, however in Datasets this is no longer possible. This is because transformers set the verbosity level of tqdm with [this](https://github.com/huggingface/transformers/blob/b53bc55ba9bb10d5ee279eab51a2f0acc5af2a6b/src/transformers/file_utils.py#L1449) `disable=bool(logging.get_verbosity() == logging.NOTSET)` and datasets accomplishes this like [so](https://github.com/huggingface/datasets/blob/83554e410e1ab8c6f705cfbb2df7953638ad3ac1/src/datasets/utils/file_utils.py#L493) `not_verbose = bool(logger.getEffectiveLevel() > WARNING)` ## Steps to reproduce the bug ```python import datasets import logging datasets.logging.get_verbosity = lambda : logging.NOTSET datasets.load_dataset("patrickvonplaten/librispeech_asr_dummy") ``` ## Expected results The code should download and load the dataset as normal without displaying progress bars ## Actual results ```ImportError Traceback (most recent call last) <ipython-input-4-aec65c0509c6> in <module> ----> 1 datasets.load_dataset("patrickvonplaten/librispeech_asr_dummy") ~/venv/lib/python3.7/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, **config_kwargs) 713 dataset=True, 714 return_resolved_file_path=True, --> 715 use_auth_token=use_auth_token, 716 ) 717 # Set the base path for downloads as the parent of the script location ~/venv/lib/python3.7/site-packages/datasets/load.py in prepare_module(path, script_version, download_config, download_mode, dataset, force_local_path, dynamic_modules_path, return_resolved_file_path, **download_kwargs) 350 file_path = hf_bucket_url(path, filename=name, dataset=False) 351 try: --> 352 local_path = cached_path(file_path, download_config=download_config) 353 except FileNotFoundError: 354 raise FileNotFoundError( ~/venv/lib/python3.7/site-packages/datasets/utils/file_utils.py in cached_path(url_or_filename, download_config, **download_kwargs) 289 use_etag=download_config.use_etag, 290 max_retries=download_config.max_retries, --> 291 use_auth_token=download_config.use_auth_token, 292 ) 293 elif os.path.exists(url_or_filename): ~/venv/lib/python3.7/site-packages/datasets/utils/file_utils.py in get_from_cache(url, cache_dir, force_download, proxies, etag_timeout, resume_download, user_agent, local_files_only, use_etag, max_retries, use_auth_token) 668 headers=headers, 669 cookies=cookies, --> 670 max_retries=max_retries, 671 ) 672 ~/venv/lib/python3.7/site-packages/datasets/utils/file_utils.py in http_get(url, temp_file, proxies, resume_size, headers, cookies, timeout, max_retries) 493 initial=resume_size, 494 desc="Downloading", --> 495 disable=not_verbose, 496 ) 497 for chunk in response.iter_content(chunk_size=1024): ~/venv/lib/python3.7/site-packages/tqdm/notebook.py in __init__(self, *args, **kwargs) 217 total = self.total * unit_scale if self.total else self.total 218 self.container = self.status_printer( --> 219 self.fp, total, self.desc, self.ncols) 220 self.sp = self.display 221 ~/venv/lib/python3.7/site-packages/tqdm/notebook.py in status_printer(_, total, desc, ncols) 95 if IProgress is None: # #187 #451 #558 #872 96 raise ImportError( ---> 97 "IProgress not found. Please update jupyter and ipywidgets." 
98 " See https://ipywidgets.readthedocs.io/en/stable" 99 "/user_install.html") ImportError: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.8.0 - Platform: Linux-5.4.95-42.163.amzn2.x86_64-x86_64-with-debian-10.8 - Python version: 3.7.10 - PyArrow version: 3.0.0 I am running this code on Deepnote and which important to this issue **does not** support IPywidgets
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2528/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2528/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2526
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2526/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2526/comments
https://api.github.com/repos/huggingface/datasets/issues/2526/events
https://github.com/huggingface/datasets/issues/2526
925,929,228
MDU6SXNzdWU5MjU5MjkyMjg=
2,526
Add COCO datasets
{ "login": "NielsRogge", "id": 48327001, "node_id": "MDQ6VXNlcjQ4MzI3MDAx", "avatar_url": "https://avatars.githubusercontent.com/u/48327001?v=4", "gravatar_id": "", "url": "https://api.github.com/users/NielsRogge", "html_url": "https://github.com/NielsRogge", "followers_url": "https://api.github.com/users/NielsRogge/followers", "following_url": "https://api.github.com/users/NielsRogge/following{/other_user}", "gists_url": "https://api.github.com/users/NielsRogge/gists{/gist_id}", "starred_url": "https://api.github.com/users/NielsRogge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NielsRogge/subscriptions", "organizations_url": "https://api.github.com/users/NielsRogge/orgs", "repos_url": "https://api.github.com/users/NielsRogge/repos", "events_url": "https://api.github.com/users/NielsRogge/events{/privacy}", "received_events_url": "https://api.github.com/users/NielsRogge/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" }, { "id": 3608941089, "node_id": "LA_kwDODunzps7XHBIh", "url": "https://api.github.com/repos/huggingface/datasets/labels/vision", "name": "vision", "color": "bfdadc", "default": false, "description": "Vision datasets" } ]
open
false
{ "login": "merveenoyan", "id": 53175384, "node_id": "MDQ6VXNlcjUzMTc1Mzg0", "avatar_url": "https://avatars.githubusercontent.com/u/53175384?v=4", "gravatar_id": "", "url": "https://api.github.com/users/merveenoyan", "html_url": "https://github.com/merveenoyan", "followers_url": "https://api.github.com/users/merveenoyan/followers", "following_url": "https://api.github.com/users/merveenoyan/following{/other_user}", "gists_url": "https://api.github.com/users/merveenoyan/gists{/gist_id}", "starred_url": "https://api.github.com/users/merveenoyan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/merveenoyan/subscriptions", "organizations_url": "https://api.github.com/users/merveenoyan/orgs", "repos_url": "https://api.github.com/users/merveenoyan/repos", "events_url": "https://api.github.com/users/merveenoyan/events{/privacy}", "received_events_url": "https://api.github.com/users/merveenoyan/received_events", "type": "User", "site_admin": false }
[ { "login": "merveenoyan", "id": 53175384, "node_id": "MDQ6VXNlcjUzMTc1Mzg0", "avatar_url": "https://avatars.githubusercontent.com/u/53175384?v=4", "gravatar_id": "", "url": "https://api.github.com/users/merveenoyan", "html_url": "https://github.com/merveenoyan", "followers_url": "https://api.github.com/users/merveenoyan/followers", "following_url": "https://api.github.com/users/merveenoyan/following{/other_user}", "gists_url": "https://api.github.com/users/merveenoyan/gists{/gist_id}", "starred_url": "https://api.github.com/users/merveenoyan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/merveenoyan/subscriptions", "organizations_url": "https://api.github.com/users/merveenoyan/orgs", "repos_url": "https://api.github.com/users/merveenoyan/repos", "events_url": "https://api.github.com/users/merveenoyan/events{/privacy}", "received_events_url": "https://api.github.com/users/merveenoyan/received_events", "type": "User", "site_admin": false } ]
null
[ "I'm currently adding it, the entire dataset is quite big around 30 GB so I add splits separately. You can take a look here https://huggingface.co/datasets/merve/coco", "I talked to @lhoestq and it's best if I download this dataset through TensorFlow datasets instead, so I'll be implementing that one really soon.\r\n@NielsRogge ", "I started adding COCO, will be done tomorrow EOD\r\nmy work so far https://github.com/merveenoyan/datasets (my fork)", "Hi Merve @merveenoyan , thank you so much for your great contribution! May I ask about the current progress of your implementation? Cuz I see the pull request is still in progess here. Or can I just run the COCO scripts in your fork repo?", "Hello @yixuanren I had another prioritized project about to be merged, but I'll start continuing today will finish up soon. ", "> Hello @yixuanren I had another prioritized project about to be merged, but I'll start continuing today will finish up soon.\r\n\r\nIt's really nice of you!! I see you've commited another version just now" ]
1,624,261,712,000
1,639,695,617,000
null
NONE
null
## Adding a Dataset - **Name:** COCO - **Description:** COCO is a large-scale object detection, segmentation, and captioning dataset. - **Paper + website:** https://cocodataset.org/#home - **Data:** https://cocodataset.org/#download - **Motivation:** It would be great to have COCO available in HuggingFace datasets, as we are moving beyond just text. COCO includes multi-modalities (images + text), as well as a huge amount of images annotated with objects, segmentation masks, keypoints etc., on which models like DETR (which I recently added to HuggingFace Transformers) are trained. Currently, one needs to download everything from the website and place it in a local folder, but it would be much easier if we can directly access it through the datasets API. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2526/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2526/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2523
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2523/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2523/comments
https://api.github.com/repos/huggingface/datasets/issues/2523/events
https://github.com/huggingface/datasets/issues/2523
925,421,008
MDU6SXNzdWU5MjU0MjEwMDg=
2,523
Fr
{ "login": "aDrIaNo34500", "id": 71971234, "node_id": "MDQ6VXNlcjcxOTcxMjM0", "avatar_url": "https://avatars.githubusercontent.com/u/71971234?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aDrIaNo34500", "html_url": "https://github.com/aDrIaNo34500", "followers_url": "https://api.github.com/users/aDrIaNo34500/followers", "following_url": "https://api.github.com/users/aDrIaNo34500/following{/other_user}", "gists_url": "https://api.github.com/users/aDrIaNo34500/gists{/gist_id}", "starred_url": "https://api.github.com/users/aDrIaNo34500/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aDrIaNo34500/subscriptions", "organizations_url": "https://api.github.com/users/aDrIaNo34500/orgs", "repos_url": "https://api.github.com/users/aDrIaNo34500/repos", "events_url": "https://api.github.com/users/aDrIaNo34500/events{/privacy}", "received_events_url": "https://api.github.com/users/aDrIaNo34500/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
1,624,118,192,000
1,624,128,503,000
1,624,128,503,000
NONE
null
__Originally posted by @lewtun in https://github.com/huggingface/datasets/pull/2469__
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2523/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2523/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2522
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2522/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2522/comments
https://api.github.com/repos/huggingface/datasets/issues/2522/events
https://github.com/huggingface/datasets/issues/2522
925,334,379
MDU6SXNzdWU5MjUzMzQzNzk=
2,522
Documentation Mistakes in Dataset: emotion
{ "login": "GDGauravDutta", "id": 62606251, "node_id": "MDQ6VXNlcjYyNjA2MjUx", "avatar_url": "https://avatars.githubusercontent.com/u/62606251?v=4", "gravatar_id": "", "url": "https://api.github.com/users/GDGauravDutta", "html_url": "https://github.com/GDGauravDutta", "followers_url": "https://api.github.com/users/GDGauravDutta/followers", "following_url": "https://api.github.com/users/GDGauravDutta/following{/other_user}", "gists_url": "https://api.github.com/users/GDGauravDutta/gists{/gist_id}", "starred_url": "https://api.github.com/users/GDGauravDutta/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/GDGauravDutta/subscriptions", "organizations_url": "https://api.github.com/users/GDGauravDutta/orgs", "repos_url": "https://api.github.com/users/GDGauravDutta/repos", "events_url": "https://api.github.com/users/GDGauravDutta/events{/privacy}", "received_events_url": "https://api.github.com/users/GDGauravDutta/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi,\r\n\r\nthis issue has been already reported in the dataset repo (https://github.com/dair-ai/emotion_dataset/issues/2), so this is a bug on their side." ]
1,624,086,537,000
1,624,124,296,000
null
NONE
null
As per documentation, Dataset: emotion Homepage: https://github.com/dair-ai/emotion_dataset Dataset: https://github.com/huggingface/datasets/blob/master/datasets/emotion/emotion.py Permalink: https://huggingface.co/datasets/viewer/?dataset=emotion Emotion is a dataset of English Twitter messages with eight basic emotions: anger, anticipation, disgust, fear, joy, sadness, surprise, and trust. For more detailed information please refer to the paper. But when we view the data, there are only 6 emotions, anger, fear, joy, sadness, surprise, and trust.
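For readers who want to verify this locally, a quick sketch (the exact label names depend on the released version of the dataset):

```python
from datasets import load_dataset

# Sketch: inspect the label set the loaded data actually carries,
# rather than relying on the dataset card.
ds = load_dataset("emotion", split="train")
print(ds.features["label"].names)  # the class names present in the data
print(sorted(set(ds["label"])))    # the integer ids actually used
```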
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2522/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2522/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2520
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2520/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2520/comments
https://api.github.com/repos/huggingface/datasets/issues/2520/events
https://github.com/huggingface/datasets/issues/2520
925,015,004
MDU6SXNzdWU5MjUwMTUwMDQ=
2,520
Datasets with tricky task templates
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 2067401494, "node_id": "MDU6TGFiZWwyMDY3NDAxNDk0", "url": "https://api.github.com/repos/huggingface/datasets/labels/Dataset%20discussion", "name": "Dataset discussion", "color": "72f99f", "default": false, "description": "Discussions on the datasets" } ]
open
false
null
[]
null
[]
1,624,030,437,000
1,624,031,186,000
null
MEMBER
null
I'm collecting a list of datasets here that don't follow the "standard" taxonomy and require further investigation to implement task templates for. ## Text classification * [hatexplain](https://huggingface.co/datasets/hatexplain): ostensibly a form of text classification, but not in the standard `(text, target)` format, and each sample appears to be tokenized. * [muchocine](https://huggingface.co/datasets/muchocine): contains two candidate text columns (long-form and summary), which in principle requires two `TextClassification` templates; this is not currently supported.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2520/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2520/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2516
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2516/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2516/comments
https://api.github.com/repos/huggingface/datasets/issues/2516/events
https://github.com/huggingface/datasets/issues/2516
924,597,470
MDU6SXNzdWU5MjQ1OTc0NzA=
2,516
datasets.map pickle issue resulting in invalid mapping function
{ "login": "david-waterworth", "id": 5028974, "node_id": "MDQ6VXNlcjUwMjg5NzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/5028974?v=4", "gravatar_id": "", "url": "https://api.github.com/users/david-waterworth", "html_url": "https://github.com/david-waterworth", "followers_url": "https://api.github.com/users/david-waterworth/followers", "following_url": "https://api.github.com/users/david-waterworth/following{/other_user}", "gists_url": "https://api.github.com/users/david-waterworth/gists{/gist_id}", "starred_url": "https://api.github.com/users/david-waterworth/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/david-waterworth/subscriptions", "organizations_url": "https://api.github.com/users/david-waterworth/orgs", "repos_url": "https://api.github.com/users/david-waterworth/repos", "events_url": "https://api.github.com/users/david-waterworth/events{/privacy}", "received_events_url": "https://api.github.com/users/david-waterworth/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi ! `map` calls `__getstate__` using `dill` to hash your map function. This is used by the caching mechanism to recover previously computed results. That's why you don't see any `__setstate__` call.\r\n\r\nWhy do you change an attribute of your tokenizer when `__getstate__` is called ?", "@lhoestq because if I try to pickle my custom tokenizer (it contains a pure python pretokenization step in an otherwise rust backed tokenizer) I get\r\n\r\n> Exception: Error while attempting to pickle Tokenizer: Custom PreTokenizer cannot be serialized\r\n\r\nSo I remove the Custom PreTokenizer in `__getstate__` and then restore it in `__setstate__` (since it doesn't contain any state). This is what my `__getstate__` / `__setstate__` looks like:\r\n\r\n def __getstate__(self):\r\n \"\"\"\r\n Removes pre_tokenizer since it cannot be pickled\r\n \"\"\"\r\n logger.debug(\"Copy state dict\")\r\n out = self.__dict__.copy()\r\n logger.debug(\"Detaching pre_tokenizer\")\r\n out['_tokenizer'].pre_tokenizer = tokenizers.pre_tokenizers.Sequence([]) \r\n return out\r\n\r\n def __setstate__(self, d):\r\n \"\"\"\r\n Reinstates pre_tokenizer\r\n \"\"\"\r\n logger.debug(\"Reattaching pre_tokenizer\")\r\n self.__dict__ = d\r\n self.backend_tokenizer.pre_tokenizer = self._pre_tokenizer()\r\n\r\nIf this is the case can you think of another way of avoiding my issue?", "Actually, maybe I need to deep copy `self.__dict__`? That way `self` isn't modified. That was my intention and I thought it was working - I'll double-check after the weekend.", "Doing a deep copy results in the warning:\r\n\r\n> 06/20/2021 16:02:15 - WARNING - datasets.fingerprint - Parameter 'function'=<function tokenize_function at 0x7f1e95f05d40> of the transform datasets.arrow_dataset.Dataset._map_single couldn't be hashed properly, a random hash was used instead. Make sure your transforms and parameters are serializable with pickle or dill for the dataset fingerprinting and caching to work. If you reuse this transform, the caching mechanism will consider it to be different from the previous calls and recompute everything. This warning is only showed once. Subsequent hashing failures won't be showed.\r\n\r\n\r\n```\r\ndef __getstate__(self):\r\n \"\"\"\r\n Removes pre_tokenizer since it cannot be pickled\r\n \"\"\"\r\n logger.debug(\"Copy state dict\")\r\n out = copy.deepcopy(self.__dict__)\r\n logger.debug(\"Detaching pre_tokenizer\")\r\n out['_tokenizer'].pre_tokenizer = tokenizers.pre_tokenizers.Sequence([]) \r\n return out\r\n```", "Looks like there is still an object that is not pickable in your `tokenize_function` function.\r\n\r\nYou can test if an object can be pickled and hashed by using \r\n```python\r\nfrom datasets.fingerprint import Hasher\r\n\r\nHasher.hash(my_object)\r\n```\r\n\r\nUnder the hood it pickles the object to compute its hash, so it calls `__getstate__` when applicable.", "I figured it out, the problem is deep copy itself uses pickle (unless you implement `__deepcopy__`). So when I changed `__getstate__` it started throwing an error.\r\n\r\nI'm sure there's a better way of doing this, but in order to return the `__dict__` without the non-pikelable pre-tokeniser and without modifying self I removed the pre-tokenizers, did a deep copy and then re-generated it.\r\n\r\nIt does work - although I noticed Hasher doesn't call `__hash__` if the object being hashed implements it which I feel it should? 
If it did I could return a hash of the tokenizers.json file instead.\r\n\r\n```\r\n def __getstate__(self):\r\n \"\"\"\r\n Removes pre_tokenizer since it cannot be pickled\r\n \"\"\"\r\n logger.debug(\"Copy state dict\")\r\n self.backend_tokenizer.pre_tokenizer = tokenizers.pre_tokenizers.Sequence([])\r\n out = copy.deepcopy(self.__dict__) #self.__dict__.copy()\r\n self.backend_tokenizer.pre_tokenizer = self._pre_tokenizer()\r\n\r\n return out\r\n```\r\n", "I'm glad you figured something out :)\r\n\r\nRegarding hashing: we're not using hashing for the same purpose as the python `__hash__` purpose (which is in general for dictionary lookups). For example it is allowed for python hashing to not return the same hash across sessions, while our hashing must return the same hashes across sessions for the caching to work properly." ]
1,623,998,846,000
1,624,456,069,000
null
NONE
null
I trained my own tokenizer, and I needed to use a python custom class. Because of this I have to detach the custom step before saving and reattach after restore. I did this using the standard pickle `__get_state__` / `__set_state__` mechanism. I think it's correct but it fails when I use it inside a function which is mapped to a dataset, i.e. in the manner of run_mlm.py and other huggingface scripts. The following reproduces the issue - most likely I'm missing something A simulated tokeniser which can be pickled ``` class CustomTokenizer: def __init__(self): self.state = "init" def __getstate__(self): print("__getstate__ called") out = self.__dict__.copy() self.state = "pickled" return out def __setstate__(self, d): print("__setstate__ called") self.__dict__ = d self.state = "restored" tokenizer = CustomTokenizer() ``` Test that it actually works - prints "__getstate__ called" and "__setstate__ called" ``` import pickle serialized = pickle.dumps(tokenizer) restored = pickle.loads(serialized) assert restored.state == "restored" ``` Simulate a function that tokenises examples, when dataset.map is called, this function ``` def tokenize_function(examples): assert tokenizer.state == "restored" # this shouldn't fail but it does output = tokenizer(examples) # this will fail as tokenizer isn't really a tokenizer return output ``` Use map to simulate tokenization ``` import glob from datasets import load_dataset assert tokenizer.state == "restored" train_files = glob.glob('train*.csv') validation_files = glob.glob('validation*.csv') datasets = load_dataset("csv", data_files=dict(train=train_files, validation=validation_files)) tokenized_datasets = datasets.map( tokenize_function, batched=True, ) ``` What's happening is I can see that __getstate__ is called but not __setstate__, so the state of `tokenize_function` is invalid at the point that it's actually executed. This doesn't matter as far as I can see for the standard tokenizers as they don't use __getstate__ / __setstate__. I'm not sure if there's another hook I'm supposed to implement as well? 
--------------------------------------------------------------------------- AssertionError Traceback (most recent call last) <ipython-input-22-a2aef4f74aaa> in <module> 8 tokenized_datasets = datasets.map( 9 tokenize_function, ---> 10 batched=True, 11 ) ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/dataset_dict.py in map(self, function, with_indices, input_columns, batched, batch_size, remove_columns, keep_in_memory, load_from_cache_file, cache_file_names, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, desc) 487 desc=desc, 488 ) --> 489 for k, dataset in self.items() 490 } 491 ) ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/dataset_dict.py in <dictcomp>(.0) 487 desc=desc, 488 ) --> 489 for k, dataset in self.items() 490 } 491 ) ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc) 1633 fn_kwargs=fn_kwargs, 1634 new_fingerprint=new_fingerprint, -> 1635 desc=desc, 1636 ) 1637 else: ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in wrapper(*args, **kwargs) 184 } 185 # apply actual function --> 186 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 187 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 188 # re-apply format to the output ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/fingerprint.py in wrapper(*args, **kwargs) 395 # Call actual function 396 --> 397 out = func(self, *args, **kwargs) 398 399 # Update fingerprint of in-place transforms + update in-place history of transforms ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in _map_single(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, desc) 1961 indices, 1962 check_same_num_examples=len(input_dataset.list_indexes()) > 0, -> 1963 offset=offset, 1964 ) 1965 except NumExamplesMismatch: ~/.pyenv/versions/3.7.6/envs/xxx/lib/python3.7/site-packages/datasets/arrow_dataset.py in apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples, offset) 1853 effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset 1854 processed_inputs = ( -> 1855 function(*fn_args, effective_indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs) 1856 ) 1857 if update_data is None: <ipython-input-21-8ee4a8ba5b1b> in tokenize_function(examples) 1 def tokenize_function(examples): ----> 2 assert tokenizer.state == "restored" 3 tokenizer(examples) 4 return examples
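Building on the `Hasher` helper quoted in the discussion, a self-contained sketch for checking up front whether an object can be pickled and hashed before closing over it in a `map` function (the toy class is made up for illustration):

```python
from datasets.fingerprint import Hasher

class CustomTokenizer:
    """Toy stand-in for the custom tokenizer in the report."""
    def __init__(self):
        self.state = "init"

try:
    print(Hasher.hash(CustomTokenizer()))  # a hash means map() can cache the transform
except Exception as err:
    print(f"Not hashable; map() would fall back to a random fingerprint: {err}")
```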
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2516/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2516/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2514
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2514/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2514/comments
https://api.github.com/repos/huggingface/datasets/issues/2514/events
https://github.com/huggingface/datasets/issues/2514
924,417,172
MDU6SXNzdWU5MjQ0MTcxNzI=
2,514
Can datasets remove duplicated rows?
{ "login": "liuxinglan", "id": 16516583, "node_id": "MDQ6VXNlcjE2NTE2NTgz", "avatar_url": "https://avatars.githubusercontent.com/u/16516583?v=4", "gravatar_id": "", "url": "https://api.github.com/users/liuxinglan", "html_url": "https://github.com/liuxinglan", "followers_url": "https://api.github.com/users/liuxinglan/followers", "following_url": "https://api.github.com/users/liuxinglan/following{/other_user}", "gists_url": "https://api.github.com/users/liuxinglan/gists{/gist_id}", "starred_url": "https://api.github.com/users/liuxinglan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/liuxinglan/subscriptions", "organizations_url": "https://api.github.com/users/liuxinglan/orgs", "repos_url": "https://api.github.com/users/liuxinglan/repos", "events_url": "https://api.github.com/users/liuxinglan/events{/privacy}", "received_events_url": "https://api.github.com/users/liuxinglan/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "Hi ! For now this is probably the best option.\r\nWe might add a feature like this in the feature as well.\r\n\r\nDo you know any deduplication method that works on arbitrary big datasets without filling up RAM ?\r\nOtherwise we can have do the deduplication in memory like pandas but I feel like this is going to be limiting for some cases", "Yes, I'd like to work on this feature once I'm done with #2500, but first I have to do some research, and see if the implementation wouldn't be too complex.\r\n\r\nIn the meantime, maybe [this lib](https://github.com/TomScheffers/pyarrow_ops) can help. However, note that this lib operates directly on pyarrow tables and relies only on `hash` to find duplicates (e.g. `-1` and `-2` have the same hash in Python 3, so this lib will treat them as duplicates), which doesn't make much sense.", "> Hi ! For now this is probably the best option.\r\n> We might add a feature like this in the feature as well.\r\n> \r\n> Do you know any deduplication method that works on arbitrary big datasets without filling up RAM ?\r\n> Otherwise we can have do the deduplication in memory like pandas but I feel like this is going to be limiting for some cases\r\n\r\nGreat if this is can be done. Thanks!!\r\n\r\nNot sure if you are asking me. In any case I don't know of any unfortunately :( in practice if data is really large we normally do it with spark (only for info. I understand this is not useful in developing this library..)", "Hello,\r\n\r\nI'm also interested in this feature.\r\nHas there been progress on this issue?\r\n\r\nCould we use a similar trick as above, but with a better hashing algorithm like SHA?\r\n\r\nWe could also use a [bloom filter](https://en.wikipedia.org/wiki/Bloom_filter), should we care a lot about collision in this case?", "For reference, we can get a solution fairly easily if we assume that we can hold in memory all unique values. \r\n\r\n```python\r\nfrom datasets import Dataset\r\nfrom itertools import cycle\r\nfrom functools import partial\r\n\r\nmemory = set()\r\ndef is_unique(elem:Any , column: str, memory: set) -> bool:\r\n if elem[column] in memory:\r\n return False\r\n else:\r\n memory.add(elem[column])\r\n return True\r\n\r\n# Example dataset\r\nds = Dataset.from_dict({\"col1\" : [sent for i, sent in zip(range(10), cycle([\"apple\", \"orange\", \"pear\"]))],\r\n \"col2\": [i % 5 for i in range(10)]})\r\n\r\n# Drop duplicates in `ds` on \"col1\"\r\nds2 = ds.filter(partial(is_unique, column=\"col1\", memory=memory))\r\n```\r\n\r\nOf course, we can improve the API so that we can introduce `Dataset.drop_duplicates`.\r\nFor the parallel version, we can use a shared memory set.", "An approach that works assuming you can hold the all the unique document hashes in memory:\r\n```python\r\nfrom datasets import load_dataset\r\n\r\ndef get_hash(example):\r\n \"\"\"Get hash of content field.\"\"\"\r\n return {\"hash\": hash(example[\"content\"])} # can use any hashing function here\r\n \r\ndef check_uniques(example, uniques):\r\n \"\"\"Check if current hash is still in set of unique hashes and remove if true.\"\"\"\r\n if example[\"hash\"] in uniques:\r\n uniques.remove(example[\"hash\"])\r\n return True\r\n else:\r\n return False\r\n\r\nds = load_dataset(\"some_dataset\")\r\nds = ds.map(get_hash)\r\nuniques = set(ds.unique(\"hash\"))\r\nds_filter = ds.filter(check_uniques, fn_kwargs={\"uniques\": uniques})\r\n```\r\nIf the `uniques` could be stored in arrow then no additional memory would used at all but I don't know if this is possible.\r\n" ]
1,623,972,938,000
1,638,434,361,000
null
NONE
null
**Is your feature request related to a problem? Please describe.** I find myself relying more and more on datasets just to do all the preprocessing. One thing, however: for removing duplicated rows I couldn't find out how, so I am always converting the dataset to pandas to do that. **Describe the solution you'd like** A "remove duplicated rows" functionality. **Describe alternatives you've considered** Convert the dataset to pandas, remove duplicates, and convert back. **Additional context** None
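The pandas round trip described in the request looks roughly like this (a sketch; the column names are made up):

```python
from datasets import Dataset

# Sketch of the pandas-based workaround; "text" is an illustrative column name.
ds = Dataset.from_dict({"text": ["a", "b", "a"], "label": [0, 1, 0]})
df = ds.to_pandas().drop_duplicates(subset=["text"])
deduped = Dataset.from_pandas(df, preserve_index=False)
print(len(ds), len(deduped))  # 3 2
```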
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2514/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2514/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2513
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2513/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2513/comments
https://api.github.com/repos/huggingface/datasets/issues/2513/events
https://github.com/huggingface/datasets/issues/2513
924,174,413
MDU6SXNzdWU5MjQxNzQ0MTM=
2,513
Corelation should be Correlation
{ "login": "colbym-MM", "id": 71514164, "node_id": "MDQ6VXNlcjcxNTE0MTY0", "avatar_url": "https://avatars.githubusercontent.com/u/71514164?v=4", "gravatar_id": "", "url": "https://api.github.com/users/colbym-MM", "html_url": "https://github.com/colbym-MM", "followers_url": "https://api.github.com/users/colbym-MM/followers", "following_url": "https://api.github.com/users/colbym-MM/following{/other_user}", "gists_url": "https://api.github.com/users/colbym-MM/gists{/gist_id}", "starred_url": "https://api.github.com/users/colbym-MM/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/colbym-MM/subscriptions", "organizations_url": "https://api.github.com/users/colbym-MM/orgs", "repos_url": "https://api.github.com/users/colbym-MM/repos", "events_url": "https://api.github.com/users/colbym-MM/events{/privacy}", "received_events_url": "https://api.github.com/users/colbym-MM/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @colbym-MM, thanks for reporting. We are fixing it." ]
1,623,950,928,000
1,624,005,835,000
1,624,005,835,000
NONE
null
https://github.com/huggingface/datasets/blob/0e87e1d053220e8ecddfa679bcd89a4c7bc5af62/metrics/matthews_correlation/matthews_correlation.py#L66
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2513/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2513/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2512
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2512/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2512/comments
https://api.github.com/repos/huggingface/datasets/issues/2512/events
https://github.com/huggingface/datasets/issues/2512
924,069,353
MDU6SXNzdWU5MjQwNjkzNTM=
2,512
seqeval metric does not work with a recent version of sklearn: classification_report() got an unexpected keyword argument 'output_dict'
{ "login": "avidale", "id": 8642136, "node_id": "MDQ6VXNlcjg2NDIxMzY=", "avatar_url": "https://avatars.githubusercontent.com/u/8642136?v=4", "gravatar_id": "", "url": "https://api.github.com/users/avidale", "html_url": "https://github.com/avidale", "followers_url": "https://api.github.com/users/avidale/followers", "following_url": "https://api.github.com/users/avidale/following{/other_user}", "gists_url": "https://api.github.com/users/avidale/gists{/gist_id}", "starred_url": "https://api.github.com/users/avidale/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/avidale/subscriptions", "organizations_url": "https://api.github.com/users/avidale/orgs", "repos_url": "https://api.github.com/users/avidale/repos", "events_url": "https://api.github.com/users/avidale/events{/privacy}", "received_events_url": "https://api.github.com/users/avidale/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Sorry, I was using an old version of sequeval" ]
1,623,944,162,000
1,623,944,767,000
1,623,944,767,000
NONE
null
## Describe the bug A clear and concise description of what the bug is. ## Steps to reproduce the bug ```python from datasets import load_dataset, load_metric seqeval = load_metric("seqeval") seqeval.compute(predictions=[['A']], references=[['A']]) ``` ## Expected results The function computes a dict with metrics ## Actual results ``` --------------------------------------------------------------------------- TypeError Traceback (most recent call last) <ipython-input-39-69a57f5cf06f> in <module> 1 from datasets import load_dataset, load_metric 2 seqeval = load_metric("seqeval") ----> 3 seqeval.compute(predictions=[['A']], references=[['A']]) ~/p3/lib/python3.7/site-packages/datasets/metric.py in compute(self, *args, **kwargs) 396 references = self.data["references"] 397 with temp_seed(self.seed): --> 398 output = self._compute(predictions=predictions, references=references, **kwargs) 399 400 if self.buf_writer is not None: ~/.cache/huggingface/modules/datasets_modules/metrics/seqeval/81eda1ff004361d4fa48754a446ec69bb7aa9cf4d14c7215f407d1475941c5ff/seqeval.py in _compute(self, predictions, references, suffix) 95 96 def _compute(self, predictions, references, suffix=False): ---> 97 report = classification_report(y_true=references, y_pred=predictions, suffix=suffix, output_dict=True) 98 report.pop("macro avg") 99 report.pop("weighted avg") TypeError: classification_report() got an unexpected keyword argument 'output_dict' ``` ## Environment info sklearn=0.24 datasets=1.1.3
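Per the author's follow-up comment, the failure came from an outdated seqeval (whose `classification_report` did not yet accept `output_dict`). After upgrading seqeval, a call along these lines should return the metrics dict (sketch with toy IOB tags):

```python
from datasets import load_metric

# Sketch: run after `pip install -U seqeval`; the tags below are toy IOB labels.
seqeval = load_metric("seqeval")
print(seqeval.compute(predictions=[["B-PER", "O"]], references=[["B-PER", "O"]]))
```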
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2512/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2512/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2511
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2511/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2511/comments
https://api.github.com/repos/huggingface/datasets/issues/2511/events
https://github.com/huggingface/datasets/issues/2511
923,762,133
MDU6SXNzdWU5MjM3NjIxMzM=
2,511
Add C4
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "Update on this: I'm computing the checksums of the data files. It will be available soon", "Added in #2575 :)" ]
1,623,925,864,000
1,625,488,618,000
1,625,488,617,000
MEMBER
null
## Adding a Dataset - **Name:** *C4* - **Description:** *https://github.com/allenai/allennlp/discussions/5056* - **Paper:** *https://arxiv.org/abs/1910.10683* - **Data:** *https://huggingface.co/datasets/allenai/c4* - **Motivation:** *Used a lot for pretraining* Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md). Should fix https://github.com/huggingface/datasets/issues/1710
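Once the dataset is merged (see the PR referenced in the comments), loading it would presumably look something like this (a sketch; the config name follows the allenai/c4 layout, and streaming avoids downloading the full corpus up front):

```python
from datasets import load_dataset

# Sketch: stream the English config instead of materializing hundreds of GB on disk.
c4 = load_dataset("c4", "en", split="train", streaming=True)
print(next(iter(c4)))
```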
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2511/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2511/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2508
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2508/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2508/comments
https://api.github.com/repos/huggingface/datasets/issues/2508/events
https://github.com/huggingface/datasets/issues/2508
921,863,173
MDU6SXNzdWU5MjE4NjMxNzM=
2,508
Load Image Classification Dataset from Local
{ "login": "Jacobsolawetz", "id": 8428198, "node_id": "MDQ6VXNlcjg0MjgxOTg=", "avatar_url": "https://avatars.githubusercontent.com/u/8428198?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Jacobsolawetz", "html_url": "https://github.com/Jacobsolawetz", "followers_url": "https://api.github.com/users/Jacobsolawetz/followers", "following_url": "https://api.github.com/users/Jacobsolawetz/following{/other_user}", "gists_url": "https://api.github.com/users/Jacobsolawetz/gists{/gist_id}", "starred_url": "https://api.github.com/users/Jacobsolawetz/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Jacobsolawetz/subscriptions", "organizations_url": "https://api.github.com/users/Jacobsolawetz/orgs", "repos_url": "https://api.github.com/users/Jacobsolawetz/repos", "events_url": "https://api.github.com/users/Jacobsolawetz/events{/privacy}", "received_events_url": "https://api.github.com/users/Jacobsolawetz/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "nateraw", "id": 32437151, "node_id": "MDQ6VXNlcjMyNDM3MTUx", "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nateraw", "html_url": "https://github.com/nateraw", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "organizations_url": "https://api.github.com/users/nateraw/orgs", "repos_url": "https://api.github.com/users/nateraw/repos", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "received_events_url": "https://api.github.com/users/nateraw/received_events", "type": "User", "site_admin": false }
[ { "login": "nateraw", "id": 32437151, "node_id": "MDQ6VXNlcjMyNDM3MTUx", "avatar_url": "https://avatars.githubusercontent.com/u/32437151?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nateraw", "html_url": "https://github.com/nateraw", "followers_url": "https://api.github.com/users/nateraw/followers", "following_url": "https://api.github.com/users/nateraw/following{/other_user}", "gists_url": "https://api.github.com/users/nateraw/gists{/gist_id}", "starred_url": "https://api.github.com/users/nateraw/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nateraw/subscriptions", "organizations_url": "https://api.github.com/users/nateraw/orgs", "repos_url": "https://api.github.com/users/nateraw/repos", "events_url": "https://api.github.com/users/nateraw/events{/privacy}", "received_events_url": "https://api.github.com/users/nateraw/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi ! Is this folder structure a standard, a bit like imagenet ?\r\nIn this case maybe we can consider having a dataset loader for cifar-like, imagenet-like, squad-like, conll-like etc. datasets ?\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nmy_custom_cifar = load_dataset(\"cifar_like\", data_dir=\"path/to/data/dir\")\r\n```\r\n\r\nLet me know what you think", "Yep that would be sweet - closing for now as we found a workaround. ", "@lhoestq I think we'll want a generic `image-folder` dataset (same as 'imagenet-like'). This is like `torchvision.datasets.ImageFolder`, and is something vision folks are used to seeing.", "Opening this back up, since I'm planning on tackling this. Already posted a quick version of it on my account on the hub.\r\n\r\n```python\r\nfrom datasets import load_dataset\r\n\r\nds = load_dataset('nateraw/image-folder', data_files='PetImages/')\r\n```" ]
1,623,797,013,000
1,626,113,034,000
null
NONE
null
**Is your feature request related to a problem? Please describe.** Yes - we would like to load an image classification dataset with datasets without having to write a custom data loader. **Describe the solution you'd like** Given a folder structure with the images of each class in its own folder, the ability to load these folders into a HuggingFace dataset like "cifar10". **Describe alternatives you've considered** Implement ViT training outside of the HuggingFace Trainer and without datasets (we did this but prefer to stay on the main path), or write custom data loader logic. **Additional context** We're training ViT on a custom dataset.
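For reference, recent versions of `datasets` ship a generic `imagefolder` builder that covers exactly this one-folder-per-class layout (a sketch; the path is hypothetical):

```python
from datasets import load_dataset

# Sketch: labels are inferred from the sub-folder names, one folder per class.
ds = load_dataset("imagefolder", data_dir="path/to/images")  # hypothetical local path
print(ds["train"].features)
```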
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2508/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2508/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2503
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2503/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2503/comments
https://api.github.com/repos/huggingface/datasets/issues/2503/events
https://github.com/huggingface/datasets/issues/2503
920,636,186
MDU6SXNzdWU5MjA2MzYxODY=
2,503
SubjQA wrong boolean values in entries
{ "login": "arnaudstiegler", "id": 26485052, "node_id": "MDQ6VXNlcjI2NDg1MDUy", "avatar_url": "https://avatars.githubusercontent.com/u/26485052?v=4", "gravatar_id": "", "url": "https://api.github.com/users/arnaudstiegler", "html_url": "https://github.com/arnaudstiegler", "followers_url": "https://api.github.com/users/arnaudstiegler/followers", "following_url": "https://api.github.com/users/arnaudstiegler/following{/other_user}", "gists_url": "https://api.github.com/users/arnaudstiegler/gists{/gist_id}", "starred_url": "https://api.github.com/users/arnaudstiegler/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/arnaudstiegler/subscriptions", "organizations_url": "https://api.github.com/users/arnaudstiegler/orgs", "repos_url": "https://api.github.com/users/arnaudstiegler/repos", "events_url": "https://api.github.com/users/arnaudstiegler/events{/privacy}", "received_events_url": "https://api.github.com/users/arnaudstiegler/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @arnaudstiegler, thanks for reporting. I'm investigating it.", "@arnaudstiegler I have just checked that these mismatches are already present in the original dataset: https://github.com/megagonlabs/SubjQA\r\n\r\nWe are going to contact the dataset owners to report this.", "I have:\r\n- opened an issue in their repo: https://github.com/megagonlabs/SubjQA/issues/3\r\n- written an email to all the paper authors", "Please [see my response](https://github.com/megagonlabs/SubjQA/issues/3#issuecomment-905160010). There will be a fix in a couple of days." ]
1,623,692,566,000
1,629,863,526,000
null
NONE
null
## Describe the bug SubjQA seems to have a boolean that's consistently wrong. It defines: - question_subj_level: The subjectivity level of the question (on a 1 to 5 scale with 1 being the most subjective). - is_ques_subjective: A boolean subjectivity label derived from question_subj_level (i.e., scores below 4 are considered as subjective) However, `is_ques_subjective` seems to have wrong values in the entire dataset. For instance, in the example in the dataset card, we have: - "question_subj_level": 2 - "is_ques_subjective": false However, according to the description, the question should be subjective since the `question_subj_level` is below 4.
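A quick consistency check for the reported mismatch might look like this (a sketch; "books" is one of SubjQA's domain configs):

```python
from datasets import load_dataset

# Sketch: count rows where the boolean disagrees with the documented rule
# (question_subj_level < 4 should imply is_ques_subjective == True).
ds = load_dataset("subjqa", "books", split="train")
mismatched = ds.filter(lambda ex: (ex["question_subj_level"] < 4) != ex["is_ques_subjective"])
print(len(mismatched), "of", len(ds), "rows disagree with the documented rule")
```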
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2503/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2503/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2499
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2499/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2499/comments
https://api.github.com/repos/huggingface/datasets/issues/2499/events
https://github.com/huggingface/datasets/issues/2499
920,413,021
MDU6SXNzdWU5MjA0MTMwMjE=
2,499
Python Programming Puzzles
{ "login": "VictorSanh", "id": 16107619, "node_id": "MDQ6VXNlcjE2MTA3NjE5", "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/VictorSanh", "html_url": "https://github.com/VictorSanh", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "repos_url": "https://api.github.com/users/VictorSanh/repos", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "type": "User", "site_admin": false }
[ { "id": 2067376369, "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request", "name": "dataset request", "color": "e99695", "default": false, "description": "Requesting to add a new dataset" } ]
open
false
null
[]
null
[ "👀 @TalSchuster", "Thanks @VictorSanh!\r\nThere's also a [notebook](https://aka.ms/python_puzzles) and [demo](https://aka.ms/python_puzzles_study) available now to try out some of the puzzles" ]
1,623,677,238,000
1,623,780,854,000
null
MEMBER
null
## Adding a Dataset - **Name:** Python Programming Puzzles - **Description:** Programming challenge called programming puzzles, as an objective and comprehensive evaluation of program synthesis - **Paper:** https://arxiv.org/pdf/2106.05784.pdf - **Data:** https://github.com/microsoft/PythonProgrammingPuzzles ([Scrolling through the data](https://github.com/microsoft/PythonProgrammingPuzzles/blob/main/problems/README.md)) - **Motivation:** Spans a large range of difficulty, problems, and domains. A useful resource for evaluation as we don't have a clear understanding of the abilities and skills of extremely large LMs. Note: it's a growing dataset (contributions are welcome), so we'll need careful versioning for this dataset. Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2499/reactions", "total_count": 2, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 1, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2499/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2498
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2498/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2498/comments
https://api.github.com/repos/huggingface/datasets/issues/2498/events
https://github.com/huggingface/datasets/issues/2498
920,411,285
MDU6SXNzdWU5MjA0MTEyODU=
2,498
Improve torch formatting performance
{ "login": "vblagoje", "id": 458335, "node_id": "MDQ6VXNlcjQ1ODMzNQ==", "avatar_url": "https://avatars.githubusercontent.com/u/458335?v=4", "gravatar_id": "", "url": "https://api.github.com/users/vblagoje", "html_url": "https://github.com/vblagoje", "followers_url": "https://api.github.com/users/vblagoje/followers", "following_url": "https://api.github.com/users/vblagoje/following{/other_user}", "gists_url": "https://api.github.com/users/vblagoje/gists{/gist_id}", "starred_url": "https://api.github.com/users/vblagoje/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vblagoje/subscriptions", "organizations_url": "https://api.github.com/users/vblagoje/orgs", "repos_url": "https://api.github.com/users/vblagoje/repos", "events_url": "https://api.github.com/users/vblagoje/events{/privacy}", "received_events_url": "https://api.github.com/users/vblagoje/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "That’s interesting thanks, let’s see what we can do. Can you detail your last sentence? I’m not sure I understand it well.", "Hi ! I just re-ran a quick benchmark and using `to_numpy()` seems to be faster now:\r\n\r\n```python\r\nimport pyarrow as pa # I used pyarrow 3.0.0\r\nimport numpy as np\r\n\r\nn, max_length = 1_000, 512\r\nlow, high, size = 0, 2 << 16, (n, max_length)\r\n\r\ntable = pa.Table.from_pydict({\r\n \"input_ids\": np.random.default_rng(42).integers(low=low, high=high, size=size).tolist()\r\n})\r\n\r\n\r\n%%timeit\r\n_ = table.to_pandas()[\"input_ids\"].to_numpy()\r\n# 1.44 ms ± 80.1 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\r\n\r\n%%timeit\r\n_ = table[\"input_ids\"].to_pandas().to_numpy()\r\n# 461 µs ± 14.2 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\r\n\r\n%%timeit\r\n_ = table[\"input_ids\"].to_numpy()\r\n# 317 µs ± 5.06 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)\r\n```\r\n\r\nCurrently the conversion from arrow to numpy is done in the NumpyArrowExtractor here:\r\n\r\nhttps://github.com/huggingface/datasets/blob/d6d0ede9486ffad7944642ca9a326e058b676788/src/datasets/formatting/formatting.py#L143-L166\r\n\r\nLet's update the NumpyArrowExtractor to call `to_numpy` directly and see how our github benchmarks evolve ?__", "Sounds like a plan @lhoestq If you create a PR I'll pick it up and try it out right away! ", "@lhoestq I can also prepare the PR, just lmk. ", "I’m not exactly sure how to read the graph but it seems that to_categorical take a lot of time here. Could you share more informations on the features/stats of your datasets so we could maybe design a synthetic datasets that looks more similar for debugging testing?", "I created https://github.com/huggingface/datasets/pull/2505 if you want to play with it @vblagoje ", "> I’m not exactly sure how to read the graph but it seems that to_categorical take a lot of time here. Could you share more informations on the features/stats of your datasets so we could maybe design a synthetic datasets that looks more similar for debugging testing?\r\n\r\n@thomwolf starting from the top, each rectangle represents the cumulative amount of it takes to execute the method call. Therefore, format_batch in torch_formatter.py takes ~20 sec, and the largest portion of that call is taken by to_pandas call and the smaller portion (grey rectangle) by the other method invocation(s) in format_batch (series_to_numpy etc). \r\n\r\nFeatures of the dataset are BERT pre-training model input columns i.e:\r\n```\r\nf = Features({ \r\n \"input_ids\": Sequence(feature=Value(dtype=\"int32\")), \r\n \"attention_mask\": Sequence(feature=Value(dtype=\"int8\")), \r\n \"token_type_ids\": Sequence(feature=Value(dtype=\"int8\")), \r\n \"labels\": Sequence(feature=Value(dtype=\"int32\")), \r\n \"next_sentence_label\": Value(dtype=\"int8\")\r\n})\r\n```\r\n\r\nI'll work with @lhoestq till we get to the bottom of this one. \r\n ", "@lhoestq the proposed branch is faster, but overall training speedup is a few percentage points. I couldn't figure out how to include the GitHub branch into setup.py, so I couldn't start NVidia optimized Docker-based pre-training run. But on bare metal, there is a slight improvement. I'll do some more performance traces. 
", "Hi @vblagoje, to install Datasets from @lhoestq PR reference #2505, you can use:\r\n```shell\r\npip install git+ssh://git@github.com/huggingface/datasets.git@refs/pull/2505/head#egg=datasets\r\n```", "Hey @albertvillanova yes thank you, I am aware, I can easily pull it from a terminal command line but then I can't automate docker image builds as dependencies are picked up from setup.py and for some reason setup.py doesn't accept this string format.", "@vblagoje in that case, you can add this to your `setup.py`:\r\n```python\r\n install_requires=[\r\n \"datasets @ git+ssh://git@github.com/huggingface/datasets.git@refs/pull/2505/head\",\r\n```", "@lhoestq @thomwolf @albertvillanova The new approach is definitely faster, dataloader now takes less than 3% cumulative time (pink rectangle two rectangles to the right of tensor.py backward invocation)\r\n\r\n![Screen Shot 2021-06-16 at 3 05 06 PM](https://user-images.githubusercontent.com/458335/122224432-19de4700-ce82-11eb-982f-d45d4bcc1e41.png)\r\n\r\nWhen we drill down into dataloader next invocation we get:\r\n\r\n![Screen Shot 2021-06-16 at 3 09 56 PM](https://user-images.githubusercontent.com/458335/122224976-a1c45100-ce82-11eb-8d40-59194740d616.png)\r\n\r\nAnd finally format_batch:\r\n\r\n![Screen Shot 2021-06-16 at 3 11 07 PM](https://user-images.githubusercontent.com/458335/122225132-cae4e180-ce82-11eb-8a16-967ab7c1c2aa.png)\r\n\r\n\r\nNot sure this could be further improved but this is definitely a decent step forward.\r\n\r\n", "> ```python\r\n> datasets @ git+ssh://git@github.com/huggingface/datasets.git@refs/pull/2505/head\r\n> ```\r\n\r\n@albertvillanova how would I replace datasets dependency in https://github.com/huggingface/transformers/blob/master/setup.py as the above approach is not working. ", "@vblagoje I tested my proposed approach before posting it here and it worked for me. \r\n\r\nIs it not working in your case because of the SSH protocol? In that case you could try the same approach but using HTTPS:\r\n```\r\n\"datasets @ git+https://github.com/huggingface/datasets.git@refs/pull/2505/head\",\r\n``` ", "Also note the blanks before and after the `@`.", "@albertvillanova of course it works. Apologies. I needed to change datasets in all deps references , like [here](https://github.com/huggingface/transformers/blob/master/setup.py#L235) for example. " ]
1,623,677,124,000
1,624,269,294,000
null
CONTRIBUTOR
null
**Is your feature request related to a problem? Please describe.**

It would be great, if possible, to further improve read performance of raw encoded datasets and their subsequent conversion to torch tensors.

A bit more background: I am working on LM pre-training using the HF ecosystem. We use encoded HF Wikipedia and BookCorpus datasets. The training machines are similar to DGX-1 workstations. We use the HF trainer torch.distributed training approach on a single machine with 8 GPUs.

The current performance is about 30% slower than the NVidia optimized BERT [examples](https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling) baseline. Quite a bit of customized code and training loop tricks were used to achieve the baseline performance. It would be great to achieve the same performance while using nothing more than the off-the-shelf HF ecosystem. Perhaps, in the future, with @stas00's work on deepspeed integration, it could even be exceeded.

**Describe the solution you'd like**

Using profiling tools we've observed that approx. 25% of cumulative run time is spent on the data loader next call.

![dataloader_next](https://user-images.githubusercontent.com/458335/121895543-59742a00-ccee-11eb-85fb-f07715e3f1f6.png)

As you can observe, most of the data loader next call is spent in the HF datasets torch_formatter.py format_batch call. Digging a bit deeper into format_batch we can see the following profiler data:

![torch_formatter](https://user-images.githubusercontent.com/458335/121895944-c7b8ec80-ccee-11eb-95d5-5875c5716c30.png)

Once again, a lot of time is spent in the pyarrow table conversion to pandas, which seems like an intermediary step. Offline @lhoestq told me that this approach was, for some unknown reason, faster than direct to numpy conversion.

**Describe alternatives you've considered**

I am not familiar with pyarrow and have not yet considered the alternatives to the current approach. Most of the online advice around data loader performance improvements revolves around increasing the number of workers and using pinned memory for copying tensors from the host device to GPUs, but we've already tried these avenues without much performance improvement. The Weights & Biases dashboard for the pre-training task reports CPU utilization of ~10%, GPUs are completely saturated (GPU utilization is above 95% on all GPUs), while disk utilization is above 90%.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2498/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2498/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2496
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2496/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2496/comments
https://api.github.com/repos/huggingface/datasets/issues/2496/events
https://github.com/huggingface/datasets/issues/2496
920,216,314
MDU6SXNzdWU5MjAyMTYzMTQ=
2,496
Dataset fingerprint changes after moving the cache directory, which prevent cache reload when using `map`
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[]
1,623,662,426,000
1,624,287,903,000
1,624,287,903,000
MEMBER
null
`Dataset.map` uses the dataset fingerprint (a hash) for caching.

However the fingerprint seems to change when someone moves the cache directory of the dataset.

This is because it uses the default fingerprint generation:
1. the dataset path is used to get the fingerprint
2. the modification times of the arrow file are also used to get the fingerprint

To fix that we could set the fingerprint of the dataset to be a hash of (<dataset_name>, <config_name>, <version>, <script_hash>), i.e. a hash of the cache path relative to the cache directory.
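A minimal sketch of the proposed fix (not the library's actual implementation): derive the fingerprint only from identifiers that survive moving the cache directory, instead of the absolute arrow-file path and its modification time.

```python
from hashlib import md5

def relocatable_fingerprint(dataset_name: str, config_name: str, version: str, script_hash: str) -> str:
    # Hash only identifiers that stay the same when the cache directory is moved.
    key = "/".join([dataset_name, config_name, version, script_hash])
    return md5(key.encode("utf-8")).hexdigest()

# Example values for illustration only.
print(relocatable_fingerprint("squad", "plain_text", "1.0.0", "abc123"))
```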
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2496/reactions", "total_count": 2, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2496/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2495
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2495/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2495/comments
https://api.github.com/repos/huggingface/datasets/issues/2495/events
https://github.com/huggingface/datasets/issues/2495
920,170,030
MDU6SXNzdWU5MjAxNzAwMzA=
2,495
JAX formatting
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[]
1,623,659,527,000
1,624,292,149,000
1,624,292,149,000
MEMBER
null
We already support pytorch, tensorflow, numpy, pandas and arrow dataset formatting. Let's add jax as well
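For context, a hedged sketch of what a `jax` format could look like, mirroring the formats that already exist; the commented lines are an assumption about the eventual API, not implemented behaviour at the time of this issue.

```python
from datasets import Dataset

ds = Dataset.from_dict({"input_ids": [[1, 2, 3], [4, 5, 6]]})

ds.set_format("numpy")           # one of the formats that already exists
print(type(ds[0]["input_ids"]))  # numpy.ndarray

# Proposed equivalent once JAX support is added:
# ds.set_format("jax")
# ds[0]["input_ids"]             # would come back as a jax.numpy array
```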
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2495/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2495/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2494
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2494/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2494/comments
https://api.github.com/repos/huggingface/datasets/issues/2494/events
https://github.com/huggingface/datasets/issues/2494
920,149,183
MDU6SXNzdWU5MjAxNDkxODM=
2,494
Improve docs on Enhancing performance
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892861, "node_id": "MDU6TGFiZWwxOTM1ODkyODYx", "url": "https://api.github.com/repos/huggingface/datasets/labels/documentation", "name": "documentation", "color": "0075ca", "default": true, "description": "Improvements or additions to documentation" } ]
open
false
null
[]
null
[]
1,623,658,308,000
1,623,658,308,000
null
MEMBER
null
In the ["Enhancing performance"](https://huggingface.co/docs/datasets/loading_datasets.html#enhancing-performance) section of docs, add specific use cases: - How to make datasets the fastest - How to make datasets take the less RAM - How to make datasets take the less hard drive mem cc: @thomwolf
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2494/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2494/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2489
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2489/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2489/comments
https://api.github.com/repos/huggingface/datasets/issues/2489/events
https://github.com/huggingface/datasets/issues/2489
919,569,749
MDU6SXNzdWU5MTk1Njk3NDk=
2,489
Allow latest pyarrow version once segfault bug is fixed
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
1,623,506,992,000
1,623,657,203,000
1,623,657,203,000
MEMBER
null
As pointed out by @symeneses (see https://github.com/huggingface/datasets/pull/2268#issuecomment-860048613), pyarrow has fixed the segfault bug present in version 4.0.0 (see https://issues.apache.org/jira/browse/ARROW-12568):
- it was fixed on 3 May 2021
- version 4.0.1 was released on 19 May 2021 with the bug fix
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2489/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2489/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2485
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2485/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2485/comments
https://api.github.com/repos/huggingface/datasets/issues/2485/events
https://github.com/huggingface/datasets/issues/2485
919,099,218
MDU6SXNzdWU5MTkwOTkyMTg=
2,485
Implement layered building
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[]
1,623,437,665,000
1,623,437,665,000
null
MEMBER
null
As discussed with @stas00 and @lhoestq (see also here https://github.com/huggingface/datasets/issues/2481#issuecomment-859712190):

> My suggestion for this would be to have this enabled by default.
>
> Plus I don't know if there should be a dedicated issue to that is another functionality. But I propose layered building rather than all at once. That is:
>
> 1. uncompress a handful of files via a generator enough to generate one arrow file
> 2. process arrow file 1
> 3. delete all the files that went in and aren't needed anymore.
>
> rinse and repeat.
>
> 1. This way much less disc space will be required - e.g. on JZ we won't be running into inode limitation, also it'd help with the collaborative hub training project
> 2. The user doesn't need to go and manually clean up all the huge files that were left after pre-processing
> 3. It would already include deleting temp files this issue is talking about
>
> I wonder if the new streaming API would be of help, except here the streaming would be into arrow files as the destination, rather than dataloaders.
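A rough, illustrative sketch of the layered loop described in the quote, assuming hypothetical `extract` and `build_arrow_shard` callables that stand in for the real download/processing steps (neither name exists in the library):

```python
import os

def build_in_layers(archives, extract, build_arrow_shard, files_per_shard=10):
    """Process a handful of archives at a time and free disk space between shards."""
    for shard_id, start in enumerate(range(0, len(archives), files_per_shard)):
        extracted = [extract(a) for a in archives[start:start + files_per_shard]]  # 1. uncompress a handful
        build_arrow_shard(extracted, shard_id)                                     # 2. process into one arrow file
        for path in extracted:                                                     # 3. delete inputs no longer needed
            os.remove(path)
```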
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2485/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2485/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2484
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2484/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2484/comments
https://api.github.com/repos/huggingface/datasets/issues/2484/events
https://github.com/huggingface/datasets/issues/2484
919,092,635
MDU6SXNzdWU5MTkwOTI2MzU=
2,484
Implement loading a dataset builder
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false } ]
null
[ "#self-assign" ]
1,623,437,242,000
1,625,481,957,000
1,625,481,957,000
MEMBER
null
As discussed with @stas00 and @lhoestq, this would allow things like:

```python
from datasets import load_dataset_builder

dataset_name = "openwebtext"
builder = load_dataset_builder(dataset_name)
print(builder.cache_dir)
```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2484/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2484/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2481
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2481/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2481/comments
https://api.github.com/repos/huggingface/datasets/issues/2481/events
https://github.com/huggingface/datasets/issues/2481
918,680,168
MDU6SXNzdWU5MTg2ODAxNjg=
2,481
Delete extracted files to save disk space
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/6", "html_url": "https://github.com/huggingface/datasets/milestone/6", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/6/labels", "id": 6836458, "node_id": "MDk6TWlsZXN0b25lNjgzNjQ1OA==", "number": 6, "title": "1.10", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 29, "state": "closed", "created_at": 1623178113000, "updated_at": 1626881809000, "due_on": 1628146800000, "closed_at": 1626881809000 }
[ "My suggestion for this would be to have this enabled by default.\r\n\r\nPlus I don't know if there should be a dedicated issue to that is another functionality. But I propose layered building rather than all at once. That is:\r\n\r\n1. uncompress a handful of files via a generator enough to generate one arrow file\r\n2. process arrow file 1\r\n3. delete all the files that went in and aren't needed anymore.\r\n\r\nrinse and repeat.\r\n\r\n1. This way much less disc space will be required - e.g. on JZ we won't be running into inode limitation, also it'd help with the collaborative hub training project\r\n2. The user doesn't need to go and manually clean up all the huge files that were left after pre-processing\r\n3. It would already include deleting temp files this issue is talking about\r\n\r\nI wonder if the new streaming API would be of help, except here the streaming would be into arrow files as the destination, rather than dataloaders." ]
1,623,414,112,000
1,626,685,698,000
1,626,685,698,000
MEMBER
null
As discussed with @stas00 and @lhoestq, allowing the deletion of extracted files would save the typical user a great amount of disk space.
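A hedged illustration of the requested behaviour; the `delete_extracted` flag shown here is an assumed name for the eventual option and should not be read as the current API.

```python
from datasets import DownloadConfig, load_dataset

# Assumed flag: drop the uncompressed copies once the arrow files are built.
dl_config = DownloadConfig(delete_extracted=True)
ds = load_dataset("oscar", "unshuffled_deduplicated_af", download_config=dl_config)
```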
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2481/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2481/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2480
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2480/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2480/comments
https://api.github.com/repos/huggingface/datasets/issues/2480/events
https://github.com/huggingface/datasets/issues/2480
918,678,578
MDU6SXNzdWU5MTg2Nzg1Nzg=
2,480
Set download/extracted paths configurable
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "For example to be able to send uncompressed and temp build files to another volume/partition, so that the user gets the minimal disk usage on their primary setup - and ends up with just the downloaded compressed data + arrow files, but outsourcing the huge files and building to another partition. e.g. on JZ there is a special partition for fast data, but it's also volatile, so only temp files should go there.\r\n\r\nThink of it as `TMPDIR` so we need the equivalent for `datasets`." ]
1,623,414,024,000
1,623,767,029,000
null
MEMBER
null
As discussed with @stas00 and @lhoestq, making these paths configurable may help overcome disk space limitations by spreading data across different partitions/drives.

TODO:
- [x] Set configurable extracted datasets path: #2487
- [x] Set configurable downloaded datasets path: #2488
- [ ] Set configurable "incomplete" datasets path?
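As a sketch, the overall cache can already be redirected with the `HF_DATASETS_CACHE` environment variable; whether the download/extraction sub-paths get their own dedicated variables is exactly what #2487/#2488 track, so only the generic knob is shown here.

```python
import os

# Must be set before `datasets` is imported, since the config is read at import time.
os.environ["HF_DATASETS_CACHE"] = "/mnt/scratch/hf_datasets"

from datasets import load_dataset

ds = load_dataset("ag_news", split="train")
print(ds.cache_files[0]["filename"])  # arrow file should now live under /mnt/scratch/hf_datasets
```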
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2480/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2480/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2478
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2478/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2478/comments
https://api.github.com/repos/huggingface/datasets/issues/2478/events
https://github.com/huggingface/datasets/issues/2478
918,507,510
MDU6SXNzdWU5MTg1MDc1MTA=
2,478
Create release script
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[]
1,623,404,282,000
1,623,404,282,000
null
MEMBER
null
Create a script so that releases can be done automatically (as done in `transformers`).
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2478/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2478/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2475
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2475/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2475/comments
https://api.github.com/repos/huggingface/datasets/issues/2475/events
https://github.com/huggingface/datasets/issues/2475
917,650,882
MDU6SXNzdWU5MTc2NTA4ODI=
2,475
Issue in timit_asr database
{ "login": "hrahamim", "id": 85702107, "node_id": "MDQ6VXNlcjg1NzAyMTA3", "avatar_url": "https://avatars.githubusercontent.com/u/85702107?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hrahamim", "html_url": "https://github.com/hrahamim", "followers_url": "https://api.github.com/users/hrahamim/followers", "following_url": "https://api.github.com/users/hrahamim/following{/other_user}", "gists_url": "https://api.github.com/users/hrahamim/gists{/gist_id}", "starred_url": "https://api.github.com/users/hrahamim/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hrahamim/subscriptions", "organizations_url": "https://api.github.com/users/hrahamim/orgs", "repos_url": "https://api.github.com/users/hrahamim/repos", "events_url": "https://api.github.com/users/hrahamim/events{/privacy}", "received_events_url": "https://api.github.com/users/hrahamim/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "This bug was fixed in #1995. Upgrading datasets to version 1.6 fixes the issue!", "Indeed was a fixed bug.\r\nWorks on version 1.8\r\nThanks " ]
1,623,348,329,000
1,623,572,030,000
1,623,571,993,000
NONE
null
## Describe the bug

I am trying to load the timit_asr dataset, however only the first record is shown (duplicated over all the rows). I am using the following line:

dataset = load_dataset("timit_asr", split="test").shuffle().select(range(10))

The above code results in the same sentence duplicated ten times. It also happens when I use the dataset viewer at Streamlit.

## Steps to reproduce the bug

```python
from datasets import load_dataset

dataset = load_dataset("timit_asr", split="test").shuffle().select(range(10))
data = dataset.to_pandas()
```

## Expected results

A table with different row information.

## Actual results

The same sentence is duplicated over all ten rows.

## Environment info

<!-- You can run the command `datasets-cli env` and copy-and-paste its output below. -->
- `datasets` version: 1.4.1 (also occurs in the latest version)
- Platform: Linux-4.15.0-143-generic-x86_64-with-Ubuntu-18.04-bionic
- Python version: 3.6.9
- PyTorch version (GPU?): 1.8.1+cu102 (False)
- Tensorflow version (GPU?): 1.15.3 (False)
- Using GPU in script?: No
- Using distributed or parallel set-up in script?: No
- PyArrow version:
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2475/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2475/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2474
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2474/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2474/comments
https://api.github.com/repos/huggingface/datasets/issues/2474/events
https://github.com/huggingface/datasets/issues/2474
917,622,055
MDU6SXNzdWU5MTc2MjIwNTU=
2,474
cache_dir parameter for load_from_disk ?
{ "login": "TaskManager91", "id": 7063207, "node_id": "MDQ6VXNlcjcwNjMyMDc=", "avatar_url": "https://avatars.githubusercontent.com/u/7063207?v=4", "gravatar_id": "", "url": "https://api.github.com/users/TaskManager91", "html_url": "https://github.com/TaskManager91", "followers_url": "https://api.github.com/users/TaskManager91/followers", "following_url": "https://api.github.com/users/TaskManager91/following{/other_user}", "gists_url": "https://api.github.com/users/TaskManager91/gists{/gist_id}", "starred_url": "https://api.github.com/users/TaskManager91/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/TaskManager91/subscriptions", "organizations_url": "https://api.github.com/users/TaskManager91/orgs", "repos_url": "https://api.github.com/users/TaskManager91/repos", "events_url": "https://api.github.com/users/TaskManager91/events{/privacy}", "received_events_url": "https://api.github.com/users/TaskManager91/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi ! `load_from_disk` doesn't move the data. If you specify a local path to your mounted drive, then the dataset is going to be loaded directly from the arrow file in this directory. The cache files that result from `map` operations are also stored in the same directory by default.\r\n\r\nHowever note than writing data to your google drive actually fills the VM's disk (see https://github.com/huggingface/datasets/issues/643)\r\n\r\nGiven that, I don't think that changing the cache directory changes anything.\r\n\r\nLet me know what you think", "Thanks for your answer! I am a little surprised since I just want to read the dataset.\r\n\r\nAfter debugging a bit, I noticed that the VM’s disk fills up when the tables (generator) are converted to a list:\r\n\r\nhttps://github.com/huggingface/datasets/blob/5ba149773d23369617563d752aca922081277ec2/src/datasets/table.py#L850\r\n\r\nIf I try to iterate through the table’s generator e.g.: \r\n\r\n`length = sum(1 for x in tables)`\r\n\r\nthe VM’s disk fills up as well.\r\n\r\nI’m running out of Ideas 😄 ", "Indeed reading the data shouldn't increase the VM's disk. Not sure what google colab does under the hood for that to happen" ]
1,623,346,776,000
1,623,660,069,000
null
NONE
null
**Is your feature request related to a problem? Please describe.** When using Google Colab, big datasets can be an issue, as they won't fit on the VM's disk. Therefore mounting Google Drive could be a possible solution. Unfortunately, when loading my own dataset by using the _load_from_disk_ function, the data gets cached to the VM's disk: ` from datasets import load_from_disk myPreprocessedData = load_from_disk("/content/gdrive/MyDrive/ASR_data/myPreprocessedData") ` I know that caching on Google Drive could slow down learning. But at least it would run. **Describe the solution you'd like** Add a cache_Dir parameter to the load_from_disk function. **Describe alternatives you've considered** It looks like you could write a custom loading script for the load_dataset function. But this seems to be much too complex for my use case. Is there perhaps a template here that uses the load_from_disk function?
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2474/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2474/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2472
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2472/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2472/comments
https://api.github.com/repos/huggingface/datasets/issues/2472/events
https://github.com/huggingface/datasets/issues/2472
917,463,821
MDU6SXNzdWU5MTc0NjM4MjE=
2,472
Fix automatic generation of Zenodo DOI
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/5", "html_url": "https://github.com/huggingface/datasets/milestone/5", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/5/labels", "id": 6808903, "node_id": "MDk6TWlsZXN0b25lNjgwODkwMw==", "number": 5, "title": "1.9", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 12, "state": "closed", "created_at": 1622477586000, "updated_at": 1626099120000, "due_on": 1625727600000, "closed_at": 1625809807000 }
[ "I have received a reply from Zenodo support:\r\n> We are currently investigating and fixing this issue related to GitHub releases. As soon as we have solved it we will reach back to you.", "Other repo maintainers had the same problem with Zenodo. \r\n\r\nThere is an open issue on their GitHub repo: zenodo/zenodo#2181", "I have received the following request from Zenodo support:\r\n> Could you send us the link to the repository as well as the release tag?\r\n\r\nMy reply:\r\n> Sure, here it is:\r\n> - Link to the repository: https://github.com/huggingface/datasets\r\n> - Link to the repository at the release tag: https://github.com/huggingface/datasets/releases/tag/1.8.0\r\n> - Release tag: 1.8.0", "Zenodo issue has been fixed. The 1.8.0 release DOI can be found here: https://zenodo.org/record/4946100#.YMd6vKj7RPY" ]
1,623,338,146,000
1,623,689,382,000
1,623,689,382,000
MEMBER
null
After the last release of Datasets (1.8.0), the automatic generation of the Zenodo DOI failed: it appears in yellow as "Received", instead of in green as "Published". I have contacted Zenodo support to fix this issue. TODO: - [x] Check with Zenodo to fix the issue - [x] Check BibTeX entry is right
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2472/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2472/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2471
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2471/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2471/comments
https://api.github.com/repos/huggingface/datasets/issues/2471/events
https://github.com/huggingface/datasets/issues/2471
917,067,165
MDU6SXNzdWU5MTcwNjcxNjU=
2,471
Fix PermissionError on Windows when using tqdm >=4.50.0
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/5", "html_url": "https://github.com/huggingface/datasets/milestone/5", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/5/labels", "id": 6808903, "node_id": "MDk6TWlsZXN0b25lNjgwODkwMw==", "number": 5, "title": "1.9", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 12, "state": "closed", "created_at": 1622477586000, "updated_at": 1626099120000, "due_on": 1625727600000, "closed_at": 1625809807000 }
[]
1,623,313,909,000
1,623,424,310,000
1,623,424,310,000
MEMBER
null
See: https://app.circleci.com/pipelines/github/huggingface/datasets/235/workflows/cfb6a39f-68eb-4802-8b17-2cd5e8ea7369/jobs/1111 ``` PermissionError: [WinError 32] The process cannot access the file because it is being used by another process ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2471/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2471/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2470
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2470/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2470/comments
https://api.github.com/repos/huggingface/datasets/issues/2470/events
https://github.com/huggingface/datasets/issues/2470
916,724,260
MDU6SXNzdWU5MTY3MjQyNjA=
2,470
Crash when `num_proc` > dataset length for `map()` on a `datasets.Dataset`.
{ "login": "mbforbes", "id": 1170062, "node_id": "MDQ6VXNlcjExNzAwNjI=", "avatar_url": "https://avatars.githubusercontent.com/u/1170062?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mbforbes", "html_url": "https://github.com/mbforbes", "followers_url": "https://api.github.com/users/mbforbes/followers", "following_url": "https://api.github.com/users/mbforbes/following{/other_user}", "gists_url": "https://api.github.com/users/mbforbes/gists{/gist_id}", "starred_url": "https://api.github.com/users/mbforbes/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mbforbes/subscriptions", "organizations_url": "https://api.github.com/users/mbforbes/orgs", "repos_url": "https://api.github.com/users/mbforbes/repos", "events_url": "https://api.github.com/users/mbforbes/events{/privacy}", "received_events_url": "https://api.github.com/users/mbforbes/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! It looks like the issue comes from pyarrow. What version of pyarrow are you using ? How did you install it ?", "Thank you for the quick reply! I have `pyarrow==4.0.0`, and I am installing with `pip`. It's not one of my explicit dependencies, so I assume it came along with something else.", "Could you trying reinstalling pyarrow with pip ?\r\nI'm not sure why it would check in your multicurtural-sc directory for source files.", "Sure! I tried reinstalling to get latest. pip was mad because it looks like Datasets currently wants <4.0.0 (which is interesting, because apparently I ended up with 4.0.0 already?), but I gave it a shot anyway:\r\n\r\n```bash\r\n$ pip install --upgrade --force-reinstall pyarrow\r\nCollecting pyarrow\r\n Downloading pyarrow-4.0.1-cp39-cp39-manylinux2014_x86_64.whl (21.9 MB)\r\n |████████████████████████████████| 21.9 MB 23.8 MB/s\r\nCollecting numpy>=1.16.6\r\n Using cached numpy-1.20.3-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl (15.4 MB)\r\nInstalling collected packages: numpy, pyarrow\r\n Attempting uninstall: numpy\r\n Found existing installation: numpy 1.20.3\r\n Uninstalling numpy-1.20.3:\r\n Successfully uninstalled numpy-1.20.3\r\n Attempting uninstall: pyarrow\r\n Found existing installation: pyarrow 3.0.0\r\n Uninstalling pyarrow-3.0.0:\r\n Successfully uninstalled pyarrow-3.0.0\r\nERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\ndatasets 1.8.0 requires pyarrow<4.0.0,>=1.0.0, but you have pyarrow 4.0.1 which is incompatible.\r\nSuccessfully installed numpy-1.20.3 pyarrow-4.0.1\r\n```\r\n\r\nTrying it, the same issue:\r\n\r\n![image](https://user-images.githubusercontent.com/1170062/121730226-3f470b80-caa4-11eb-85a5-684c44c816da.png)\r\n\r\nI tried installing `\"pyarrow<4.0.0\"`, which gave me 3.0.0. Running, still, same issue.\r\n\r\nI agree it's weird that pyarrow is checking the source code directory for its files. (There is no `pyarrow/` directory there.) To me, that makes it seem like an issue with how pyarrow is called.\r\n\r\nOut of curiosity, I tried running this with fewer workers to see when the error arises:\r\n\r\n- 1: ✅\r\n- 2: ✅\r\n- 4: ✅\r\n- 8: ✅\r\n- 10: ✅\r\n- 11: ❌ 🤔\r\n- 12: ❌\r\n- 16: ❌\r\n- 32: ❌\r\n\r\nchecking my datasets:\r\n\r\n```python\r\n>>> datasets\r\nDatasetDict({\r\n train: Dataset({\r\n features: ['text'],\r\n num_rows: 389290\r\n })\r\n validation.sc: Dataset({\r\n features: ['text'],\r\n num_rows: 10 # 🤔\r\n })\r\n validation.wvs: Dataset({\r\n features: ['text'],\r\n num_rows: 93928\r\n })\r\n})\r\n```\r\n\r\nNew hypothesis: crash if `num_proc` > length of a dataset? 😅\r\n\r\nIf so, this might be totally my fault, as the caller. Could be a docs fix, or maybe this library could do a check to limit `num_proc` for this case?", "Good catch ! Not sure why it could raise such a weird issue from pyarrow though\r\nWe should definitely reduce num_proc to the length of the dataset if needed and log a warning.", "This has been fixed in #2566, thanks @connor-mccarthy !\r\nWe'll make a new release soon that includes the fix ;)" ]
1,623,278,422,000
1,625,132,094,000
1,625,130,673,000
NONE
null
## Describe the bug Crash when using `num_proc` > 1 (I used 16) for `map()` on a `datasets.Dataset`. I believe I've had cases where `num_proc` > 1 worked before, but now it seems either inconsistent, or dependent on my data. I'm not sure whether the issue is on my end, because it's difficult for me to debug! Any tips greatly appreciated, I'm happy to provide more info if it would help us diagnose. ## Steps to reproduce the bug ```python # this function will be applied with map() def tokenize_function(examples): return tokenizer( examples["text"], padding=PaddingStrategy.DO_NOT_PAD, truncation=True, ) # data_files is a Dict[str, str] mapping name -> path datasets = load_dataset("text", data_files={...}) # this is where the error happens if num_proc = 16, # but is fine if num_proc = 1 tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=num_workers, ) ``` ## Expected results The `map()` function succeeds with `num_proc` > 1. ## Actual results ![image](https://user-images.githubusercontent.com/1170062/121404271-a6cc5200-c910-11eb-8e27-5c893bd04042.png) ![image](https://user-images.githubusercontent.com/1170062/121404362-be0b3f80-c910-11eb-9117-658943029aef.png) ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2 - Platform: Linux-5.4.0-73-generic-x86_64-with-glibc2.31 - Python version: 3.9.5 - PyTorch version (GPU?): 1.8.1+cu111 (True) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: Yes, but I think N/A for this issue - Using distributed or parallel set-up in script?: Multi-GPU on one machine, but I think also N/A for this issue
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2470/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2470/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2462
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2462/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2462/comments
https://api.github.com/repos/huggingface/datasets/issues/2462/events
https://github.com/huggingface/datasets/issues/2462
915,384,613
MDU6SXNzdWU5MTUzODQ2MTM=
2,462
Merge DatasetDict and Dataset
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" }, { "id": 2067400324, "node_id": "MDU6TGFiZWwyMDY3NDAwMzI0", "url": "https://api.github.com/repos/huggingface/datasets/labels/generic%20discussion", "name": "generic discussion", "color": "c5def5", "default": false, "description": "Generic discussion on the library" } ]
open
false
null
[]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/8", "html_url": "https://github.com/huggingface/datasets/milestone/8", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/8/labels", "id": 6968069, "node_id": "MI_kwDODunzps4AalMF", "number": 8, "title": "1.12", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 4, "closed_issues": 2, "state": "open", "created_at": 1626881696000, "updated_at": 1634120793000, "due_on": 1630306800000, "closed_at": null }
[]
1,623,180,124,000
1,630,560,812,000
null
MEMBER
null
As discussed in #2424 and #2437 (please see there for detailed conversation): - It would be desirable to improve UX with respect to the confusion between DatasetDict and Dataset. - The difference between Dataset and DatasetDict is an additional abstraction complexity that confuses "typical" end users. - A user expects a "Dataset" (whether it contains multiple splits or a single one) and maybe it could be interesting to try to simplify the user-facing API as much as possible to hide this complexity from the end user. Here is a proposal for discussion, to be refined (and potentially abandoned if it's not good enough): - let's consider that a DatasetDict is also a Dataset with the various splits concatenated one after the other - let's disallow the use of integers in split names (probably not a very big breaking change) - when you index with integers you access the examples progressively, one split after the other (in a deterministic order) - when you index with strings/split names you have the same behavior as now (full backward compat) - let's then also have all the methods of a Dataset on the DatasetDict The end goal would be to merge both the Dataset and DatasetDict objects into a single object that would be (pretty much totally) backward compatible with both. There are a few things that we could discuss if we want to merge Dataset and DatasetDict: 1. what happens if you index by a string? Does it return the column or the split? We could disallow conflicts between column names and split names to avoid ambiguities. It can be surprising to be able to get a column or a split using the same indexing feature ``` from datasets import load_dataset dataset = load_dataset(...) dataset["train"] dataset["input_ids"] ``` 2. what happens when you iterate over the object? I guess it should iterate over the examples as a Dataset object, but a DatasetDict used to iterate over the splits as they are the dictionary keys. This is a breaking change that we can discuss. Moreover regarding your points: - integers are not allowed as split names already - it's definitely doable to have all the methods. Maybe some of them, like train_test_split which is currently only available for Dataset, can be tweaked to work for a split dataset cc: @thomwolf @lhoestq
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2462/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2462/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2459
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2459/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2459/comments
https://api.github.com/repos/huggingface/datasets/issues/2459/events
https://github.com/huggingface/datasets/issues/2459
915,222,015
MDU6SXNzdWU5MTUyMjIwMTU=
2,459
`Proto_qa` hosting seems to be broken
{ "login": "VictorSanh", "id": 16107619, "node_id": "MDQ6VXNlcjE2MTA3NjE5", "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "gravatar_id": "", "url": "https://api.github.com/users/VictorSanh", "html_url": "https://github.com/VictorSanh", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "repos_url": "https://api.github.com/users/VictorSanh/repos", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "@VictorSanh , I think @mariosasko is already working on it. " ]
1,623,168,992,000
1,623,313,869,000
1,623,313,869,000
MEMBER
null
## Describe the bug The hosting (on Github) of the `proto_qa` dataset seems broken. I haven't investigated more yet, just flagging it for now. @zaidalyafeai if you want to dive into it, I think it's just a matter of changing the links in `proto_qa.py` ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset("proto_qa") ``` ## Actual results ``` Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/load.py", line 751, in load_dataset use_auth_token=use_auth_token, File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 575, in download_and_prepare dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/builder.py", line 630, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/home/hf/.cache/huggingface/modules/datasets_modules/datasets/proto_qa/445346efaad5c5f200ecda4aa7f0fb50ff1b55edde3003be424a2112c3e8102e/proto_qa.py", line 131, in _split_generators train_fpath = dl_manager.download(_URLs[self.config.name]["train"]) File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 199, in download num_proc=download_config.num_proc, File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/py_utils.py", line 195, in map_nested return function(data_struct) File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/download_manager.py", line 218, in _download return cached_path(url_or_filename, download_config=download_config) File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/home/hf/dev/promptsource/.venv/lib/python3.7/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/iesl/protoqa-data/master/data/train/protoqa_train.jsonl ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2459/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2459/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2458
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2458/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2458/comments
https://api.github.com/repos/huggingface/datasets/issues/2458/events
https://github.com/huggingface/datasets/issues/2458
915,199,693
MDU6SXNzdWU5MTUxOTk2OTM=
2,458
Revert default in-memory for small datasets
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
{ "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }
[ { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false } ]
{ "url": "https://api.github.com/repos/huggingface/datasets/milestones/4", "html_url": "https://github.com/huggingface/datasets/milestone/4", "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/4/labels", "id": 6680642, "node_id": "MDk6TWlsZXN0b25lNjY4MDY0Mg==", "number": 4, "title": "1.8", "description": "Next minor release", "creator": { "login": "albertvillanova", "id": 8515462, "node_id": "MDQ6VXNlcjg1MTU0NjI=", "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "gravatar_id": "", "url": "https://api.github.com/users/albertvillanova", "html_url": "https://github.com/albertvillanova", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "repos_url": "https://api.github.com/users/albertvillanova/repos", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "type": "User", "site_admin": false }, "open_issues": 0, "closed_issues": 2, "state": "closed", "created_at": 1618937356000, "updated_at": 1623178297000, "due_on": 1623135600000, "closed_at": 1623178264000 }
[ "cc: @krandiash (pinged in reverted PR)." ]
1,623,167,501,000
1,623,178,631,000
1,623,174,943,000
MEMBER
null
Users are reporting issues and confusion about setting default in-memory to True for small datasets. We see 2 clear use cases of Datasets: - the "canonical" way, where you can work with very large datasets, as they are memory-mapped and cached (after every transformation) - some edge cases (speed benchmarks, interactive/exploratory analysis,...), where default in-memory can explicitly be enabled, and no caching will be done After discussing with @lhoestq we have agreed to: - revert this feature (implemented in #2182) - explain in the docs how to optimize speed/performance by setting default in-memory cc: @stas00 https://github.com/huggingface/datasets/pull/2409#issuecomment-856210552
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2458/reactions", "total_count": 2, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 1, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2458/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2452
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2452/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2452/comments
https://api.github.com/repos/huggingface/datasets/issues/2452/events
https://github.com/huggingface/datasets/issues/2452
913,603,877
MDU6SXNzdWU5MTM2MDM4Nzc=
2,452
MRPC test set differences between torch and tensorflow datasets
{ "login": "FredericOdermatt", "id": 50372080, "node_id": "MDQ6VXNlcjUwMzcyMDgw", "avatar_url": "https://avatars.githubusercontent.com/u/50372080?v=4", "gravatar_id": "", "url": "https://api.github.com/users/FredericOdermatt", "html_url": "https://github.com/FredericOdermatt", "followers_url": "https://api.github.com/users/FredericOdermatt/followers", "following_url": "https://api.github.com/users/FredericOdermatt/following{/other_user}", "gists_url": "https://api.github.com/users/FredericOdermatt/gists{/gist_id}", "starred_url": "https://api.github.com/users/FredericOdermatt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FredericOdermatt/subscriptions", "organizations_url": "https://api.github.com/users/FredericOdermatt/orgs", "repos_url": "https://api.github.com/users/FredericOdermatt/repos", "events_url": "https://api.github.com/users/FredericOdermatt/events{/privacy}", "received_events_url": "https://api.github.com/users/FredericOdermatt/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Realized that `tensorflow_datasets` is not provided by Huggingface and should therefore raise the issue there." ]
1,623,075,626,000
1,623,076,472,000
1,623,076,472,000
NONE
null
## Describe the bug When using `load_dataset("glue", "mrpc")` to load the MRPC dataset, the test set includes the labels. When using `tensorflow_datasets.load('glue/{}'.format('mrpc'))` to load the dataset the test set does not contain the labels. There should be consistency between torch and tensorflow ways of importing the GLUE datasets. ## Steps to reproduce the bug Minimal working code ```python from datasets import load_dataset import tensorflow as tf import tensorflow_datasets # torch dataset = load_dataset("glue", "mrpc") # tf data = tensorflow_datasets.load('glue/{}'.format('mrpc')) data = list(data['test'].as_numpy_iterator()) for i in range(40,50): tf_sentence1 = data[i]['sentence1'].decode("utf-8") tf_sentence2 = data[i]['sentence2'].decode("utf-8") tf_label = data[i]['label'] index = data[i]['idx'] print('Index {}'.format(index)) torch_sentence1 = dataset['test']['sentence1'][index] torch_sentence2 = dataset['test']['sentence2'][index] torch_label = dataset['test']['label'][index] print('Tensorflow: \n\tSentence1 {}\n\tSentence2 {}\n\tLabel {}'.format(tf_sentence1, tf_sentence2, tf_label)) print('Torch: \n\tSentence1 {}\n\tSentence2 {}\n\tLabel {}'.format(torch_sentence1, torch_sentence2, torch_label)) ``` Sample output ``` Index 954 Tensorflow: Sentence1 Sabri Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate yesterday on charges of violating U.S. arms-control laws . Sentence2 The elder Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate Wednesday on charges of violating U.S. arms control laws . Label -1 Torch: Sentence1 Sabri Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate yesterday on charges of violating U.S. arms-control laws . Sentence2 The elder Yakou , an Iraqi native who is a legal U.S. resident , appeared before a federal magistrate Wednesday on charges of violating U.S. arms control laws . Label 1 Index 711 Tensorflow: Sentence1 Others keep records sealed for as little as five years or as much as 30 . Sentence2 Some states make them available immediately ; others keep them sealed for as much as 30 years . Label -1 Torch: Sentence1 Others keep records sealed for as little as five years or as much as 30 . Sentence2 Some states make them available immediately ; others keep them sealed for as much as 30 years . Label 0 ``` ## Expected results I would expect the datasets to be independent of whether I am working with torch or tensorflow. ## Actual results Test set labels are provided in the `datasets.load_datasets()` for MRPC. However MRPC is the only task where the test set labels are not -1. ## Environment info - `datasets` version: 1.7.0 - Platform: Linux-5.4.109+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.10 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2452/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2452/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2450
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2450/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2450/comments
https://api.github.com/repos/huggingface/datasets/issues/2450/events
https://github.com/huggingface/datasets/issues/2450
912,890,291
MDU6SXNzdWU5MTI4OTAyOTE=
2,450
BLUE file not found
{ "login": "mirfan899", "id": 3822565, "node_id": "MDQ6VXNlcjM4MjI1NjU=", "avatar_url": "https://avatars.githubusercontent.com/u/3822565?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mirfan899", "html_url": "https://github.com/mirfan899", "followers_url": "https://api.github.com/users/mirfan899/followers", "following_url": "https://api.github.com/users/mirfan899/following{/other_user}", "gists_url": "https://api.github.com/users/mirfan899/gists{/gist_id}", "starred_url": "https://api.github.com/users/mirfan899/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mirfan899/subscriptions", "organizations_url": "https://api.github.com/users/mirfan899/orgs", "repos_url": "https://api.github.com/users/mirfan899/repos", "events_url": "https://api.github.com/users/mirfan899/events{/privacy}", "received_events_url": "https://api.github.com/users/mirfan899/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi ! The `blue` metric doesn't exist, but the `bleu` metric does.\r\nYou can get the full list of metrics [here](https://github.com/huggingface/datasets/tree/master/metrics) or by running\r\n```python\r\nfrom datasets import list_metrics\r\n\r\nprint(list_metrics())\r\n```", "Ah, my mistake. Thanks for correcting" ]
1,622,998,914,000
1,623,062,775,000
1,623,062,775,000
NONE
null
Hi, I'm having the following issue when I try to load the `blue` metric. ```shell import datasets metric = datasets.load_metric('blue') Traceback (most recent call last): File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 320, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/1.7.0/metrics/blue/blue.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 332, in prepare_module local_path = cached_path(file_path, download_config=download_config) File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 291, in cached_path use_auth_token=download_config.use_auth_token, File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/huggingface/datasets/master/metrics/blue/blue.py During handling of the above exception, another exception occurred: Traceback (most recent call last): File "<input>", line 1, in <module> File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 605, in load_metric dataset=False, File "/home/irfan/environments/Perplexity_Transformers/lib/python3.6/site-packages/datasets/load.py", line 343, in prepare_module combined_path, github_file_path FileNotFoundError: Couldn't find file locally at blue/blue.py, or remotely at https://raw.githubusercontent.com/huggingface/datasets/1.7.0/metrics/blue/blue.py. The file is also not present on the master branch on github. ``` Here is dataset installed version info ```shell pip freeze | grep datasets datasets==1.7.0 ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2450/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2450/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2447
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2447/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2447/comments
https://api.github.com/repos/huggingface/datasets/issues/2447/events
https://github.com/huggingface/datasets/issues/2447
912,299,527
MDU6SXNzdWU5MTIyOTk1Mjc=
2,447
dataset adversarial_qa has no answers in the "test" set
{ "login": "bjascob", "id": 22728060, "node_id": "MDQ6VXNlcjIyNzI4MDYw", "avatar_url": "https://avatars.githubusercontent.com/u/22728060?v=4", "gravatar_id": "", "url": "https://api.github.com/users/bjascob", "html_url": "https://github.com/bjascob", "followers_url": "https://api.github.com/users/bjascob/followers", "following_url": "https://api.github.com/users/bjascob/following{/other_user}", "gists_url": "https://api.github.com/users/bjascob/gists{/gist_id}", "starred_url": "https://api.github.com/users/bjascob/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bjascob/subscriptions", "organizations_url": "https://api.github.com/users/bjascob/orgs", "repos_url": "https://api.github.com/users/bjascob/repos", "events_url": "https://api.github.com/users/bjascob/events{/privacy}", "received_events_url": "https://api.github.com/users/bjascob/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! I'm pretty sure that the answers are not made available for the test set on purpose because it is part of the DynaBench benchmark, for which you can submit your predictions on the website.\r\nIn any case we should mention this in the dataset card of this dataset.", "Makes sense, but not intuitive for someone searching through the datasets. Thanks for adding the note to clarify." ]
1,622,905,058,000
1,623,064,387,000
1,623,064,387,000
NONE
null
## Describe the bug When loading the adversarial_qa dataset the 'test' portion has no answers. Only the 'train' and 'validation' portions do. This occurs with all four of the configs ('adversarialQA', 'dbidaf', 'dbert', 'droberta') ## Steps to reproduce the bug ``` from datasets import load_dataset examples = load_dataset('adversarial_qa', 'adversarialQA', script_version="master")['test'] print('Loaded {:,} examples'.format(len(examples))) has_answers = 0 for e in examples: if e['answers']['text']: has_answers += 1 print('{:,} have answers'.format(has_answers)) >>> Loaded 3,000 examples >>> 0 have answers examples = load_dataset('adversarial_qa', 'adversarialQA', script_version="master")['validation'] <...code above...> >>> Loaded 3,000 examples >>> 3,000 have answers ``` ## Expected results If 'test' is a valid dataset, it should have answers. Also note that all of the 'train' and 'validation' sets have answers, there are no "no answer" questions with this set (not sure if this is correct or not). ## Environment info - `datasets` version: 1.7.0 - Platform: Linux-5.8.0-53-generic-x86_64-with-glibc2.29 - Python version: 3.8.5 - PyArrow version: 1.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2447/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2447/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2446
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2446/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2446/comments
https://api.github.com/repos/huggingface/datasets/issues/2446/events
https://github.com/huggingface/datasets/issues/2446
911,635,399
MDU6SXNzdWU5MTE2MzUzOTk=
2,446
`yelp_polarity` is broken
{ "login": "JetRunner", "id": 22514219, "node_id": "MDQ6VXNlcjIyNTE0MjE5", "avatar_url": "https://avatars.githubusercontent.com/u/22514219?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JetRunner", "html_url": "https://github.com/JetRunner", "followers_url": "https://api.github.com/users/JetRunner/followers", "following_url": "https://api.github.com/users/JetRunner/following{/other_user}", "gists_url": "https://api.github.com/users/JetRunner/gists{/gist_id}", "starred_url": "https://api.github.com/users/JetRunner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JetRunner/subscriptions", "organizations_url": "https://api.github.com/users/JetRunner/orgs", "repos_url": "https://api.github.com/users/JetRunner/repos", "events_url": "https://api.github.com/users/JetRunner/events{/privacy}", "received_events_url": "https://api.github.com/users/JetRunner/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "```\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/script_runner.py\", line 332, in _run_script\r\n exec(code, module.__dict__)\r\nFile \"/home/sasha/nlp-viewer/run.py\", line 233, in <module>\r\n configs = get_confs(option)\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/caching.py\", line 604, in wrapped_func\r\n return get_or_create_cached_value()\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/streamlit/caching.py\", line 588, in get_or_create_cached_value\r\n return_value = func(*args, **kwargs)\r\nFile \"/home/sasha/nlp-viewer/run.py\", line 148, in get_confs\r\n builder_cls = nlp.load.import_main_class(module_path[0], dataset=True)\r\nFile \"/home/sasha/.local/share/virtualenvs/lib-ogGKnCK_/lib/python3.7/site-packages/datasets/load.py\", line 85, in import_main_class\r\n module = importlib.import_module(module_path)\r\nFile \"/usr/lib/python3.7/importlib/__init__.py\", line 127, in import_module\r\n return _bootstrap._gcd_import(name[level:], package, level)\r\nFile \"<frozen importlib._bootstrap>\", line 1006, in _gcd_import\r\nFile \"<frozen importlib._bootstrap>\", line 983, in _find_and_load\r\nFile \"<frozen importlib._bootstrap>\", line 967, in _find_and_load_unlocked\r\nFile \"<frozen importlib._bootstrap>\", line 677, in _load_unlocked\r\nFile \"<frozen importlib._bootstrap_external>\", line 728, in exec_module\r\nFile \"<frozen importlib._bootstrap>\", line 219, in _call_with_frames_removed\r\nFile \"/home/sasha/.cache/huggingface/modules/datasets_modules/datasets/yelp_polarity/a770787b2526bdcbfc29ac2d9beb8e820fbc15a03afd3ebc4fb9d8529de57544/yelp_polarity.py\", line 36, in <module>\r\n from datasets.tasks import TextClassification\r\n```", "Solved by updating the `nlpviewer`" ]
1,622,821,469,000
1,622,833,007,000
1,622,833,007,000
MEMBER
null
![image](https://user-images.githubusercontent.com/22514219/120828150-c4a35b00-c58e-11eb-8083-a537cee4dbb3.png)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2446/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2446/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2444
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2444/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2444/comments
https://api.github.com/repos/huggingface/datasets/issues/2444/events
https://github.com/huggingface/datasets/issues/2444
911,297,139
MDU6SXNzdWU5MTEyOTcxMzk=
2,444
Sentence Boundaries missing in Dataset: xtreme / udpos
{ "login": "jerryIsHere", "id": 50871412, "node_id": "MDQ6VXNlcjUwODcxNDEy", "avatar_url": "https://avatars.githubusercontent.com/u/50871412?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jerryIsHere", "html_url": "https://github.com/jerryIsHere", "followers_url": "https://api.github.com/users/jerryIsHere/followers", "following_url": "https://api.github.com/users/jerryIsHere/following{/other_user}", "gists_url": "https://api.github.com/users/jerryIsHere/gists{/gist_id}", "starred_url": "https://api.github.com/users/jerryIsHere/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jerryIsHere/subscriptions", "organizations_url": "https://api.github.com/users/jerryIsHere/orgs", "repos_url": "https://api.github.com/users/jerryIsHere/repos", "events_url": "https://api.github.com/users/jerryIsHere/events{/privacy}", "received_events_url": "https://api.github.com/users/jerryIsHere/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi,\r\n\r\nThis is a known issue. More info on this issue can be found in #2061. If you are looking for an open-source contribution, there are step-by-step instructions in the linked issue that you can follow to fix it.", "Closed by #2466." ]
1,622,797,826,000
1,624,017,223,000
1,624,017,223,000
CONTRIBUTOR
null
I was browsing through the annotation guidelines, as suggested by the datasets introduction. The guidelines say "There must be exactly one blank line after every sentence, including the last sentence in the file. Empty sentences are not allowed." in the [Sentence Boundaries and Comments section](https://universaldependencies.org/format.html#sentence-boundaries-and-comments). But the sentence boundaries do not seem to be well represented by the huggingface datasets features. I found out that multiple sentences are concatenated together as a 1D array, without any delimiter. PAN-x, which is another token classification subset from xtreme, does represent the sentence boundaries using a 2D array. You may compare PAN-x.en and udpos.English in the explorer: https://huggingface.co/datasets/viewer/?dataset=xtreme
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2444/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2444/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2443
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2443/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2443/comments
https://api.github.com/repos/huggingface/datasets/issues/2443/events
https://github.com/huggingface/datasets/issues/2443
909,983,574
MDU6SXNzdWU5MDk5ODM1NzQ=
2,443
Some tests hang on Windows
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! That would be nice indeed to at least have a warning, since we don't handle the max path length limit.\r\nAlso if we could have an error instead of an infinite loop I'm sure windows users will appreciate that", "Unfortunately, I know this problem very well... 😅 \r\n\r\nI remember having proposed to throw an error instead of hanging in an infinite loop #2220: 60c7d1b6b71469599a27147a08100f594e7a3f84, 8c8ab60018b00463edf1eca500e434ff061546fc \r\nbut @lhoestq told me:\r\n> Note that the filelock module comes from this project that hasn't changed in years - while still being used by ten of thousands of projects:\r\nhttps://github.com/benediktschmitt/py-filelock\r\n> \r\n> Unless we have proper tests for this, I wouldn't recommend to change it\r\n\r\nI opened an Issue requesting a warning/error at startup for that case: #2224", "@albertvillanova Thanks for additional info on this issue.\r\n\r\nYes, I think the best option is to throw an error instead of suppressing it in a loop. I've considered 2 more options, but I don't really like them:\r\n1. create a temporary file with a filename longer than 255 characters on import; if this fails, long paths are not enabled and raise a warning. I'm not sure about this approach because I don't like the idea of creating a temporary file on import for this purpose.\r\n2. check if long paths are enabled with [this code](https://stackoverflow.com/a/46546731/14095927). As mentioned in the comment, this code relies on an undocumented function and Win10-specific." ]
1,622,680,050,000
1,624,870,059,000
1,624,870,059,000
CONTRIBUTOR
null
Currently, several tests hang on Windows if the max path limit of 260 characters is not disabled. This happens due to the changes introduced by #2223 that cause an infinite loop in `WindowsFileLock` described in #2220. This can be very tricky to debug, so I think now is a good time to address these issues/PRs. IMO throwing an error is too harsh, but maybe we can emit a warning in the top-level `__init__.py ` on startup if long paths are not enabled.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2443/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2443/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2441
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2441/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2441/comments
https://api.github.com/repos/huggingface/datasets/issues/2441/events
https://github.com/huggingface/datasets/issues/2441
908,554,713
MDU6SXNzdWU5MDg1NTQ3MTM=
2,441
DuplicatedKeysError on personal dataset
{ "login": "lucaguarro", "id": 22605313, "node_id": "MDQ6VXNlcjIyNjA1MzEz", "avatar_url": "https://avatars.githubusercontent.com/u/22605313?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lucaguarro", "html_url": "https://github.com/lucaguarro", "followers_url": "https://api.github.com/users/lucaguarro/followers", "following_url": "https://api.github.com/users/lucaguarro/following{/other_user}", "gists_url": "https://api.github.com/users/lucaguarro/gists{/gist_id}", "starred_url": "https://api.github.com/users/lucaguarro/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lucaguarro/subscriptions", "organizations_url": "https://api.github.com/users/lucaguarro/orgs", "repos_url": "https://api.github.com/users/lucaguarro/repos", "events_url": "https://api.github.com/users/lucaguarro/events{/privacy}", "received_events_url": "https://api.github.com/users/lucaguarro/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! In your dataset script you must be yielding examples like\r\n```python\r\nfor line in file:\r\n ...\r\n yield key, {...}\r\n```\r\n\r\nSince `datasets` 1.7.0 we enforce the keys to be unique.\r\nHowever it looks like your examples generator creates duplicate keys: at least two examples have key 0.\r\n\r\nYou can fix that by making sure that your keys are unique.\r\n\r\nFor example if you use a counter to define the key of each example, make sure that your counter is not reset to 0 in during examples generation (between two open files for examples).\r\n\r\nLet me know if you have other questions :)", "Yup, I indeed was generating duplicate keys. Fixed it and now it's working." ]
1,622,570,381,000
1,622,850,603,000
1,622,850,603,000
NONE
null
## Describe the bug Ever since today, I have been getting a DuplicatedKeysError while trying to load my dataset from my own script. Error returned when running this line: `dataset = load_dataset('/content/drive/MyDrive/Thesis/Datasets/book_preprocessing/goodreads_maharjan_trimmed_and_nered/goodreadsnered.py')` Note that my script was working fine with earlier versions of the Datasets library. Cannot say with 100% certainty if I have been doing something wrong with my dataset script this whole time or if this is simply a bug with the new version of datasets. ## Steps to reproduce the bug I cannot provide code to reproduce the error as I am working with my own dataset. I can however provide my script if requested. ## Expected results For my data to be loaded. ## Actual results **DuplicatedKeysError** exception is raised ``` Downloading and preparing dataset good_reads_practice_dataset/main_domain (download: Unknown size, generated: Unknown size, post-processed: Unknown size, total: Unknown size) to /root/.cache/huggingface/datasets/good_reads_practice_dataset/main_domain/1.1.0/64ff7c3fee2693afdddea75002eb6887d4fedc3d812ae3622128c8504ab21655... --------------------------------------------------------------------------- DuplicatedKeysError Traceback (most recent call last) <ipython-input-6-c342ea0dae9d> in <module>() ----> 1 dataset = load_dataset('/content/drive/MyDrive/Thesis/Datasets/book_preprocessing/goodreads_maharjan_trimmed_and_nered/goodreadsnered.py') 5 frames /usr/local/lib/python3.7/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, task, **config_kwargs) 749 try_from_hf_gcs=try_from_hf_gcs, 750 base_path=base_path, --> 751 use_auth_token=use_auth_token, 752 ) 753 /usr/local/lib/python3.7/dist-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 573 if not downloaded_from_gcs: 574 self._download_and_prepare( --> 575 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 576 ) 577 # Sync info /usr/local/lib/python3.7/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 650 try: 651 # Prepare split will record examples associated to the split --> 652 self._prepare_split(split_generator, **prepare_split_kwargs) 653 except OSError as e: 654 raise OSError( /usr/local/lib/python3.7/dist-packages/datasets/builder.py in _prepare_split(self, split_generator) 990 writer.write(example, key) 991 finally: --> 992 num_examples, num_bytes = writer.finalize() 993 994 split_generator.split_info.num_examples = num_examples /usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in finalize(self, close_stream) 407 # In case current_examples < writer_batch_size, but user uses finalize() 408 if self._check_duplicates: --> 409 self.check_duplicate_keys() 410 # Re-intializing to empty list for next batch 411 self.hkey_record = [] /usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in check_duplicate_keys(self) 347 for hash, key in self.hkey_record: 348 if hash in tmp_record: --> 349 raise DuplicatedKeysError(key) 350 else: 351 tmp_record.add(hash) DuplicatedKeysError: FAILURE TO GENERATE DATASET ! 
Found duplicate Key: 0 Keys should be unique and deterministic in nature ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.7.0 - Platform: Windows-10-10.0.19041-SP0 - Python version: 3.7.9 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2441/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2441/timeline
null
null
null
false
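The fix described in the comments on the record above boils down to keeping the keys yielded by a dataset script's examples generator unique and deterministic. The sketch below only illustrates that pattern; the field names and input data are hypothetical, and a single running counter that is never reset between files stands in for whatever keying scheme the actual script uses.

```python
# Minimal sketch of the unique-key pattern recommended in the comments above.
# The "files" and the "text" field are hypothetical placeholders.

def generate_examples(files):
    key = 0  # one running counter, never reset between files
    for lines in files:
        for line in lines:
            yield key, {"text": line}
            key += 1

if __name__ == "__main__":
    files = [["file 1, line 1", "file 1, line 2"], ["file 2, line 1"]]
    keys = [k for k, _ in generate_examples(files)]
    assert len(keys) == len(set(keys))  # no duplicate keys, so no DuplicatedKeysError
    print(keys)  # [0, 1, 2]
```

Resetting `key = 0` inside the outer loop is exactly the kind of mistake that makes at least two examples share key 0 and triggers the error reported above.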
https://api.github.com/repos/huggingface/datasets/issues/2440
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2440/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2440/comments
https://api.github.com/repos/huggingface/datasets/issues/2440/events
https://github.com/huggingface/datasets/issues/2440
908,521,954
MDU6SXNzdWU5MDg1MjE5NTQ=
2,440
Remove `extended` field from dataset tagger
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "The tagger also doesn't insert the value for the `size_categories` field automatically, so this should be fixed too", "Thanks for reporting. Indeed the `extended` tag doesn't exist. Not sure why we had that in the tagger.\r\nThe repo of the tagger is here if someone wants to give this a try: https://github.com/huggingface/datasets-tagging\r\nOtherwise I can probably fix it next week", "I've opened a PR on `datasets-tagging` to fix the issue 🚀 ", "thanks ! this is fixed now" ]
1,622,567,922,000
1,623,229,591,000
1,623,229,590,000
MEMBER
null
## Describe the bug While working on #2435 I used the [dataset tagger](https://huggingface.co/datasets/tagging/) to generate the missing tags for the YAML metadata of each README.md file. However, it seems that our CI raises an error when the `extended` field is included: ``` dataset_name = 'arcd' @pytest.mark.parametrize("dataset_name", get_changed_datasets(repo_path)) def test_changed_dataset_card(dataset_name): card_path = repo_path / "datasets" / dataset_name / "README.md" assert card_path.exists() error_messages = [] try: ReadMe.from_readme(card_path) except Exception as readme_error: error_messages.append(f"The following issues have been found in the dataset cards:\nREADME:\n{readme_error}") try: DatasetMetadata.from_readme(card_path) except Exception as metadata_error: error_messages.append( f"The following issues have been found in the dataset cards:\nYAML tags:\n{metadata_error}" ) if error_messages: > raise ValueError("\n".join(error_messages)) E ValueError: The following issues have been found in the dataset cards: E YAML tags: E __init__() got an unexpected keyword argument 'extended' tests/test_dataset_cards.py:70: ValueError ``` Consider either removing this tag from the tagger or including it as part of the validation step in the CI. cc @yjernite
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2440/reactions", "total_count": 1, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 1 }
https://api.github.com/repos/huggingface/datasets/issues/2440/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2434
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2434/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2434/comments
https://api.github.com/repos/huggingface/datasets/issues/2434/events
https://github.com/huggingface/datasets/issues/2434
907,503,557
MDU6SXNzdWU5MDc1MDM1NTc=
2,434
Extend QuestionAnsweringExtractive template to handle nested columns
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
null
[]
null
[ "this is also the case for the following datasets and configurations:\r\n\r\n* `mlqa` with config `mlqa-translate-train.ar`\r\n\r\n" ]
1,622,470,011,000
1,623,918,090,000
null
MEMBER
null
Currently the `QuestionAnsweringExtractive` task template and `preprare_for_task` only support "flat" features. We should extend the functionality to cover QA datasets like: * `iapp_wiki_qa_squad` * `parsinlu_reading_comprehension` where the nested features differ with those from `squad` and trigger an `ArrowNotImplementedError`: ``` --------------------------------------------------------------------------- ArrowNotImplementedError Traceback (most recent call last) <ipython-input-12-50e5b8f69c20> in <module> ----> 1 ds.prepare_for_task("question-answering-extractive")[0] ~/git/datasets/src/datasets/arrow_dataset.py in prepare_for_task(self, task) 1436 # We found a template so now flush `DatasetInfo` to skip the template update in `DatasetInfo.__post_init__` 1437 dataset.info.task_templates = None -> 1438 dataset = dataset.cast(features=template.features) 1439 return dataset 1440 ~/git/datasets/src/datasets/arrow_dataset.py in cast(self, features, batch_size, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, num_proc) 977 format = self.format 978 dataset = self.with_format("arrow") --> 979 dataset = dataset.map( 980 lambda t: t.cast(schema), 981 batched=True, ~/git/datasets/src/datasets/arrow_dataset.py in map(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, num_proc, suffix_template, new_fingerprint, desc) 1600 1601 if num_proc is None or num_proc == 1: -> 1602 return self._map_single( 1603 function=function, 1604 with_indices=with_indices, ~/git/datasets/src/datasets/arrow_dataset.py in wrapper(*args, **kwargs) 176 } 177 # apply actual function --> 178 out: Union["Dataset", "DatasetDict"] = func(self, *args, **kwargs) 179 datasets: List["Dataset"] = list(out.values()) if isinstance(out, dict) else [out] 180 # re-apply format to the output ~/git/datasets/src/datasets/fingerprint.py in wrapper(*args, **kwargs) 395 # Call actual function 396 --> 397 out = func(self, *args, **kwargs) 398 399 # Update fingerprint of in-place transforms + update in-place history of transforms ~/git/datasets/src/datasets/arrow_dataset.py in _map_single(self, function, with_indices, input_columns, batched, batch_size, drop_last_batch, remove_columns, keep_in_memory, load_from_cache_file, cache_file_name, writer_batch_size, features, disable_nullable, fn_kwargs, new_fingerprint, rank, offset, desc) 1940 ) # Something simpler? 
1941 try: -> 1942 batch = apply_function_on_filtered_inputs( 1943 batch, 1944 indices, ~/git/datasets/src/datasets/arrow_dataset.py in apply_function_on_filtered_inputs(inputs, indices, check_same_num_examples, offset) 1836 effective_indices = [i + offset for i in indices] if isinstance(indices, list) else indices + offset 1837 processed_inputs = ( -> 1838 function(*fn_args, effective_indices, **fn_kwargs) if with_indices else function(*fn_args, **fn_kwargs) 1839 ) 1840 if update_data is None: ~/git/datasets/src/datasets/arrow_dataset.py in <lambda>(t) 978 dataset = self.with_format("arrow") 979 dataset = dataset.map( --> 980 lambda t: t.cast(schema), 981 batched=True, 982 batch_size=batch_size, ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/table.pxi in pyarrow.lib.Table.cast() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/table.pxi in pyarrow.lib.ChunkedArray.cast() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/compute.py in cast(arr, target_type, safe) 241 else: 242 options = CastOptions.unsafe(target_type) --> 243 return call_function("cast", [arr], options) 244 245 ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/_compute.pyx in pyarrow._compute.call_function() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/_compute.pyx in pyarrow._compute.Function.call() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/error.pxi in pyarrow.lib.pyarrow_internal_check_status() ~/miniconda3/envs/datasets/lib/python3.8/site-packages/pyarrow/error.pxi in pyarrow.lib.check_status() ArrowNotImplementedError: Unsupported cast from struct<answer_end: list<item: int32>, answer_start: list<item: int32>, text: list<item: string>> to struct using function cast_struct ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2434/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2434/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2431
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2431/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2431/comments
https://api.github.com/repos/huggingface/datasets/issues/2431/events
https://github.com/huggingface/datasets/issues/2431
907,413,691
MDU6SXNzdWU5MDc0MTM2OTE=
2,431
DuplicatedKeysError when trying to load adversarial_qa
{ "login": "hanss0n", "id": 21348833, "node_id": "MDQ6VXNlcjIxMzQ4ODMz", "avatar_url": "https://avatars.githubusercontent.com/u/21348833?v=4", "gravatar_id": "", "url": "https://api.github.com/users/hanss0n", "html_url": "https://github.com/hanss0n", "followers_url": "https://api.github.com/users/hanss0n/followers", "following_url": "https://api.github.com/users/hanss0n/following{/other_user}", "gists_url": "https://api.github.com/users/hanss0n/gists{/gist_id}", "starred_url": "https://api.github.com/users/hanss0n/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/hanss0n/subscriptions", "organizations_url": "https://api.github.com/users/hanss0n/orgs", "repos_url": "https://api.github.com/users/hanss0n/repos", "events_url": "https://api.github.com/users/hanss0n/events{/privacy}", "received_events_url": "https://api.github.com/users/hanss0n/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Thanks for reporting !\r\n#2433 fixed the issue, thanks @mariosasko :)\r\n\r\nWe'll do a patch release soon of the library.\r\nIn the meantime, you can use the fixed version of adversarial_qa by adding `script_version=\"master\"` in `load_dataset`" ]
1,622,463,079,000
1,622,537,643,000
1,622,537,531,000
NONE
null
## Describe the bug A clear and concise description of what the bug is. ## Steps to reproduce the bug ```python dataset = load_dataset('adversarial_qa', 'adversarialQA') ``` ## Expected results The dataset should be loaded into memory ## Actual results >DuplicatedKeysError: FAILURE TO GENERATE DATASET ! >Found duplicate Key: 4d3cb5677211ee32895ca9c66dad04d7152254d4 >Keys should be unique and deterministic in nature > > >During handling of the above exception, another exception occurred: > >DuplicatedKeysError Traceback (most recent call last) > >/usr/local/lib/python3.7/dist-packages/datasets/arrow_writer.py in check_duplicate_keys(self) > 347 for hash, key in self.hkey_record: > 348 if hash in tmp_record: >--> 349 raise DuplicatedKeysError(key) > 350 else: > 351 tmp_record.add(hash) > >DuplicatedKeysError: FAILURE TO GENERATE DATASET ! >Found duplicate Key: 4d3cb5677211ee32895ca9c66dad04d7152254d4 >Keys should be unique and deterministic in nature ## Environment info - `datasets` version: 1.7.0 - Platform: Linux-5.4.109+-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.10 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2431/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2431/timeline
null
null
null
false
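The workaround given in the comment above, spelled out as a call, assuming a `datasets` version from that period in which `load_dataset` still accepts the `script_version` parameter:

```python
# Sketch of the suggested workaround: load the fixed adversarial_qa script from the
# master branch until the patch release lands. Requires network access.
from datasets import load_dataset

dataset = load_dataset("adversarial_qa", "adversarialQA", script_version="master")
print(dataset)
```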
https://api.github.com/repos/huggingface/datasets/issues/2426
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2426/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2426/comments
https://api.github.com/repos/huggingface/datasets/issues/2426/events
https://github.com/huggingface/datasets/issues/2426
906,473,546
MDU6SXNzdWU5MDY0NzM1NDY=
2,426
Saving Graph/Structured Data in Datasets
{ "login": "gsh199449", "id": 3295342, "node_id": "MDQ6VXNlcjMyOTUzNDI=", "avatar_url": "https://avatars.githubusercontent.com/u/3295342?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gsh199449", "html_url": "https://github.com/gsh199449", "followers_url": "https://api.github.com/users/gsh199449/followers", "following_url": "https://api.github.com/users/gsh199449/following{/other_user}", "gists_url": "https://api.github.com/users/gsh199449/gists{/gist_id}", "starred_url": "https://api.github.com/users/gsh199449/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gsh199449/subscriptions", "organizations_url": "https://api.github.com/users/gsh199449/orgs", "repos_url": "https://api.github.com/users/gsh199449/repos", "events_url": "https://api.github.com/users/gsh199449/events{/privacy}", "received_events_url": "https://api.github.com/users/gsh199449/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
closed
false
null
[]
null
[ "It should probably work out of the box to save structured data. If you want to show an example we can help you.", "An example of a toy dataset is like:\r\n```json\r\n[\r\n {\r\n \"name\": \"mike\",\r\n \"friends\": [\r\n \"tom\",\r\n \"lily\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"aaaaa\",\r\n \"reader\": [\r\n \"tom\",\r\n \"lucy\"\r\n ]\r\n }\r\n ]\r\n },\r\n {\r\n \"name\": \"tom\",\r\n \"friends\": [\r\n \"mike\",\r\n \"bbb\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"xxxxx\",\r\n \"reader\": [\r\n \"tom\",\r\n \"qqqq\"\r\n ]\r\n }\r\n ]\r\n }\r\n]\r\n```\r\nWe can use the friendship relation to build a directional graph, and a user node can be represented using the articles written by himself. And the relationship between articles can be built when the article has read by the same user.\r\nThis dataset can be used to model the heterogeneous relationship between users and articles, and this graph can be used to build recommendation systems to recommend articles to the user, or potential friends to the user.", "Hi,\r\n\r\nyou can do the following to load this data into a `Dataset`:\r\n```python\r\nfrom datasets import Dataset\r\nexamples = [\r\n {\r\n \"name\": \"mike\",\r\n \"friends\": [\r\n \"tom\",\r\n \"lily\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"aaaaa\",\r\n \"reader\": [\r\n \"tom\",\r\n \"lucy\"\r\n ]\r\n }\r\n ]\r\n },\r\n {\r\n \"name\": \"tom\",\r\n \"friends\": [\r\n \"mike\",\r\n \"bbb\"\r\n ],\r\n \"articles\": [\r\n {\r\n \"title\": \"xxxxx\",\r\n \"reader\": [\r\n \"tom\",\r\n \"qqqq\"\r\n ]\r\n }\r\n ]\r\n }\r\n]\r\n\r\nkeys = examples[0].keys()\r\nvalues = [ex.values() for ex in examples]\r\ndataset = Dataset.from_dict({k: list(v) for k, v in zip(keys, zip(*values))})\r\n```\r\n\r\nLet us know if this works for you.", "Thank you so much, and that works! I also have a question that if the dataset is very large, that cannot be loaded into the memory. How to create the Dataset?", "If your dataset doesn't fit in memory, store it in a local file and load it from there. Check out [this chapter](https://huggingface.co/docs/datasets/master/loading_datasets.html#from-local-files) in the docs for more info.", "Nice! Thanks for your help." ]
1,622,295,321,000
1,622,596,863,000
1,622,596,863,000
NONE
null
Thanks for this amazing library! My question: I have structured data that is organized as a graph, for example a dataset with users' friendship relations and users' articles. When I try to save a python dict in the dataset, an error occurs: ``did not recognize Python value type when inferring an Arrow data type''. I know that storing a python dict in pyarrow datasets is not the best practice, but I have no idea how to save structured data in Datasets. Thank you very much for your help.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2426/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2426/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2424
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2424/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2424/comments
https://api.github.com/repos/huggingface/datasets/issues/2424/events
https://github.com/huggingface/datasets/issues/2424
906,193,679
MDU6SXNzdWU5MDYxOTM2Nzk=
2,424
load_from_disk and save_to_disk are not compatible with each other
{ "login": "roholazandie", "id": 7584674, "node_id": "MDQ6VXNlcjc1ODQ2NzQ=", "avatar_url": "https://avatars.githubusercontent.com/u/7584674?v=4", "gravatar_id": "", "url": "https://api.github.com/users/roholazandie", "html_url": "https://github.com/roholazandie", "followers_url": "https://api.github.com/users/roholazandie/followers", "following_url": "https://api.github.com/users/roholazandie/following{/other_user}", "gists_url": "https://api.github.com/users/roholazandie/gists{/gist_id}", "starred_url": "https://api.github.com/users/roholazandie/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/roholazandie/subscriptions", "organizations_url": "https://api.github.com/users/roholazandie/orgs", "repos_url": "https://api.github.com/users/roholazandie/repos", "events_url": "https://api.github.com/users/roholazandie/events{/privacy}", "received_events_url": "https://api.github.com/users/roholazandie/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "Hi,\r\n\r\n`load_dataset` returns an instance of `DatasetDict` if `split` is not specified, so instead of `Dataset.load_from_disk`, use `DatasetDict.load_from_disk` to load the dataset from disk.", "Thanks it worked!", "Though I see a stream of issues open by people lost between datasets and datasets dicts so maybe there is here something that could be better in terms of UX. Could be better error handling or something else smarter to even avoid said errors but maybe we should think about this. Reopening to use this issue as a discussion place but feel free to open a new open if you prefer @lhoestq @albertvillanova ", "We should probably improve the error message indeed.\r\n\r\nAlso note that there exists a function `load_from_disk` that can load a Dataset or a DatasetDict. Under the hood it calls either `Dataset.load_from_disk` or `DatasetDict.load_from_disk`:\r\n\r\n\r\n```python\r\nfrom datasets import load_from_disk\r\n\r\ndataset_dict = load_from_disk(\"path/to/dataset/dict\")\r\nsingle_dataset = load_from_disk(\"path/to/single/dataset\")\r\n```", "I just opened #2437 to improve the error message", "Superseded by #2462 " ]
1,622,243,230,000
1,623,180,152,000
1,623,180,152,000
NONE
null
## Describe the bug load_from_disk and save_to_disk are not compatible. When I use save_to_disk to save a dataset to disk it works perfectly, but given the same directory load_from_disk throws an error that it can't find state.json. It looks like load_from_disk only works on a single split. ## Steps to reproduce the bug ```python from datasets import load_dataset, Dataset dataset = load_dataset("art") dataset.save_to_disk("mydir") d = Dataset.load_from_disk("mydir") ``` ## Expected results These two functions are expected to be the inverse of each other without further manipulation. ## Actual results FileNotFoundError: [Errno 2] No such file or directory: 'mydir/art/state.json' ## Environment info - `datasets` version: 1.6.2 - Platform: Linux-5.4.0-73-generic-x86_64-with-Ubuntu-18.04-bionic - Python version: 3.7.10 - PyTorch version (GPU?): 1.8.1+cu102 (True) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: <fill in> - Using distributed or parallel set-up in script?: <fill in>
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2424/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2424/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2415
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2415/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2415/comments
https://api.github.com/repos/huggingface/datasets/issues/2415/events
https://github.com/huggingface/datasets/issues/2415
903,923,097
MDU6SXNzdWU5MDM5MjMwOTc=
2,415
Cached dataset not loaded
{ "login": "borisdayma", "id": 715491, "node_id": "MDQ6VXNlcjcxNTQ5MQ==", "avatar_url": "https://avatars.githubusercontent.com/u/715491?v=4", "gravatar_id": "", "url": "https://api.github.com/users/borisdayma", "html_url": "https://github.com/borisdayma", "followers_url": "https://api.github.com/users/borisdayma/followers", "following_url": "https://api.github.com/users/borisdayma/following{/other_user}", "gists_url": "https://api.github.com/users/borisdayma/gists{/gist_id}", "starred_url": "https://api.github.com/users/borisdayma/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/borisdayma/subscriptions", "organizations_url": "https://api.github.com/users/borisdayma/orgs", "repos_url": "https://api.github.com/users/borisdayma/repos", "events_url": "https://api.github.com/users/borisdayma/events{/privacy}", "received_events_url": "https://api.github.com/users/borisdayma/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "It actually seems to happen all the time in above configuration:\r\n* the function `filter_by_duration` correctly loads cached processed dataset\r\n* the function `prepare_dataset` is always reexecuted\r\n\r\nI end up solving the issue by saving to disk my dataset at the end but I'm still wondering if it's a bug or limitation here.", "Hi ! The hash used for caching `map` results is the fingerprint of the resulting dataset. It is computed using three things:\r\n- the old fingerprint of the dataset\r\n- the hash of the function\r\n- the hash of the other parameters passed to `map`\r\n\r\nYou can compute the hash of your function (or any python object) with\r\n```python\r\nfrom datasets.fingerprint import Hasher\r\n\r\nmy_func = lambda x: x + 1\r\nprint(Hasher.hash(my_func))\r\n```\r\n\r\nIf `prepare_dataset` is always executed, maybe this is because your `processor` has a different hash each time you want to execute it.", "> If `prepare_dataset` is always executed, maybe this is because your `processor` has a different hash each time you want to execute it.\r\n\r\nYes I think that was the issue.\r\n\r\nFor the hash of the function:\r\n* does it consider just the name or the actual code of the function\r\n* does it consider variables that are not passed explicitly as parameters to the functions (such as the processor here)", "> does it consider just the name or the actual code of the function\r\n\r\nIt looks at the name and the actual code and all variables such as recursively. It uses `dill` to do so, which is based on `pickle`.\r\nBasically the hash is computed using the pickle bytes of your function (computed using `dill` to support most python objects).\r\n\r\n> does it consider variables that are not passed explicitly as parameters to the functions (such as the processor here)\r\n\r\nYes it does thanks to recursive pickling.", "Thanks for these explanations. I'm closing the issue." ]
1,622,130,006,000
1,622,639,747,000
1,622,639,747,000
CONTRIBUTOR
null
## Describe the bug I have a large dataset (common_voice, english) where I use several map and filter functions. Sometimes my cached datasets after specific functions are not loaded. I always use the same arguments, same functions, no seed… ## Steps to reproduce the bug ```python def filter_by_duration(batch): return ( batch["duration"] <= 10 and batch["duration"] >= 1 and len(batch["target_text"]) > 5 ) def prepare_dataset(batch): batch["input_values"] = processor( batch["speech"], sampling_rate=batch["sampling_rate"][0] ).input_values with processor.as_target_processor(): batch["labels"] = processor(batch["target_text"]).input_ids return batch train_dataset = train_dataset.filter( filter_by_duration, remove_columns=["duration"], num_proc=data_args.preprocessing_num_workers, ) # PROBLEM HERE -> below function is reexecuted and cache is not loaded train_dataset = train_dataset.map( prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, ) # Later in script set_caching_enabled(False) # apply map on trained model to eval/test sets ``` ## Expected results The cached dataset should always be reloaded. ## Actual results The function is reexecuted. I have access to cached files `cache-xxxxx.arrow`. Is there a way I can somehow load manually 2 versions and see how the hash was created for debug purposes (to know if it's an issue with dataset or function)? ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2 - Platform: Linux-5.8.0-45-generic-x86_64-with-glibc2.29 - Python version: 3.8.5 - PyTorch version (GPU?): 1.8.1+cu102 (True) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: Yes - Using distributed or parallel set-up in script?: No
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2415/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2415/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2413
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2413/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2413/comments
https://api.github.com/repos/huggingface/datasets/issues/2413/events
https://github.com/huggingface/datasets/issues/2413
903,777,557
MDU6SXNzdWU5MDM3Nzc1NTc=
2,413
AttributeError: 'DatasetInfo' object has no attribute 'task_templates'
{ "login": "jungwhank", "id": 53588015, "node_id": "MDQ6VXNlcjUzNTg4MDE1", "avatar_url": "https://avatars.githubusercontent.com/u/53588015?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jungwhank", "html_url": "https://github.com/jungwhank", "followers_url": "https://api.github.com/users/jungwhank/followers", "following_url": "https://api.github.com/users/jungwhank/following{/other_user}", "gists_url": "https://api.github.com/users/jungwhank/gists{/gist_id}", "starred_url": "https://api.github.com/users/jungwhank/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jungwhank/subscriptions", "organizations_url": "https://api.github.com/users/jungwhank/orgs", "repos_url": "https://api.github.com/users/jungwhank/repos", "events_url": "https://api.github.com/users/jungwhank/events{/privacy}", "received_events_url": "https://api.github.com/users/jungwhank/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi ! Can you try using a more up-to-date version ? We added the task_templates in `datasets` 1.7.0.\r\n\r\nIdeally when you're working on new datasets, you should install and use the local version of your fork of `datasets`. Here I think you tried to run the 1.7.0 tests with the 1.6.2 code" ]
1,622,123,068,000
1,622,509,547,000
1,622,509,547,000
CONTRIBUTOR
null
## Describe the bug Hello, I'm trying to add a dataset and contribute, but the test keeps failing with the CLI below. ` RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_all_configs_<my_dataset>` ## Steps to reproduce the bug It seems like a bug, since I see an error with an existing dataset, not the dataset I'm trying to add. ` RUN_SLOW=1 pytest tests/test_dataset_common.py::LocalDatasetTest::test_load_dataset_all_configs_<any_dataset>` ## Expected results All tests pass ## Actual results ``` # check that dataset is not empty self.parent.assertListEqual(sorted(dataset_builder.info.splits.keys()), sorted(dataset)) for split in dataset_builder.info.splits.keys(): # check that loaded datset is not empty self.parent.assertTrue(len(dataset[split]) > 0) # check that we can cast features for each task template > task_templates = dataset_builder.info.task_templates E AttributeError: 'DatasetInfo' object has no attribute 'task_templates' tests/test_dataset_common.py:175: AttributeError ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2 - Platform: Darwin-20.4.0-x86_64-i386-64bit - Python version: 3.7.7 - PyTorch version (GPU?): 1.7.0 (False) - Tensorflow version (GPU?): 2.3.0 (False) - Using GPU in script?: No - Using distributed or parallel set-up in script?: No
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2413/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2413/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2412
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2412/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2412/comments
https://api.github.com/repos/huggingface/datasets/issues/2412/events
https://github.com/huggingface/datasets/issues/2412
903,769,151
MDU6SXNzdWU5MDM3NjkxNTE=
2,412
Docstring mistake: dataset vs. metric
{ "login": "PhilipMay", "id": 229382, "node_id": "MDQ6VXNlcjIyOTM4Mg==", "avatar_url": "https://avatars.githubusercontent.com/u/229382?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PhilipMay", "html_url": "https://github.com/PhilipMay", "followers_url": "https://api.github.com/users/PhilipMay/followers", "following_url": "https://api.github.com/users/PhilipMay/following{/other_user}", "gists_url": "https://api.github.com/users/PhilipMay/gists{/gist_id}", "starred_url": "https://api.github.com/users/PhilipMay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PhilipMay/subscriptions", "organizations_url": "https://api.github.com/users/PhilipMay/orgs", "repos_url": "https://api.github.com/users/PhilipMay/repos", "events_url": "https://api.github.com/users/PhilipMay/events{/privacy}", "received_events_url": "https://api.github.com/users/PhilipMay/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[ "> I can provide a PR l8er...\r\n\r\nSee #2425 " ]
1,622,122,751,000
1,622,535,484,000
1,622,535,484,000
CONTRIBUTOR
null
This docstring: https://github.com/huggingface/datasets/blob/d95b95f8cf3cb0cff5f77a675139b584dcfcf719/src/datasets/load.py#L582 should rather read something like: `a metric identifier on HuggingFace AWS bucket (list all available metrics and ids with ``datasets.list_metrics()``)` I can provide a PR later...
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2412/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2412/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2407
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2407/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2407/comments
https://api.github.com/repos/huggingface/datasets/issues/2407/events
https://github.com/huggingface/datasets/issues/2407
903,111,755
MDU6SXNzdWU5MDMxMTE3NTU=
2,407
.map() function got an unexpected keyword argument 'cache_file_name'
{ "login": "cindyxinyiwang", "id": 7390482, "node_id": "MDQ6VXNlcjczOTA0ODI=", "avatar_url": "https://avatars.githubusercontent.com/u/7390482?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cindyxinyiwang", "html_url": "https://github.com/cindyxinyiwang", "followers_url": "https://api.github.com/users/cindyxinyiwang/followers", "following_url": "https://api.github.com/users/cindyxinyiwang/following{/other_user}", "gists_url": "https://api.github.com/users/cindyxinyiwang/gists{/gist_id}", "starred_url": "https://api.github.com/users/cindyxinyiwang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cindyxinyiwang/subscriptions", "organizations_url": "https://api.github.com/users/cindyxinyiwang/orgs", "repos_url": "https://api.github.com/users/cindyxinyiwang/repos", "events_url": "https://api.github.com/users/cindyxinyiwang/events{/privacy}", "received_events_url": "https://api.github.com/users/cindyxinyiwang/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi @cindyxinyiwang,\r\nDid you try adding `.arrow` after `cache_file_name` argument? Here I think they're expecting something like that only for a cache file:\r\nhttps://github.com/huggingface/datasets/blob/e08362256fb157c0b3038437fc0d7a0bbb50de5c/src/datasets/arrow_dataset.py#L1556-L1558", "Hi ! `cache_file_name` is an argument of the `Dataset.map` method. Can you check that your `dataset` is indeed a `Dataset` object ?\r\n\r\nIf you loaded several splits, then it would actually be a `DatasetDict` (one dataset per split, in a dictionary).\r\nIn this case, since there are several datasets in the dict, the `DatasetDict.map` method requires a `cache_file_names` argument (with an 's'), so that you can provide one file name per split.", "I think you are right. I used cache_file_names={data1: name1, data2: name2} and it works. Thank you!" ]
1,622,080,466,000
1,622,123,200,000
1,622,123,200,000
NONE
null
## Describe the bug I'm trying to save the result of datasets.map() to a specific file, so that I can easily share it among multiple computers without reprocessing the dataset. However, when I try to pass an argument 'cache_file_name' to the .map() function, it throws an error that ".map() function got an unexpected keyword argument 'cache_file_name'". I believe I'm using the latest datasets, 1.6.2. Also, it seems like the documentation and the actual code indicate there is a 'cache_file_name' argument for the .map() function. Here is the code I use: ## Steps to reproduce the bug ```datasets = load_from_disk(dataset_path=my_path) [...] def tokenize_function(examples): return tokenizer(examples[text_column_name]) logger.info("Mapping dataset to tokenized dataset.") tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=True, cache_file_name="my_tokenized_file" ) ``` ## Actual results tokenized_datasets = datasets.map( TypeError: map() got an unexpected keyword argument 'cache_file_name' ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version:1.6.2 - Platform:Linux-4.18.0-193.28.1.el8_2.x86_64-x86_64-with-glibc2.10 - Python version:3.8.5 - PyArrow version:3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2407/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2407/timeline
null
null
null
false
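The resolution in the comments above is that `DatasetDict.map` takes `cache_file_names` (one file per split) while `Dataset.map` takes `cache_file_name`. Below is a minimal sketch of the per-split form; the splits, columns, and cache paths are hypothetical, and the mapped function is a stand-in for the real tokenizer.

```python
# Sketch of DatasetDict.map with one cache file per split, as suggested in the
# comments above. Running this writes the listed .arrow files next to the script.
from datasets import Dataset, DatasetDict

datasets = DatasetDict(
    {
        "train": Dataset.from_dict({"text": ["a", "bb"]}),
        "validation": Dataset.from_dict({"text": ["ccc"]}),
    }
)

tokenized_datasets = datasets.map(
    lambda examples: {"n_chars": [len(t) for t in examples["text"]]},
    batched=True,
    cache_file_names={
        "train": "train_tokenized.arrow",
        "validation": "validation_tokenized.arrow",
    },
)
print(tokenized_datasets)
```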
https://api.github.com/repos/huggingface/datasets/issues/2406
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2406/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2406/comments
https://api.github.com/repos/huggingface/datasets/issues/2406/events
https://github.com/huggingface/datasets/issues/2406
902,643,844
MDU6SXNzdWU5MDI2NDM4NDQ=
2,406
Add guide on using task templates to documentation
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892871, "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement", "name": "enhancement", "color": "a2eeef", "default": true, "description": "New feature or request" } ]
open
false
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false } ]
null
[]
1,622,046,506,000
1,622,046,506,000
null
MEMBER
null
Once we have a stable API on the text classification and question answering task templates, add a guide on how to use them in the documentation.
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2406/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2406/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2402
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2402/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2402/comments
https://api.github.com/repos/huggingface/datasets/issues/2402/events
https://github.com/huggingface/datasets/issues/2402
900,025,329
MDU6SXNzdWU5MDAwMjUzMjk=
2,402
PermissionError on Windows when using temp dir for caching
{ "login": "mariosasko", "id": 47462742, "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mariosasko", "html_url": "https://github.com/mariosasko", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "repos_url": "https://api.github.com/users/mariosasko/repos", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[]
1,621,891,379,000
1,622,047,169,000
1,622,047,169,000
CONTRIBUTOR
null
Currently, the following code raises a PermissionError on master if working on Windows: ```python # run as a script or call exit() in REPL to initiate the temp dir cleanup from datasets import * d = load_dataset("sst", split="train", keep_in_memory=False) set_caching_enabled(False) d.map(lambda ex: ex) ``` Error stack trace: ``` Traceback (most recent call last): File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\weakref.py", line 624, in _exitfunc f() File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\weakref.py", line 548, in __call__ return info.func(*info.args, **(info.kwargs or {})) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\tempfile.py", line 799, in _cleanup _shutil.rmtree(name) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\shutil.py", line 500, in rmtree return _rmtree_unsafe(path, onerror) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\shutil.py", line 395, in _rmtree_unsafe onerror(os.unlink, fullname, sys.exc_info()) File "C:\Users\Mario\Anaconda3\envs\hf-datasets\lib\shutil.py", line 393, in _rmtree_unsafe os.unlink(fullname) PermissionError: [WinError 5] Access is denied: 'C:\\Users\\Mario\\AppData\\Local\\Temp\\tmp20epyhmq\\cache-87a87ffb5a956e68.arrow' ```
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2402/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2402/timeline
null
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/2401
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2401/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2401/comments
https://api.github.com/repos/huggingface/datasets/issues/2401/events
https://github.com/huggingface/datasets/issues/2401
899,910,521
MDU6SXNzdWU4OTk5MTA1MjE=
2,401
load_dataset('natural_questions') fails with "ValueError: External features info don't match the dataset"
{ "login": "jonrbates", "id": 15602718, "node_id": "MDQ6VXNlcjE1NjAyNzE4", "avatar_url": "https://avatars.githubusercontent.com/u/15602718?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jonrbates", "html_url": "https://github.com/jonrbates", "followers_url": "https://api.github.com/users/jonrbates/followers", "following_url": "https://api.github.com/users/jonrbates/following{/other_user}", "gists_url": "https://api.github.com/users/jonrbates/gists{/gist_id}", "starred_url": "https://api.github.com/users/jonrbates/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jonrbates/subscriptions", "organizations_url": "https://api.github.com/users/jonrbates/orgs", "repos_url": "https://api.github.com/users/jonrbates/repos", "events_url": "https://api.github.com/users/jonrbates/events{/privacy}", "received_events_url": "https://api.github.com/users/jonrbates/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false }
[ { "login": "lhoestq", "id": 42851186, "node_id": "MDQ6VXNlcjQyODUxMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lhoestq", "html_url": "https://github.com/lhoestq", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "repos_url": "https://api.github.com/users/lhoestq/repos", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "type": "User", "site_admin": false } ]
null
[ "I faced the similar problem. Downgrading datasets to 1.5.0 fixed it.", "Thanks for reporting, I'm looking into it", "I just opened #2438 to fix this :)", "Hi ! This has been fixed in the 1.8.0 release of `datasets`" ]
1,621,881,533,000
1,623,229,645,000
1,623,229,645,000
NONE
null
## Describe the bug load_dataset('natural_questions') throws ValueError ## Steps to reproduce the bug ```python from datasets import load_dataset datasets = load_dataset('natural_questions', split='validation[:10]') ``` ## Expected results Call to load_dataset returns data. ## Actual results ``` Using custom data configuration default Reusing dataset natural_questions (/mnt/d/huggingface/datasets/natural_questions/default/0.0.2/19bc04755018a3ad02ee74f7045cde4ba9b4162cb64450a87030ab786b123b76) --------------------------------------------------------------------------- ValueError Traceback (most recent call last) <ipython-input-2-d55ab8a8cc1c> in <module> ----> 1 datasets = load_dataset('natural_questions', split='validation[:10]', cache_dir='/mnt/d/huggingface/datasets') ~/miniconda3/lib/python3.8/site-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, script_version, use_auth_token, **config_kwargs) 756 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 757 ) --> 758 ds = builder_instance.as_dataset(split=split, ignore_verifications=ignore_verifications, in_memory=keep_in_memory) 759 if save_infos: 760 builder_instance._save_infos() ~/miniconda3/lib/python3.8/site-packages/datasets/builder.py in as_dataset(self, split, run_post_process, ignore_verifications, in_memory) 735 736 # Create a dataset for each of the given splits --> 737 datasets = utils.map_nested( 738 partial( 739 self._build_single_dataset, ~/miniconda3/lib/python3.8/site-packages/datasets/utils/py_utils.py in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, types) 193 # Singleton 194 if not isinstance(data_struct, dict) and not isinstance(data_struct, types): --> 195 return function(data_struct) 196 197 disable_tqdm = bool(logger.getEffectiveLevel() > INFO) ~/miniconda3/lib/python3.8/site-packages/datasets/builder.py in _build_single_dataset(self, split, run_post_process, ignore_verifications, in_memory) 762 763 # Build base dataset --> 764 ds = self._as_dataset( 765 split=split, 766 in_memory=in_memory, ~/miniconda3/lib/python3.8/site-packages/datasets/builder.py in _as_dataset(self, split, in_memory) 838 in_memory=in_memory, 839 ) --> 840 return Dataset(**dataset_kwargs) 841 842 def _post_process(self, dataset: Dataset, resources_paths: Dict[str, str]) -> Optional[Dataset]: ~/miniconda3/lib/python3.8/site-packages/datasets/arrow_dataset.py in __init__(self, arrow_table, info, split, indices_table, fingerprint) 271 assert self._fingerprint is not None, "Fingerprint can't be None in a Dataset object" 272 if self.info.features.type != inferred_features.type: --> 273 raise ValueError( 274 "External features info don't match the dataset:\nGot\n{}\nwith type\n{}\n\nbut expected something like\n{}\nwith type\n{}".format( 275 self.info.features, self.info.features.type, inferred_features, inferred_features.type ValueError: External features info don't match the dataset: Got {'id': Value(dtype='string', id=None), 'document': {'title': Value(dtype='string', id=None), 'url': Value(dtype='string', id=None), 'html': Value(dtype='string', id=None), 'tokens': Sequence(feature={'token': Value(dtype='string', id=None), 'is_html': Value(dtype='bool', id=None)}, length=-1, id=None)}, 'question': {'text': Value(dtype='string', id=None), 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 
'annotations': Sequence(feature={'id': Value(dtype='string', id=None), 'long_answer': {'start_token': Value(dtype='int64', id=None), 'end_token': Value(dtype='int64', id=None), 'start_byte': Value(dtype='int64', id=None), 'end_byte': Value(dtype='int64', id=None)}, 'short_answers': Sequence(feature={'start_token': Value(dtype='int64', id=None), 'end_token': Value(dtype='int64', id=None), 'start_byte': Value(dtype='int64', id=None), 'end_byte': Value(dtype='int64', id=None), 'text': Value(dtype='string', id=None)}, length=-1, id=None), 'yes_no_answer': ClassLabel(num_classes=2, names=['NO', 'YES'], names_file=None, id=None)}, length=-1, id=None)} with type struct<annotations: struct<id: list<item: string>, long_answer: list<item: struct<start_token: int64, end_token: int64, start_byte: int64, end_byte: int64>>, short_answers: list<item: struct<end_byte: list<item: int64>, end_token: list<item: int64>, start_byte: list<item: int64>, start_token: list<item: int64>, text: list<item: string>>>, yes_no_answer: list<item: int64>>, document: struct<title: string, url: string, html: string, tokens: struct<is_html: list<item: bool>, token: list<item: string>>>, id: string, question: struct<text: string, tokens: list<item: string>>> but expected something like {'id': Value(dtype='string', id=None), 'document': {'html': Value(dtype='string', id=None), 'title': Value(dtype='string', id=None), 'tokens': {'is_html': Sequence(feature=Value(dtype='bool', id=None), length=-1, id=None), 'token': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 'url': Value(dtype='string', id=None)}, 'question': {'text': Value(dtype='string', id=None), 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}, 'annotations': {'id': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'long_answer': [{'end_byte': Value(dtype='int64', id=None), 'end_token': Value(dtype='int64', id=None), 'start_byte': Value(dtype='int64', id=None), 'start_token': Value(dtype='int64', id=None)}], 'short_answers': [{'end_byte': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'end_token': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'start_byte': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'start_token': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'text': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)}], 'yes_no_answer': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None)}} with type struct<annotations: struct<id: list<item: string>, long_answer: list<item: struct<end_byte: int64, end_token: int64, start_byte: int64, start_token: int64>>, short_answers: list<item: struct<end_byte: list<item: int64>, end_token: list<item: int64>, start_byte: list<item: int64>, start_token: list<item: int64>, text: list<item: string>>>, yes_no_answer: list<item: int64>>, document: struct<html: string, title: string, tokens: struct<is_html: list<item: bool>, token: list<item: string>>, url: string>, id: string, question: struct<text: string, tokens: list<item: string>>> ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2 - Platform: Linux-5.4.72-microsoft-standard-WSL2-x86_64-with-glibc2.10 - Python version: 3.8.3 - PyTorch version (GPU?): 1.6.0 (False) - Tensorflow version (GPU?): not installed (NA) - Using GPU in script?: No - Using distributed or parallel set-up in script?: No
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2401/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2401/timeline
null
null
null
false
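Following the replies in this issue (the mismatch was fixed in datasets 1.8.0, and 1.5.0 was unaffected), a minimal check is to verify the installed version and regenerate the locally cached copy. Passing `download_mode` as a string is an assumption that the library converts it to its `GenerateMode` enum, which holds for recent releases.

```python
import datasets
from datasets import load_dataset

print(datasets.__version__)  # expect >= 1.8.0 after upgrading

# rebuild the cached arrow files so the stored features info matches what the
# current version generates; note that natural_questions is large, so this
# re-downloads a significant amount of data
nq = load_dataset(
    "natural_questions",
    split="validation[:10]",
    download_mode="force_redownload",
)
```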
https://api.github.com/repos/huggingface/datasets/issues/2400
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2400/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2400/comments
https://api.github.com/repos/huggingface/datasets/issues/2400/events
https://github.com/huggingface/datasets/issues/2400
899,867,212
MDU6SXNzdWU4OTk4NjcyMTI=
2,400
Concatenate several datasets with removed columns is not working.
{ "login": "philschmid", "id": 32632186, "node_id": "MDQ6VXNlcjMyNjMyMTg2", "avatar_url": "https://avatars.githubusercontent.com/u/32632186?v=4", "gravatar_id": "", "url": "https://api.github.com/users/philschmid", "html_url": "https://github.com/philschmid", "followers_url": "https://api.github.com/users/philschmid/followers", "following_url": "https://api.github.com/users/philschmid/following{/other_user}", "gists_url": "https://api.github.com/users/philschmid/gists{/gist_id}", "starred_url": "https://api.github.com/users/philschmid/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/philschmid/subscriptions", "organizations_url": "https://api.github.com/users/philschmid/orgs", "repos_url": "https://api.github.com/users/philschmid/repos", "events_url": "https://api.github.com/users/philschmid/events{/privacy}", "received_events_url": "https://api.github.com/users/philschmid/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "Hi,\r\n\r\ndid you fill out the env info section manually or by copy-pasting the output of the `datasets-cli env` command?\r\n\r\nThis code should work without issues on 1.6.2 version (I'm working on master (1.6.2.dev0 version) and can't reproduce this error).", "@mariosasko you are right I was still on `1.5.0`. " ]
1,621,878,015,000
1,621,921,921,000
1,621,921,919,000
MEMBER
null
## Describe the bug You can't concatenate datasets after removing columns from them first. ## Steps to reproduce the bug ```python from datasets import load_dataset, concatenate_datasets wikiann = load_dataset("wikiann", "en") wikiann["train"] = wikiann["train"].remove_columns(["langs","spans"]) wikiann["test"] = wikiann["test"].remove_columns(["langs","spans"]) assert wikiann["train"].features.type == wikiann["test"].features.type concatenated = concatenate_datasets([wikiann["train"],wikiann["test"]]) ``` ## Expected results A merged dataset. ## Actual results ```python ValueError: External features info don't match the dataset: Got {'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'ner_tags': Sequence(feature=ClassLabel(num_classes=7, names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'], names_file=None, id=None), length=-1, id=None), 'langs': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None), 'spans': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)} with type struct<langs: list<item: string>, ner_tags: list<item: int64>, spans: list<item: string>, tokens: list<item: string>> but expected something like {'ner_tags': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None), 'tokens': Sequence(feature=Value(dtype='string', id=None), length=-1, id=None)} with type struct<ner_tags: list<item: int64>, tokens: list<item: string>> ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: ~1.6.2~ 1.5.0 - Platform: macOS - Python version: 3.8.5 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2400/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2400/timeline
null
null
null
false
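As the discussion in this issue notes, the error was observed on datasets 1.5.0 and the same code is reported to work on 1.6.2. A minimal sanity-check version of the reproduction, assuming a recent enough release, could look like this:

```python
import datasets
from datasets import load_dataset, concatenate_datasets

print(datasets.__version__)  # the bug was seen on 1.5.0; 1.6.2 works

wikiann = load_dataset("wikiann", "en")
train = wikiann["train"].remove_columns(["langs", "spans"])
test = wikiann["test"].remove_columns(["langs", "spans"])

# both splits now expose the same schema, so concatenation should succeed
assert train.features.type == test.features.type
combined = concatenate_datasets([train, test])
print(combined)
```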
https://api.github.com/repos/huggingface/datasets/issues/2398
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2398/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2398/comments
https://api.github.com/repos/huggingface/datasets/issues/2398/events
https://github.com/huggingface/datasets/issues/2398
899,511,837
MDU6SXNzdWU4OTk1MTE4Mzc=
2,398
News_commentary Dataset Translation Pairs are of Incorrect Language Specified Pairs
{ "login": "anassalamah", "id": 8571003, "node_id": "MDQ6VXNlcjg1NzEwMDM=", "avatar_url": "https://avatars.githubusercontent.com/u/8571003?v=4", "gravatar_id": "", "url": "https://api.github.com/users/anassalamah", "html_url": "https://github.com/anassalamah", "followers_url": "https://api.github.com/users/anassalamah/followers", "following_url": "https://api.github.com/users/anassalamah/following{/other_user}", "gists_url": "https://api.github.com/users/anassalamah/gists{/gist_id}", "starred_url": "https://api.github.com/users/anassalamah/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/anassalamah/subscriptions", "organizations_url": "https://api.github.com/users/anassalamah/orgs", "repos_url": "https://api.github.com/users/anassalamah/repos", "events_url": "https://api.github.com/users/anassalamah/events{/privacy}", "received_events_url": "https://api.github.com/users/anassalamah/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[]
1,621,850,614,000
1,621,850,614,000
null
NONE
null
I used `load_dataset` to load the news_commentary dataset for "ar-en" translation pairs but found translations from Arabic to Hindi. ```python from itertools import chain from datasets import load_dataset  train_ds = load_dataset("news_commentary", "ar-en", split='train[:98%]') val_ds = load_dataset("news_commentary", "ar-en", split='train[98%:]') # filtering out examples that are not ar-en translations but ar-hi val_ds = val_ds.filter(lambda example, indice: indice not in chain(range(1312,1327), range(1384,1399), range(1030,1042)), with_indices=True) ``` * I'm fairly new to using datasets, so I might be doing something wrong
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2398/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2398/timeline
null
null
null
false
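Hard-coding index ranges, as in the report above, only removes the rows that happen to have been spotted. A more general filter could run a language detector over the English side of each pair; the sketch below assumes the third-party `langdetect` package is installed and relies on the standard `translation` feature layout of the news_commentary configs.

```python
from datasets import load_dataset
from langdetect import detect  # assumption: pip install langdetect

ds = load_dataset("news_commentary", "ar-en", split="train")

def looks_like_english(example):
    # drop pairs whose "en" side is not detected as English (e.g. stray Hindi rows)
    try:
        return detect(example["translation"]["en"]) == "en"
    except Exception:
        return False

clean = ds.filter(looks_like_english)
print(len(ds), "->", len(clean))
```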
https://api.github.com/repos/huggingface/datasets/issues/2396
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2396/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2396/comments
https://api.github.com/repos/huggingface/datasets/issues/2396/events
https://github.com/huggingface/datasets/issues/2396
899,016,308
MDU6SXNzdWU4OTkwMTYzMDg=
2,396
strange datasets from OSCAR corpus
{ "login": "jerryIsHere", "id": 50871412, "node_id": "MDQ6VXNlcjUwODcxNDEy", "avatar_url": "https://avatars.githubusercontent.com/u/50871412?v=4", "gravatar_id": "", "url": "https://api.github.com/users/jerryIsHere", "html_url": "https://github.com/jerryIsHere", "followers_url": "https://api.github.com/users/jerryIsHere/followers", "following_url": "https://api.github.com/users/jerryIsHere/following{/other_user}", "gists_url": "https://api.github.com/users/jerryIsHere/gists{/gist_id}", "starred_url": "https://api.github.com/users/jerryIsHere/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jerryIsHere/subscriptions", "organizations_url": "https://api.github.com/users/jerryIsHere/orgs", "repos_url": "https://api.github.com/users/jerryIsHere/repos", "events_url": "https://api.github.com/users/jerryIsHere/events{/privacy}", "received_events_url": "https://api.github.com/users/jerryIsHere/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
open
false
null
[]
null
[ "Hi ! Thanks for reporting\r\ncc @pjox is this an issue from the data ?\r\n\r\nAnyway we should at least mention that OSCAR could contain such contents in the dataset card, you're totally right @jerryIsHere ", "Hi @jerryIsHere , sorry for the late response! Sadly this is normal, the problem comes form fasttext's classifier which we used to create the original corpus. In general the classifier is not really capable of properly recognizing Yue Chineese so the file ends un being just noise from Common Crawl. Some of these problems with OSCAR were already discussed [here](https://arxiv.org/pdf/2103.12028.pdf) but we are working on explicitly documenting the problems by language on our website. In fact, could please you open an issue on [our repo](https://github.com/oscar-corpus/oscar-website/issues) as well so that we can track it?" ]
1,621,775,162,000
1,623,938,077,000
null
CONTRIBUTOR
null
![image](https://user-images.githubusercontent.com/50871412/119260850-4f876b80-bc07-11eb-8894-124302600643.png) ![image](https://user-images.githubusercontent.com/50871412/119260875-675eef80-bc07-11eb-9da4-ee27567054ac.png) From the [official site](https://oscar-corpus.com/), the Yue Chinese dataset should have 2.2 KB of data. 7 training instances is obviously not the right number. As I can read Yue Chinese, I can tell the last instance is definitely not something that would appear on Common Crawl. And even if you don't read Yue Chinese, you can tell the first six instances are problematic. (It is embarrassing, as the 7 training instances look exactly like something from a pornographic novel or flirting messages in a chat of a dating app.) It might not be a problem with the huggingface/datasets implementation, because when I tried to download the dataset from the official site, I found out that the zip file is corrupted. I will try to inform the host of the OSCAR corpus later. Anyway, a remake of this dataset in huggingface/datasets is needed, perhaps after the host of the dataset fixes the issue. > Hi @jerryIsHere , sorry for the late response! Sadly this is normal, the problem comes form fasttext's classifier which we used to create the original corpus. In general the classifier is not really capable of properly recognizing Yue Chineese so the file ends un being just noise from Common Crawl. Some of these problems with OSCAR were already discussed [here](https://arxiv.org/pdf/2103.12028.pdf) but we are working on explicitly documenting the problems by language on our website. In fact, could please you open an issue on [our repo](https://github.com/oscar-corpus/oscar-website/issues) as well so that we can track it? Thanks a lot, the new post is here: https://github.com/oscar-corpus/oscar-website/issues/11
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2396/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2396/timeline
null
null
null
false
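For anyone wanting to confirm the report above locally, a quick inspection of the Yue Chinese subset is enough; the config name below follows OSCAR's `unshuffled_deduplicated_<lang>` naming scheme and should be treated as an assumption.

```python
from datasets import load_dataset

yue = load_dataset("oscar", "unshuffled_deduplicated_yue", split="train")
print(len(yue))  # reportedly only a handful of noisy records

for row in yue:
    # print the first characters of each document to eyeball the content
    print(row["id"], row["text"][:80])
```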
https://api.github.com/repos/huggingface/datasets/issues/2391
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2391/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2391/comments
https://api.github.com/repos/huggingface/datasets/issues/2391/events
https://github.com/huggingface/datasets/issues/2391
898,128,099
MDU6SXNzdWU4OTgxMjgwOTk=
2,391
Missing original answers in kilt-TriviaQA
{ "login": "PaulLerner", "id": 25532159, "node_id": "MDQ6VXNlcjI1NTMyMTU5", "avatar_url": "https://avatars.githubusercontent.com/u/25532159?v=4", "gravatar_id": "", "url": "https://api.github.com/users/PaulLerner", "html_url": "https://github.com/PaulLerner", "followers_url": "https://api.github.com/users/PaulLerner/followers", "following_url": "https://api.github.com/users/PaulLerner/following{/other_user}", "gists_url": "https://api.github.com/users/PaulLerner/gists{/gist_id}", "starred_url": "https://api.github.com/users/PaulLerner/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/PaulLerner/subscriptions", "organizations_url": "https://api.github.com/users/PaulLerner/orgs", "repos_url": "https://api.github.com/users/PaulLerner/repos", "events_url": "https://api.github.com/users/PaulLerner/events{/privacy}", "received_events_url": "https://api.github.com/users/PaulLerner/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
null
[]
null
[ "That could be useful indeed! Feel free to open a PR on the dataset card if you already have some code that runs, otherwise we'll take care of it soon :) ", "I can open a PR but there is 2 details to fix:\r\n- the name for the corresponding key (e.g. `original_answer`)\r\n- how to implement it: I’m not sure what happens when you map `lambda x: {'input': ...}` as it keeps the other keys (e.g. `output`) intact but here since we want to set a nested value (e.g. `x['output']['original_answer']`) I implemented it with a regular function (not lambda), see below\r\n\r\n```py\r\ndef add_original_answer(x, trivia_qa, triviaqa_map):\r\n i = triviaqa_map[x['id']]\r\n x['output']['original_answer'] = trivia_qa['validation'][i]['answer']['value']\r\n return x\r\n```" ]
1,621,609,027,000
1,623,691,751,000
1,623,691,751,000
CONTRIBUTOR
null
I previously opened an issue at https://github.com/facebookresearch/KILT/issues/42 but from @fabiopetroni's answer it seems that the problem comes from HF-datasets. ## Describe the bug The `answer` field in kilt-TriviaQA, e.g. `kilt_tasks['train_triviaqa'][0]['output']['answer']`, contains a list of alternative answers that are accepted for the question. However, it'd be nice to also know the original answer to the question (the only fields in `output` are `'answer', 'meta', 'provenance'`). ## How to fix It can be fixed by retrieving the original answer from the original TriviaQA (e.g. `trivia_qa['train'][0]['answer']['value']`), perhaps at the same place where one retrieves the questions: https://github.com/huggingface/datasets/blob/master/datasets/kilt_tasks/README.md#loading-the-kilt-knowledge-source-and-task-data cc @yjernite who previously answered an issue about KILT and TriviaQA :)
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2391/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2391/timeline
null
null
null
false
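A usage sketch for the helper proposed in the comments of this issue could look like the following. The config and split names (`unfiltered.nocontext`, `validation_triviaqa`) are taken from the kilt_tasks README and the issue text and should be treated as assumptions, as should the `original_answer` key name.

```python
from datasets import load_dataset

kilt_tasks = load_dataset("kilt_tasks")
trivia_qa = load_dataset("trivia_qa", "unfiltered.nocontext")

# map shared question ids to row indices in the original TriviaQA validation split
triviaqa_map = {q_id: i for i, q_id in enumerate(trivia_qa["validation"]["question_id"])}

def add_original_answer(x):
    i = triviaqa_map[x["id"]]
    x["output"]["original_answer"] = trivia_qa["validation"][i]["answer"]["value"]
    return x

# keep only KILT rows whose id exists in the original TriviaQA, then attach the answer
kilt_val = kilt_tasks["validation_triviaqa"].filter(lambda x: x["id"] in triviaqa_map)
kilt_val = kilt_val.map(add_original_answer)
```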
https://api.github.com/repos/huggingface/datasets/issues/2388
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2388/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2388/comments
https://api.github.com/repos/huggingface/datasets/issues/2388/events
https://github.com/huggingface/datasets/issues/2388
897,767,470
MDU6SXNzdWU4OTc3Njc0NzA=
2,388
Incorrect URLs for some datasets
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "id": 1935892857, "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug", "name": "bug", "color": "d73a4a", "default": true, "description": "Something isn't working" } ]
closed
false
{ "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false }
[ { "login": "lewtun", "id": 26859204, "node_id": "MDQ6VXNlcjI2ODU5MjA0", "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lewtun", "html_url": "https://github.com/lewtun", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "organizations_url": "https://api.github.com/users/lewtun/orgs", "repos_url": "https://api.github.com/users/lewtun/repos", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "received_events_url": "https://api.github.com/users/lewtun/received_events", "type": "User", "site_admin": false } ]
null
[]
1,621,581,755,000
1,622,828,385,000
1,622,828,385,000
MEMBER
null
## Describe the bug It seems that the URLs for the following datasets are invalid: - [ ] `bn_hate_speech` has been renamed: https://github.com/rezacsedu/Bengali-Hate-Speech-Dataset/commit/c67ecfc4184911e12814f6b36901f9828df8a63a - [ ] `covid_tweets_japanese` has been renamed: http://www.db.info.gifu-u.ac.jp/covid-19-twitter-dataset/ As a result we can no longer load these datasets using `load_dataset`. The simple fix is to rename the URL in the dataset script - will do this asap. ## Steps to reproduce the bug ```python from datasets import load_dataset # pick one of the datasets from the list above ds = load_dataset("bn_hate_speech") ``` ## Expected results Dataset loads without error. ## Actual results ``` Downloading: 3.36kB [00:00, 1.07MB/s] Downloading: 2.03kB [00:00, 678kB/s] Using custom data configuration default Downloading and preparing dataset bn_hate_speech/default (download: 951.48 KiB, generated: 949.84 KiB, post-processed: Unknown size, total: 1.86 MiB) to /Users/lewtun/.cache/huggingface/datasets/bn_hate_speech/default/0.0.0/a2dc726e511a2177523301bcad196af05d4d8a2cff30d2769ba8aacc1f5fdb5c... Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/load.py", line 744, in load_dataset builder_instance.download_and_prepare( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/builder.py", line 574, in download_and_prepare self._download_and_prepare( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/builder.py", line 630, in _download_and_prepare split_generators = self._split_generators(dl_manager, **split_generators_kwargs) File "/Users/lewtun/.cache/huggingface/modules/datasets_modules/datasets/bn_hate_speech/a2dc726e511a2177523301bcad196af05d4d8a2cff30d2769ba8aacc1f5fdb5c/bn_hate_speech.py", line 76, in _split_generators train_path = dl_manager.download_and_extract(_URL) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 287, in download_and_extract return self.extract(self.download(url_or_urls)) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 195, in download downloaded_path_or_paths = map_nested( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/py_utils.py", line 195, in map_nested return function(data_struct) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/download_manager.py", line 218, in _download return cached_path(url_or_filename, download_config=download_config) File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 281, in cached_path output_path = get_from_cache( File "/Users/lewtun/miniconda3/envs/hf-hub_eval/lib/python3.8/site-packages/datasets/utils/file_utils.py", line 621, in get_from_cache raise FileNotFoundError("Couldn't find file at {}".format(url)) FileNotFoundError: Couldn't find file at https://raw.githubusercontent.com/rezacsedu/Bengali-Hate-Speech-Dataset/main/Bengali_%20Hate_Speech_Dataset_Subset.csv ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.6.2.dev0 - Platform: macOS-10.16-x86_64-i386-64bit - Python version: 3.8.8 - PyArrow version: 3.0.0
{ "url": "https://api.github.com/repos/huggingface/datasets/issues/2388/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/huggingface/datasets/issues/2388/timeline
null
null
null
false
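Before patching the `_URL` constants in the affected dataset scripts, it can help to verify that a candidate replacement location actually resolves. The URL below is a placeholder, not the real new path, which has to be looked up from the renamed upstream repositories.

```python
import requests

# placeholder only: substitute the raw URL found in the renamed upstream repo
candidate_url = "https://raw.githubusercontent.com/<user>/<renamed-repo>/main/<file>.csv"

resp = requests.head(candidate_url, allow_redirects=True)
print(resp.status_code)  # expect 200 before editing the dataset script
```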