| column | dtype | lengths / values |
|---|---|---|
| url | stringlengths | 58-61 |
| repository_url | stringclasses | 1 value |
| labels_url | stringlengths | 72-75 |
| comments_url | stringlengths | 67-70 |
| events_url | stringlengths | 65-68 |
| html_url | stringlengths | 46-51 |
| id | int64 | 600M-2.05B |
| node_id | stringlengths | 18-32 |
| number | int64 | 2-6.51k |
| title | stringlengths | 1-290 |
| user | dict | |
| labels | listlengths | 0-4 |
| state | stringclasses | 2 values |
| locked | bool | 1 class |
| assignee | dict | |
| assignees | listlengths | 0-4 |
| milestone | dict | |
| comments | sequencelengths | 0-30 |
| created_at | unknown | |
| updated_at | unknown | |
| closed_at | unknown | |
| author_association | stringclasses | 3 values |
| active_lock_reason | float64 | |
| draft | float64 | 0-1 |
| pull_request | dict | |
| body | stringlengths | 0-228k |
| reactions | dict | |
| timeline_url | stringlengths | 67-70 |
| performed_via_github_app | float64 | |
| state_reason | stringclasses | 3 values |
| is_pull_request | bool | 2 classes |
https://api.github.com/repos/huggingface/datasets/issues/4143
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4143/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4143/comments
https://api.github.com/repos/huggingface/datasets/issues/4143/events
https://github.com/huggingface/datasets/issues/4143
1,199,937,961
I_kwDODunzps5HhZmp
4,143
Unable to download `Wikipedia` 20220301.en version
{ "avatar_url": "https://avatars.githubusercontent.com/u/37113676?v=4", "events_url": "https://api.github.com/users/beyondguo/events{/privacy}", "followers_url": "https://api.github.com/users/beyondguo/followers", "following_url": "https://api.github.com/users/beyondguo/following{/other_user}", "gists_url": "https://api.github.com/users/beyondguo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/beyondguo", "id": 37113676, "login": "beyondguo", "node_id": "MDQ6VXNlcjM3MTEzNjc2", "organizations_url": "https://api.github.com/users/beyondguo/orgs", "received_events_url": "https://api.github.com/users/beyondguo/received_events", "repos_url": "https://api.github.com/users/beyondguo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/beyondguo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beyondguo/subscriptions", "type": "User", "url": "https://api.github.com/users/beyondguo" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Hi! We've recently updated the Wikipedia script, so these changes are only available on master and can be fetched as follows:\r\n```python\r\ndataset_wikipedia = load_dataset(\"wikipedia\", \"20220301.en\", revision=\"master\")\r\n```", "Hi, how can I load the previous \"20200501.en\" version of wikipedia which had been downloaded to the default path? Thanks!", "@JiaQiSJTU just reinstall the previous verision of the package, e.g. `!pip install -q datasets==1.0.0`" ]
"2022-04-11T13:00:14Z"
"2022-08-17T00:37:55Z"
"2022-04-21T17:04:14Z"
NONE
null
null
null
## Describe the bug Unable to download `Wikipedia` dataset, 20220301.en version ## Steps to reproduce the bug ```python !pip install apache_beam mwparserfromhell from datasets import load_dataset dataset_wikipedia = load_dataset("wikipedia", "20220301.en") ``` ## Actual results ``` ValueError: BuilderConfig 20220301.en not found. Available: ['20200501.aa', '20200501.ab', '20200501.ace', '20200501.ady', '20200501.af', '20200501.ak', '20200501.als', '20200501.am', '20200501.an', '20200501.ang', '20200501.ar', '20200501.arc', '20200501.arz', '20200501.as', '20200501.ast', '20200501.atj', '20200501.av', '20200501.ay', '20200501.az', '20200501.azb', '20200501.ba', '20200501.bar', '20200501.bat-smg', '20200501.bcl', '20200501.be', '20200501.be-x-old', '20200501.bg', '20200501.bh', '20200501.bi', '20200501.bjn', '20200501.bm', '20200501.bn', '20200501.bo', '20200501.bpy', '20200501.br', '20200501.bs', '20200501.bug', '20200501.bxr', '20200501.ca', '20200501.cbk-zam', '20200501.cdo', '20200501.ce', '20200501.ceb', '20200501.ch', '20200501.cho', '20200501.chr', '20200501.chy', '20200501.ckb', '20200501.co', '20200501.cr', '20200501.crh', '20200501.cs', '20200501.csb', '20200501.cu', '20200501.cv', '20200501.cy', '20200501.da', '20200501.de', '20200501.din', '20200501.diq', '20200501.dsb', '20200501.dty', '20200501.dv', '20200501.dz', '20200501.ee', '20200501.el', '20200501.eml', '20200501.en', '20200501.eo', '20200501.es', '20200501.et', '20200501.eu', '20200501.ext', '20200501.fa', '20200501.ff', '20200501.fi', '20200501.fiu-vro', '20200501.fj', '20200501.fo', '20200501.fr', '20200501.frp', '20200501.frr', '20200501.fur', '20200501.fy', '20200501.ga', '20200501.gag', '20200501.gan', '20200501.gd', '20200501.gl', '20200501.glk', '20200501.gn', '20200501.gom', '20200501.gor', '20200501.got', '20200501.gu', '20200501.gv', '20200501.ha', '20200501.hak', '20200501.haw', '20200501.he', '20200501.hi', '20200501.hif', '20200501.ho', '20200501.hr', '20200501.hsb', '20200501.ht', '20200501.hu', '20200501.hy', '20200501.ia', '20200501.id', '20200501.ie', '20200501.ig', '20200501.ii', '20200501.ik', '20200501.ilo', '20200501.inh', '20200501.io', '20200501.is', '20200501.it', '20200501.iu', '20200501.ja', '20200501.jam', '20200501.jbo', '20200501.jv', '20200501.ka', '20200501.kaa', '20200501.kab', '20200501.kbd', '20200501.kbp', '20200501.kg', '20200501.ki', '20200501.kj', '20200501.kk', '20200501.kl', '20200501.km', '20200501.kn', '20200501.ko', '20200501.koi', '20200501.krc', '20200501.ks', '20200501.ksh', '20200501.ku', '20200501.kv', '20200501.kw', '20200501.ky', '20200501.la', '20200501.lad', '20200501.lb', '20200501.lbe', '20200501.lez', '20200501.lfn', '20200501.lg', '20200501.li', '20200501.lij', '20200501.lmo', '20200501.ln', '20200501.lo', '20200501.lrc', '20200501.lt', '20200501.ltg', '20200501.lv', '20200501.mai', '20200501.map-bms', '20200501.mdf', '20200501.mg', '20200501.mh', '20200501.mhr', '20200501.mi', '20200501.min', '20200501.mk', '20200501.ml', '20200501.mn', '20200501.mr', '20200501.mrj', '20200501.ms', '20200501.mt', '20200501.mus', '20200501.mwl', '20200501.my', '20200501.myv', '20200501.mzn', '20200501.na', '20200501.nah', '20200501.nap', '20200501.nds', '20200501.nds-nl', '20200501.ne', '20200501.new', '20200501.ng', '20200501.nl', '20200501.nn', '20200501.no', '20200501.nov', '20200501.nrm', '20200501.nso', '20200501.nv', '20200501.ny', '20200501.oc', '20200501.olo', '20200501.om', '20200501.or', '20200501.os', '20200501.pa', '20200501.pag', '20200501.pam', '20200501.pap', '20200501.pcd', '20200501.pdc', 
'20200501.pfl', '20200501.pi', '20200501.pih', '20200501.pl', '20200501.pms', '20200501.pnb', '20200501.pnt', '20200501.ps', '20200501.pt', '20200501.qu', '20200501.rm', '20200501.rmy', '20200501.rn', '20200501.ro', '20200501.roa-rup', '20200501.roa-tara', '20200501.ru', '20200501.rue', '20200501.rw', '20200501.sa', '20200501.sah', '20200501.sat', '20200501.sc', '20200501.scn', '20200501.sco', '20200501.sd', '20200501.se', '20200501.sg', '20200501.sh', '20200501.si', '20200501.simple', '20200501.sk', '20200501.sl', '20200501.sm', '20200501.sn', '20200501.so', '20200501.sq', '20200501.sr', '20200501.srn', '20200501.ss', '20200501.st', '20200501.stq', '20200501.su', '20200501.sv', '20200501.sw', '20200501.szl', '20200501.ta', '20200501.tcy', '20200501.te', '20200501.tet', '20200501.tg', '20200501.th', '20200501.ti', '20200501.tk', '20200501.tl', '20200501.tn', '20200501.to', '20200501.tpi', '20200501.tr', '20200501.ts', '20200501.tt', '20200501.tum', '20200501.tw', '20200501.ty', '20200501.tyv', '20200501.udm', '20200501.ug', '20200501.uk', '20200501.ur', '20200501.uz', '20200501.ve', '20200501.vec', '20200501.vep', '20200501.vi', '20200501.vls', '20200501.vo', '20200501.wa', '20200501.war', '20200501.wo', '20200501.wuu', '20200501.xal', '20200501.xh', '20200501.xmf', '20200501.yi', '20200501.yo', '20200501.za', '20200501.zea', '20200501.zh', '20200501.zh-classical', '20200501.zh-min-nan', '20200501.zh-yue', '20200501.zu'] ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.0.0 - Platform: Ubuntu - Python version: 3.6 - PyArrow version: 6.0.1
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4143/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4143/timeline
null
completed
false
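The comments above contain the complete fix; as a minimal runnable sketch (assuming a `datasets` release from that era, since later versions of the library dropped script-based loading):

```python
from datasets import load_dataset

# Fetch the updated wikipedia script from the master branch so that the
# 20220301.en config is visible (the fix quoted in the comments above).
dataset_wikipedia = load_dataset("wikipedia", "20220301.en", revision="master")

# Alternatively, pin an older release to keep using the 20200501.en dump:
#   pip install -q datasets==1.0.0
```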
https://api.github.com/repos/huggingface/datasets/issues/1586
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1586/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1586/comments
https://api.github.com/repos/huggingface/datasets/issues/1586/events
https://github.com/huggingface/datasets/pull/1586
768,864,502
MDExOlB1bGxSZXF1ZXN0NTQxMTY0MDc2
1,586
added irc disentangle dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/32560035?v=4", "events_url": "https://api.github.com/users/dhruvjoshi1998/events{/privacy}", "followers_url": "https://api.github.com/users/dhruvjoshi1998/followers", "following_url": "https://api.github.com/users/dhruvjoshi1998/following{/other_user}", "gists_url": "https://api.github.com/users/dhruvjoshi1998/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dhruvjoshi1998", "id": 32560035, "login": "dhruvjoshi1998", "node_id": "MDQ6VXNlcjMyNTYwMDM1", "organizations_url": "https://api.github.com/users/dhruvjoshi1998/orgs", "received_events_url": "https://api.github.com/users/dhruvjoshi1998/received_events", "repos_url": "https://api.github.com/users/dhruvjoshi1998/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dhruvjoshi1998/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dhruvjoshi1998/subscriptions", "type": "User", "url": "https://api.github.com/users/dhruvjoshi1998" }
[]
closed
false
null
[]
null
[ "@lhoestq sorry, this was the only way I was able to fix the pull request ", "@lhoestq Thank you for the feedback. I wondering whether I should be passing an 'id' field in the dictionary since the 'connections' reference the 'id' of the linked messages. This 'id' would just be the same as the id_ that is in the yielded tuple.", "Yes indeed it would be cool to have the ids in the dictionary. This way the dataset can be shuffled and all without losing information about the connections. Can you add it if you don't mind ?", "Thanks :) could you also add the ids in the dictionary since they're useful for the connection links ?", "Thanks !\r\nAlso it looks like the dummy_data.zip were regenerated and are now back to being too big (300KB each).\r\nCan you reduce their sizes ? You can actually just revert to the ones you had before the last commit" ]
"2020-12-16T13:25:58Z"
"2021-01-29T10:28:53Z"
"2021-01-29T10:28:53Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1586.diff", "html_url": "https://github.com/huggingface/datasets/pull/1586", "merged_at": "2021-01-29T10:28:53Z", "patch_url": "https://github.com/huggingface/datasets/pull/1586.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1586" }
added irc disentanglement dataset
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1586/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1586/timeline
null
null
true
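The review thread above settles on duplicating the message id inside each yielded record so that `connections` remain resolvable after shuffling. A hypothetical sketch of that pattern in a builder's `_generate_examples` (`parse_irc_log` and the field names are illustrative, not the merged script's exact code):

```python
def _generate_examples(self, filepath):
    # parse_irc_log is a hypothetical helper standing in for the script's real
    # parsing logic; it yields (message_text, connected_message_ids) pairs.
    for id_, (text, connections) in enumerate(parse_irc_log(filepath)):
        yield id_, {
            "id": id_,  # duplicated inside the record so shuffling keeps links intact
            "message": text,
            "connections": connections,  # ids of the messages this one links to
        }
```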
https://api.github.com/repos/huggingface/datasets/issues/6238
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6238/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6238/comments
https://api.github.com/repos/huggingface/datasets/issues/6238/events
https://github.com/huggingface/datasets/issues/6238
1,895,207,828
I_kwDODunzps5w9pOU
6,238
`dataset.filter` ALWAYS removes the first item from the dataset when using batched=True
{ "avatar_url": "https://avatars.githubusercontent.com/u/1330693?v=4", "events_url": "https://api.github.com/users/Taytay/events{/privacy}", "followers_url": "https://api.github.com/users/Taytay/followers", "following_url": "https://api.github.com/users/Taytay/following{/other_user}", "gists_url": "https://api.github.com/users/Taytay/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Taytay", "id": 1330693, "login": "Taytay", "node_id": "MDQ6VXNlcjEzMzA2OTM=", "organizations_url": "https://api.github.com/users/Taytay/orgs", "received_events_url": "https://api.github.com/users/Taytay/received_events", "repos_url": "https://api.github.com/users/Taytay/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Taytay/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Taytay/subscriptions", "type": "User", "url": "https://api.github.com/users/Taytay" }
[]
closed
false
null
[]
null
[ "`filter` treats the function's output as a (selection) mask - `True` keeps the sample, and `False` drops it. In your case, `bool(0)` evaluates to `False`, so dropping the first sample is the correct behavior.", "Oh gosh! 🤦 I totally misunderstood the API! My apologies!" ]
"2023-09-13T20:20:37Z"
"2023-09-17T07:05:07Z"
"2023-09-17T07:05:07Z"
NONE
null
null
null
### Describe the bug If you call batched=True when calling `filter`, the first item is _always_ filtered out, regardless of the filter condition. ### Steps to reproduce the bug Here's a minimal example: ```python from datasets import Dataset def filter_batch_always_true(batch, indices): print("First index being passed into this filter function: ", indices[0]) return indices # Keep all indices data = {"value": list(range(10))} dataset = Dataset.from_dict(data) filtered_dataset = dataset.filter(filter_batch_always_true, with_indices=True, batched=True) print("Length of original dataset: ", len(dataset)) print("Length of filtered_dataset: ", len(filtered_dataset)) print("Is equal to original? ", len(filtered_dataset) == len(dataset)) print("First item of filtered dataset: ", filtered_dataset[0]) print("Last item of filtered dataset: ", filtered_dataset[-1]) ``` prints: ``` First index being passed into this filter function: 0 Length of original dataset: 10 Length of filtered_dataset: 9 Is equal to original? False First item of filtered dataset: {'value': 1} Last item of filtered dataset: {'value': 9} ``` ### Expected behavior Filter should respect the filter condition. ### Environment info - `datasets` version: 2.14.4 - Platform: macOS-13.5-arm64-arm-64bit - Python version: 3.9.18 - Huggingface_hub version: 0.17.1 - PyArrow version: 10.0.1 - Pandas version: 2.0.2
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6238/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6238/timeline
null
completed
false
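As the first comment explains, `filter` expects a boolean keep-mask rather than indices; a small corrected sketch of the reproduction above:

```python
from datasets import Dataset

dataset = Dataset.from_dict({"value": list(range(10))})

# Return True for every row: `filter` keeps rows whose mask entry is truthy,
# so the returned list must hold booleans, not indices.
def keep_everything(batch, indices):
    return [True] * len(indices)

filtered_dataset = dataset.filter(keep_everything, with_indices=True, batched=True)
assert len(filtered_dataset) == len(dataset)  # nothing is dropped
```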
https://api.github.com/repos/huggingface/datasets/issues/3438
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3438/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3438/comments
https://api.github.com/repos/huggingface/datasets/issues/3438/events
https://github.com/huggingface/datasets/pull/3438
1,081,302,203
PR_kwDODunzps4v52Va
3,438
Update supported versions of Python in setup.py
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[]
"2021-12-15T17:30:12Z"
"2021-12-20T14:22:13Z"
"2021-12-20T14:22:12Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3438.diff", "html_url": "https://github.com/huggingface/datasets/pull/3438", "merged_at": "2021-12-20T14:22:12Z", "patch_url": "https://github.com/huggingface/datasets/pull/3438.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3438" }
Update the list of supported versions of Python in `setup.py` to keep the PyPI project description updated.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3438/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3438/timeline
null
null
true
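For context, the PyPI-facing metadata a PR like this updates lives in the trove classifiers and `python_requires` fields of `setup.py`; a generic sketch (the version list here is illustrative, not the exact contents of the diff):

```python
from setuptools import setup

setup(
    name="datasets",
    # Trove classifiers feed the "Programming Language" section of the
    # PyPI project description.
    classifiers=[
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
    # The installer-enforced constraint, separate from the classifiers.
    python_requires=">=3.6",
)
```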
https://api.github.com/repos/huggingface/datasets/issues/1767
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1767/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1767/comments
https://api.github.com/repos/huggingface/datasets/issues/1767/events
https://github.com/huggingface/datasets/pull/1767
792,068,497
MDExOlB1bGxSZXF1ZXN0NTYwMDE2MzE2
1,767
Add Librispeech ASR
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[ "> Awesome thank you !\r\n> \r\n> The dummy data are quite big but it was expected given that the raw files are flac files.\r\n> Given that the script doesn't even read the flac files I think we can remove them. Or maybe use empty flac files (see [here](https://hydrogenaud.io/index.php?topic=118685.0) for example). What do you think ?\r\n> \r\n> We'll find a better solution to be able to have bigger dummy_data (max 1MB instead of a few KB, maybe using git LFS.\r\n\r\nHmm, I already made the dummy data as small as possible (a single flac filie per split only). I'd like to keep them at least to have complete dummy data and don't think 500KB for all datasets together is a problem (the long-range summarization datasets are similarly heavy). The moment we allow dummy data to be loaded directly for testing, we need the flac files IMO.\r\n\r\nBut I agree that longterm, we need a better solution for the dummy data (maybe stop hosting it on github to not make the repo too heavy)" ]
"2021-01-22T14:54:37Z"
"2021-01-25T20:38:07Z"
"2021-01-25T20:37:42Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1767.diff", "html_url": "https://github.com/huggingface/datasets/pull/1767", "merged_at": "2021-01-25T20:37:42Z", "patch_url": "https://github.com/huggingface/datasets/pull/1767.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1767" }
This PR adds the LibriSpeech ASR dataset: https://www.tensorflow.org/datasets/catalog/librispeech There are 2 configs, "clean" and "other", and "clean" has two train splits, hence the names "train.100" and "train.360". As suggested by @lhoestq, due to the enormous size of the dataset in `.arrow` format, the speech files are not decoded to float32 arrays up front; instead, just the path to the audio file is stored.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1767/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1767/timeline
null
null
true
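Since the PR stores only the path to each audio file rather than a decoded float32 array, decoding is left to the user; a hedged sketch of doing it lazily with `soundfile` (the `"file"` column name is an assumption here, and the download is large):

```python
import soundfile as sf
from datasets import load_dataset

def decode(example):
    # "file" is assumed to hold the path stored by the loading script.
    speech, sampling_rate = sf.read(example["file"])
    example["speech"] = speech
    example["sampling_rate"] = sampling_rate
    return example

ds = load_dataset("librispeech_asr", "clean", split="train.100")
ds = ds.map(decode)
```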
https://api.github.com/repos/huggingface/datasets/issues/3775
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3775/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3775/comments
https://api.github.com/repos/huggingface/datasets/issues/3775/events
https://github.com/huggingface/datasets/pull/3775
1,146,849,454
PR_kwDODunzps4zSEd4
3,775
Update gigaword card and info
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "I think it actually comes from an issue here:\r\n\r\nhttps://github.com/huggingface/datasets/blob/810b12f763f5cf02f2e43565b8890d278b7398cd/src/datasets/utils/file_utils.py#L575-L579\r\n\r\nand \r\n\r\nhttps://github.com/huggingface/datasets/blob/810b12f763f5cf02f2e43565b8890d278b7398cd/src/datasets/utils/streaming_download_manager.py#L386-L389\r\n\r\nThis code doesn't seem to work anymore. This can probably be fixed with\r\n\r\n```python\r\nif url.startswith(\"https://drive.google.com/\"): \r\n url += \"&confirm=t\"\r\n cookies = response.cookies \r\n```\r\n\r\nbecause Google Drive doesn't return the `download_warning` cookie anymore.", "Actually it seems that is has been fixed already in https://github.com/huggingface/datasets/pull/3787 :)\r\n\r\nI think it should have fixed the gigaword dataset loading", "@lhoestq The linked PR indeed fixes the issue. This PR is still worth merging IMO to update `gigaword`'s card." ]
"2022-02-22T12:27:16Z"
"2022-02-28T11:35:24Z"
"2022-02-28T11:35:24Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3775.diff", "html_url": "https://github.com/huggingface/datasets/pull/3775", "merged_at": "2022-02-28T11:35:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/3775.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3775" }
Reported on the forum: https://discuss.huggingface.co/t/error-loading-dataset/14999
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3775/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3775/timeline
null
null
true
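A standalone sketch of the Google Drive workaround quoted in the comments above: since Drive stopped returning the `download_warning` cookie, the virus-scan interstitial is skipped by appending `confirm=t` (the URL below is a placeholder, and the helper assumes the link already carries a query string):

```python
import requests

def with_gdrive_confirm(url: str) -> str:
    # Force the confirmation that used to be driven by the download_warning cookie.
    if url.startswith("https://drive.google.com/"):
        url += "&confirm=t"
    return url

url = "https://drive.google.com/uc?export=download&id=<FILE_ID>"  # placeholder id
response = requests.get(with_gdrive_confirm(url))
```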
https://api.github.com/repos/huggingface/datasets/issues/2739
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2739/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2739/comments
https://api.github.com/repos/huggingface/datasets/issues/2739/events
https://github.com/huggingface/datasets/pull/2739
957,751,260
MDExOlB1bGxSZXF1ZXN0NzAxMTI0ODQ3
2,739
Pass tokenize to sacrebleu only if explicitly passed by user
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[]
"2021-08-02T05:09:05Z"
"2021-08-03T04:23:37Z"
"2021-08-03T04:23:37Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2739.diff", "html_url": "https://github.com/huggingface/datasets/pull/2739", "merged_at": "2021-08-03T04:23:37Z", "patch_url": "https://github.com/huggingface/datasets/pull/2739.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2739" }
The next `sacrebleu` release (v2.0.0) will remove `sacrebleu.DEFAULT_TOKENIZER`: https://github.com/mjpost/sacrebleu/pull/152/files#diff-2553a315bb1f7e68c9c1b00d56eaeb74f5205aeb3a189bc3e527b122c6078795L17-R15 This PR passes `tokenize` to `sacrebleu` only if it is explicitly passed by the user; otherwise nothing is passed, and `sacrebleu` uses its own default, wherever it lives and however it is called. Close: #2737.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2739/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2739/timeline
null
null
true
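A minimal sketch of the pattern this PR describes, forwarding `tokenize` only when the caller sets it so that `sacrebleu`'s own default applies otherwise (`compute_bleu` is an illustrative wrapper, not the metric script's actual function):

```python
import sacrebleu

def compute_bleu(predictions, references, tokenize=None):
    # Only forward `tokenize` when explicitly given; sacrebleu v2.0.0 removes
    # DEFAULT_TOKENIZER, so we must not reach for it ourselves.
    kwargs = {} if tokenize is None else {"tokenize": tokenize}
    return sacrebleu.corpus_bleu(predictions, references, **kwargs)

score = compute_bleu(["hello there general kenobi"], [["hello there general kenobi"]])
print(score.score)
```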
https://api.github.com/repos/huggingface/datasets/issues/5573
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5573/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5573/comments
https://api.github.com/repos/huggingface/datasets/issues/5573/events
https://github.com/huggingface/datasets/pull/5573
1,597,400,836
PR_kwDODunzps5Kop7n
5,573
Use soundfile for mp3 decoding instead of torchaudio
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "@mariosasko thank you for the review! do you have any idea why `test_hash_torch_tensor` fails on \"ubuntu-latest deps-minimum\"? I removed the `torchaudio<0.12.0` test dependency so it uses the latest `torch` now, might it be connected?", "@polinaeterna The failure is due to `torch.from_numpy` not being picklable in newer versions of PyTorch. You can replace the current definition of `_save_tensor` in `utils/py_utils.py` with the following one to fix it: \r\n\r\n```python\r\n@pklregister(obj_type)\r\ndef _save_tensor(pickler, obj):\r\n # `torch.from_numpy` is not picklable in `torch>=1.11.0`\r\n def _create_tensor(np_array):\r\n return torch.from_numpy(np_array)\r\n\r\n dill_log(pickler, f\"To: {obj}\")\r\n args = (obj.detach().cpu().numpy(),)\r\n pickler.save_reduce(_create_tensor, args, obj=obj)\r\n dill_log(pickler, \"# To\")\r\n return\r\n```", "(doing a patch release now - please wait before merging ^^)", "@mariosasko génial, merci!! i've integrated all your changes, can you pls take a look one more time?", "Patch release is done (I did it from another branch than `main` anyway)", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==6.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.010927 / 0.011353 (-0.000426) | 0.006232 / 0.011008 (-0.004776) | 0.119815 / 0.038508 (0.081307) | 0.034138 / 0.023109 (0.011029) | 0.349945 / 0.275898 (0.074047) | 0.404967 / 0.323480 (0.081487) | 0.008672 / 0.007986 (0.000687) | 0.005010 / 0.004328 (0.000681) | 0.091931 / 0.004250 (0.087680) | 0.042534 / 0.037052 (0.005482) | 0.374701 / 0.258489 (0.116212) | 0.401027 / 0.293841 (0.107186) | 0.053523 / 0.128546 (-0.075024) | 0.019704 / 0.075646 (-0.055942) | 0.384207 / 0.419271 (-0.035064) | 0.065350 / 0.043533 (0.021817) | 0.375074 / 0.255139 (0.119935) | 0.390458 / 0.283200 (0.107259) | 0.110549 / 0.141683 (-0.031134) | 1.719812 / 1.452155 (0.267657) | 1.748906 / 1.492716 (0.256190) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.210051 / 0.018006 (0.192045) | 0.546503 / 0.000490 (0.546013) | 0.004078 / 0.000200 (0.003878) | 0.000111 / 0.000054 (0.000056) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.030212 / 0.037411 (-0.007199) | 0.121845 / 0.014526 (0.107319) | 0.136309 / 0.176557 (-0.040247) | 0.204667 / 0.737135 (-0.532468) | 0.157327 / 0.296338 (-0.139012) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.672548 / 0.215209 (0.457339) | 6.239409 / 2.077655 (4.161754) | 2.462441 / 1.504120 (0.958322) | 2.063985 / 1.541195 (0.522791) | 2.098858 / 1.468490 (0.630368) | 1.262600 / 4.584777 (-3.322177) | 5.478462 / 3.745712 (1.732750) | 5.454672 / 5.269862 (0.184810) | 2.991866 / 4.565676 (-1.573810) | 0.153415 / 0.424275 (-0.270861) | 0.015061 / 0.007607 (0.007454) | 0.796115 / 0.226044 (0.570071) | 8.206858 / 2.268929 (5.937930) | 3.226395 / 55.444624 (-52.218229) | 2.503522 / 6.876477 (-4.372955) | 2.547489 / 2.142072 (0.405417) | 1.504776 / 4.805227 (-3.300451) | 0.256536 / 6.500664 (-6.244128) | 0.078543 / 0.075469 (0.003073) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.591109 / 1.841788 (-0.250678) | 18.153317 / 8.074308 (10.079008) | 20.465684 / 10.191392 (10.274292) | 0.229808 / 0.680424 (-0.450616) | 0.045263 / 0.534201 (-0.488938) | 0.556760 / 0.579283 (-0.022524) | 0.614985 / 0.434364 (0.180622) | 0.635675 / 0.540337 (0.095337) | 0.729817 / 1.386936 (-0.657119) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.011247 / 0.011353 (-0.000106) | 0.006823 / 0.011008 (-0.004185) | 0.101989 / 0.038508 (0.063481) | 0.036077 / 0.023109 (0.012968) | 0.413469 / 0.275898 (0.137571) | 0.505560 / 0.323480 (0.182080) | 0.007506 / 0.007986 (-0.000480) | 0.006369 / 0.004328 (0.002040) | 0.099597 / 0.004250 (0.095346) | 0.058115 / 0.037052 (0.021063) | 0.414735 / 0.258489 (0.156246) | 0.466801 / 0.293841 (0.172960) | 0.064771 / 0.128546 (-0.063775) | 0.021100 / 0.075646 (-0.054546) | 0.135407 / 0.419271 (-0.283864) | 0.068784 / 0.043533 (0.025251) | 0.410467 / 0.255139 (0.155328) | 0.465993 / 0.283200 (0.182794) | 0.119404 / 0.141683 (-0.022279) | 1.767107 / 1.452155 (0.314952) | 1.938342 / 1.492716 (0.445626) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.227038 / 0.018006 (0.209032) | 0.511389 / 0.000490 (0.510899) | 0.006723 / 0.000200 (0.006523) | 0.000118 / 0.000054 (0.000064) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.033078 / 0.037411 (-0.004333) | 0.133159 / 0.014526 (0.118633) | 0.147928 / 0.176557 (-0.028629) | 0.214005 / 0.737135 (-0.523130) | 0.151655 / 0.296338 (-0.144683) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.634829 / 0.215209 (0.419620) | 6.578640 / 2.077655 (4.500985) | 2.673598 / 1.504120 (1.169478) | 2.338671 / 1.541195 (0.797476) | 2.389104 / 1.468490 (0.920614) | 1.274938 / 4.584777 (-3.309839) | 5.746524 / 3.745712 (2.000812) | 5.992084 / 5.269862 (0.722222) | 3.092090 / 4.565676 (-1.473587) | 0.150375 / 0.424275 (-0.273900) | 0.015470 / 0.007607 (0.007863) | 0.792962 / 0.226044 (0.566918) | 8.057491 / 2.268929 (5.788563) | 3.483966 / 55.444624 (-51.960659) | 2.715038 / 6.876477 (-4.161438) | 2.747186 / 2.142072 (0.605114) | 1.532951 / 4.805227 (-3.272276) | 0.262214 / 6.500664 (-6.238450) | 0.081308 / 0.075469 (0.005839) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.698448 / 1.841788 (-0.143340) | 18.590002 / 8.074308 (10.515694) | 20.584508 / 10.191392 (10.393116) | 0.227237 / 0.680424 (-0.453187) | 0.028445 / 0.534201 (-0.505756) | 0.527874 / 0.579283 (-0.051409) | 0.602844 / 0.434364 (0.168480) | 0.672948 / 0.540337 (0.132611) | 0.788103 / 1.386936 (-0.598833) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f96547708a889c09ca8a02ed7aadd8c5690503c5 \"CML watermark\")\n" ]
"2023-02-23T19:19:44Z"
"2023-02-28T20:25:14Z"
"2023-02-28T20:16:02Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5573.diff", "html_url": "https://github.com/huggingface/datasets/pull/5573", "merged_at": "2023-02-28T20:16:02Z", "patch_url": "https://github.com/huggingface/datasets/pull/5573.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5573" }
I've removed `torchaudio` completely and switched to using `soundfile` for everything. With the new version of the `soundfile` package this should work smoothly, because the `libsndfile` C library is bundled, in Linux wheels too. Let me know if you think it's too harsh and we should continue to support `torchaudio` decoding. I decided that we can drop it completely because: 1. there is always something wrong with `torchaudio` (for example, recently https://github.com/huggingface/datasets/issues/5488 ) 2. the results of mp3 decoding differ depending on the `torchaudio` version 3. `soundfile` is slightly faster than the latest `torchaudio` 4. in any case, users can pass a custom decoding function using any library they want if needed (worth putting a snippet in the docs). cc @sanchit-gandhi @vaibhavad
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 3, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/5573/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5573/timeline
null
null
true
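A minimal sketch of the decoding path this PR switches to; with `soundfile>=0.12` the bundled `libsndfile` reads mp3 directly, so no `torchaudio` is involved (`audio.mp3` is a placeholder path):

```python
import soundfile as sf

# sf.read returns the decoded waveform as a numpy array plus the sampling rate.
array, sampling_rate = sf.read("audio.mp3")
print(array.shape, sampling_rate)
```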
https://api.github.com/repos/huggingface/datasets/issues/628
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/628/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/628/comments
https://api.github.com/repos/huggingface/datasets/issues/628/events
https://github.com/huggingface/datasets/pull/628
701,496,053
MDExOlB1bGxSZXF1ZXN0NDg2OTQyNzgx
628
Update docs links in the contribution guideline
{ "avatar_url": "https://avatars.githubusercontent.com/u/9285264?v=4", "events_url": "https://api.github.com/users/M-Salti/events{/privacy}", "followers_url": "https://api.github.com/users/M-Salti/followers", "following_url": "https://api.github.com/users/M-Salti/following{/other_user}", "gists_url": "https://api.github.com/users/M-Salti/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/M-Salti", "id": 9285264, "login": "M-Salti", "node_id": "MDQ6VXNlcjkyODUyNjQ=", "organizations_url": "https://api.github.com/users/M-Salti/orgs", "received_events_url": "https://api.github.com/users/M-Salti/received_events", "repos_url": "https://api.github.com/users/M-Salti/repos", "site_admin": false, "starred_url": "https://api.github.com/users/M-Salti/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/M-Salti/subscriptions", "type": "User", "url": "https://api.github.com/users/M-Salti" }
[]
closed
false
null
[]
null
[ "Thanks!" ]
"2020-09-14T23:27:19Z"
"2020-11-02T21:03:23Z"
"2020-09-15T06:19:35Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/628.diff", "html_url": "https://github.com/huggingface/datasets/pull/628", "merged_at": "2020-09-15T06:19:35Z", "patch_url": "https://github.com/huggingface/datasets/pull/628.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/628" }
Fixed the `add a dataset` and `share a dataset` links in the contribution guideline to refer to the new docs website.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/628/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/628/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3992
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3992/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3992/comments
https://api.github.com/repos/huggingface/datasets/issues/3992/events
https://github.com/huggingface/datasets/issues/3992
1,177,946,153
I_kwDODunzps5GNggp
3,992
Image column is not decoded in map when used with with_transform
{ "avatar_url": "https://avatars.githubusercontent.com/u/5902432?v=4", "events_url": "https://api.github.com/users/phihung/events{/privacy}", "followers_url": "https://api.github.com/users/phihung/followers", "following_url": "https://api.github.com/users/phihung/following{/other_user}", "gists_url": "https://api.github.com/users/phihung/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/phihung", "id": 5902432, "login": "phihung", "node_id": "MDQ6VXNlcjU5MDI0MzI=", "organizations_url": "https://api.github.com/users/phihung/orgs", "received_events_url": "https://api.github.com/users/phihung/received_events", "repos_url": "https://api.github.com/users/phihung/repos", "site_admin": false, "starred_url": "https://api.github.com/users/phihung/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/phihung/subscriptions", "type": "User", "url": "https://api.github.com/users/phihung" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" } ]
null
[ "Hi! This behavior stems from this line: https://github.com/huggingface/datasets/blob/799b817d97590ddc97cbd38d07469403e030de8c/src/datasets/arrow_dataset.py#L1919\r\nBasically, the `Image`/`Audio` columns are decoded only if the `format_type` attribute is `None` (`set_format`/`with_format` and `set_transform`/`with_transform` assign a non-`None` value to it) and the `input_columns` param is not specified (see https://github.com/huggingface/datasets/issues/3756). We will remove these limitations soon.\r\n\r\n\r\n\r\n" ]
"2022-03-23T10:51:13Z"
"2022-12-13T16:59:06Z"
"2022-12-13T16:59:06Z"
NONE
null
null
null
## Describe the bug Image column is not _decoded_ in **map** when used with `with_transform` ## Steps to reproduce the bug ```python from datasets import Image, Dataset def add_C(batch): batch["C"] = batch["A"] return batch ds = Dataset.from_dict({"A": ["image.png"]}).cast_column("A", Image()) ds = ds.with_transform(lambda x: x) # <= This line causes the problem ds = ds.map(add_C, batched=True) print(ds[0]) ``` ## Expected results ``` {'C': <PIL.PngImagePlugin.PngImageFile>, ...} ``` ## Actual results ``` {'C': {'bytes': None, 'path': 'image.png'}, ...} ``` If we remove the `with_transform` line, we get the expected result. ## Environment info - `datasets` version: 2.0.0 - Platform: Mac OSX - Python version: 3.8.12 - PyArrow version: 7.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3992/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3992/timeline
null
completed
false
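Until those limitations are lifted, a workaround consistent with the report above is simply to reorder the calls so `map` runs before the transform is attached; a sketch reusing the issue's example ("image.png" stands in for a real file):

```python
from datasets import Dataset, Image

def add_C(batch):
    batch["C"] = batch["A"]
    return batch

ds = Dataset.from_dict({"A": ["image.png"]}).cast_column("A", Image())

# `map` runs while the dataset is still in the default (None) format, so the
# Image column is decoded to PIL images; the transform is attached afterwards.
ds = ds.map(add_C, batched=True)
ds = ds.with_transform(lambda x: x)
```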
https://api.github.com/repos/huggingface/datasets/issues/5749
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5749/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5749/comments
https://api.github.com/repos/huggingface/datasets/issues/5749/events
https://github.com/huggingface/datasets/issues/5749
1,668,016,321
I_kwDODunzps5ja-jB
5,749
AttributeError: 'Version' object has no attribute 'match'
{ "avatar_url": "https://avatars.githubusercontent.com/u/54584290?v=4", "events_url": "https://api.github.com/users/gulnaz-zh/events{/privacy}", "followers_url": "https://api.github.com/users/gulnaz-zh/followers", "following_url": "https://api.github.com/users/gulnaz-zh/following{/other_user}", "gists_url": "https://api.github.com/users/gulnaz-zh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gulnaz-zh", "id": 54584290, "login": "gulnaz-zh", "node_id": "MDQ6VXNlcjU0NTg0Mjkw", "organizations_url": "https://api.github.com/users/gulnaz-zh/orgs", "received_events_url": "https://api.github.com/users/gulnaz-zh/received_events", "repos_url": "https://api.github.com/users/gulnaz-zh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gulnaz-zh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gulnaz-zh/subscriptions", "type": "User", "url": "https://api.github.com/users/gulnaz-zh" }
[]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "I got the same error, and the official website for visual genome is down. Did you solve this problem? ", "I am in the same situation now :( ", "Thanks for reporting, @gulnaz-zh.\r\n\r\nI am investigating it.", "The host server is down: https://visualgenome.org/\r\n\r\nWe are contacting the dataset authors.", "Apart form data host server being down, there is an additional issue with the `datasets` library introduced by this PR:\r\n- #5238\r\n\r\nI am working to fix it.", "PR that fixes the AttributeError: https://huggingface.co/datasets/visual_genome/discussions/2", "For the issue with their data host server being down, I have opened a discussion in the \"Community\" tab of the Hub dataset: https://huggingface.co/datasets/visual_genome/discussions/3\r\nLet's continue the discussion there.", "The authors just replied to us with their new URL: https://homes.cs.washington.edu/~ranjay/visualgenome/\r\n\r\nWe have fixed the datasets loading script, which is operative again." ]
"2023-04-14T10:48:06Z"
"2023-06-30T11:31:17Z"
"2023-04-18T12:57:08Z"
NONE
null
null
null
### Describe the bug When I run from datasets import load_dataset data = load_dataset("visual_genome", 'region_descriptions_v1.2.0') AttributeError: 'Version' object has no attribute 'match' ### Steps to reproduce the bug from datasets import load_dataset data = load_dataset("visual_genome", 'region_descriptions_v1.2.0') ### Expected behavior This is error trace: Downloading and preparing dataset visual_genome/region_descriptions_v1.2.0 to C:/Users/Acer/.cache/huggingface/datasets/visual_genome/region_descriptions_v1.2.0/1.2.0/136fe5b83f6691884566c5530313288171e053a3b33bfe3ea2e4c8b39abaf7f3... --------------------------------------------------------------------------- AttributeError Traceback (most recent call last) Cell In[6], line 1 ----> 1 data = load_dataset("visual_genome", 'region_descriptions_v1.2.0') File ~\.conda\envs\aai\Lib\site-packages\datasets\load.py:1791, in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, verification_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, num_proc, storage_options, **config_kwargs) 1788 try_from_hf_gcs = path not in _PACKAGED_DATASETS_MODULES 1790 # Download and prepare data -> 1791 builder_instance.download_and_prepare( 1792 download_config=download_config, 1793 download_mode=download_mode, 1794 verification_mode=verification_mode, 1795 try_from_hf_gcs=try_from_hf_gcs, 1796 num_proc=num_proc, 1797 storage_options=storage_options, 1798 ) 1800 # Build dataset for splits 1801 keep_in_memory = ( 1802 keep_in_memory if keep_in_memory is not None else is_small_dataset(builder_instance.info.dataset_size) 1803 ) File ~\.conda\envs\aai\Lib\site-packages\datasets\builder.py:891, in DatasetBuilder.download_and_prepare(self, output_dir, download_config, download_mode, verification_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, file_format, max_shard_size, num_proc, storage_options, **download_and_prepare_kwargs) 889 if num_proc is not None: 890 prepare_split_kwargs["num_proc"] = num_proc --> 891 self._download_and_prepare( 892 dl_manager=dl_manager, 893 verification_mode=verification_mode, 894 **prepare_split_kwargs, 895 **download_and_prepare_kwargs, 896 ) 897 # Sync info 898 self.info.dataset_size = sum(split.num_bytes for split in self.info.splits.values()) File ~\.conda\envs\aai\Lib\site-packages\datasets\builder.py:1651, in GeneratorBasedBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs) 1650 def _download_and_prepare(self, dl_manager, verification_mode, **prepare_splits_kwargs): -> 1651 super()._download_and_prepare( 1652 dl_manager, 1653 verification_mode, 1654 check_duplicate_keys=verification_mode == VerificationMode.BASIC_CHECKS 1655 or verification_mode == VerificationMode.ALL_CHECKS, 1656 **prepare_splits_kwargs, 1657 ) File ~\.conda\envs\aai\Lib\site-packages\datasets\builder.py:964, in DatasetBuilder._download_and_prepare(self, dl_manager, verification_mode, **prepare_split_kwargs) 962 split_dict = SplitDict(dataset_name=self.name) 963 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) --> 964 split_generators = self._split_generators(dl_manager, **split_generators_kwargs) 966 # Checksums verification 967 if verification_mode == VerificationMode.ALL_CHECKS and dl_manager.record_checksums: File 
~\.cache\huggingface\modules\datasets_modules\datasets\visual_genome\136fe5b83f6691884566c5530313288171e053a3b33bfe3ea2e4c8b39abaf7f3\visual_genome.py:377, in VisualGenome._split_generators(self, dl_manager) 375 def _split_generators(self, dl_manager): 376 # Download image meta datas. --> 377 image_metadatas_dir = dl_manager.download_and_extract(self.config.image_metadata_url) 378 image_metadatas_file = os.path.join( 379 image_metadatas_dir, _get_decompressed_filename_from_url(self.config.image_metadata_url) 380 ) 382 # Download annotations File ~\.cache\huggingface\modules\datasets_modules\datasets\visual_genome\136fe5b83f6691884566c5530313288171e053a3b33bfe3ea2e4c8b39abaf7f3\visual_genome.py:328, in VisualGenomeConfig.image_metadata_url(self) 326 @property 327 def image_metadata_url(self): --> 328 if not self.version.match(_LATEST_VERSIONS["image_metadata"]): 329 logger.warning( 330 f"Latest image metadata version is {_LATEST_VERSIONS['image_metadata']}. Trying to generate a dataset of version: {self.version}. Please double check that image data are unchanged between the two versions." 331 ) 332 return f"{_BASE_ANNOTATION_URL}/image_data.json.zip" ### Environment info datasets 2.11.0 python 3.11.3
{ "+1": 3, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/5749/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5749/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/780
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/780/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/780/comments
https://api.github.com/repos/huggingface/datasets/issues/780/events
https://github.com/huggingface/datasets/pull/780
732,738,647
MDExOlB1bGxSZXF1ZXN0NTEyNjM0MzI0
780
Add ASNQ dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/2992022?v=4", "events_url": "https://api.github.com/users/mkserge/events{/privacy}", "followers_url": "https://api.github.com/users/mkserge/followers", "following_url": "https://api.github.com/users/mkserge/following{/other_user}", "gists_url": "https://api.github.com/users/mkserge/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mkserge", "id": 2992022, "login": "mkserge", "node_id": "MDQ6VXNlcjI5OTIwMjI=", "organizations_url": "https://api.github.com/users/mkserge/orgs", "received_events_url": "https://api.github.com/users/mkserge/received_events", "repos_url": "https://api.github.com/users/mkserge/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mkserge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mkserge/subscriptions", "type": "User", "url": "https://api.github.com/users/mkserge" }
[]
closed
false
null
[]
null
[ "Very nice !\r\nWhat do the `sentence1` and `sentence2` correspond to exactly ?\r\nAlso maybe you could use the `ClassLabel` feature type for the `label` field (see [snli](https://github.com/huggingface/datasets/blob/master/datasets/snli/snli.py) for example)", "> What do the `sentence1` and `sentence2` correspond to exactly ?\r\n\r\n`sentence1` is a question, and `sentence2` is a candidate answer sentence. The labels are [1, 2, 3, 4] defining a relation between the answer sentence and the question. For example, label 4 means that the answer sentence is inside the _long_answer_ passage AND that the _short_answer_ is within the answer sentence. All the other labels are the negatives with different characteristics. (the short_answer, long_answer terminology is borrowed from Google's NQ dataset)\r\n\r\nShould I label them simply as `question` and `answer`? I was going more with what I saw in the examples/run_glue.py script, but I realize now there is no restriction around this.\r\n\r\n> Also maybe you could use the `ClassLabel` feature type for the `label` field (see [snli](https://github.com/huggingface/datasets/blob/master/datasets/snli/snli.py) for example)\r\n\r\nI am finding it difficult to assign names to each class, but perhaps it's possible. Here's the description of each class from the paper.\r\n\r\n1. Sentences from the document that are in the long answer but do not contain the annotated short answers. It is possible that these sentences might contain the short answer.\r\n2. Sentences from the document that are not in the long answer but contain the short answer string, that is, such occurrence is purely accidental.\r\n3. Sentences from the document that are neither in the long answer nor contain the short answer.\r\n4. Sentences from the document that are in the long answer and do contain the annotated short answers.\r\n\r\nAny ideas?\r\n\r\n", "Yes it's better to have explicit feature names. Maybe go with question/answer or question/sentence.\r\nI read in the paper that 1,2 and 3 are considered negative and 4 positive.\r\nWe could have a binary classification label `label` (either positive of negative) and then two boolean fields `short_answser_in_sentence` and `sentence_in_long_answer`. What do you think ?", "> Yes it's better to have explicit feature names. Maybe go with question/answer or question/sentence.\r\n> I read in the paper that 1,2 and 3 are considered negative and 4 positive.\r\n> We could have a binary classification label `label` (either positive of negative) and then two boolean fields `short_answser_in_sentence` and `sentence_in_long_answer`. What do you think ?\r\n\r\nOk, sounds good. I went with `sentence` to keep it consistent with `short_answer_in_sentence` and `sentence_in_long_answer`. \r\n\r\nI changed it to a ClassLabel with pos and neg classes and added the two above as features. Let me know if this is not what you had in mind.\r\n\r\n" ]
"2020-10-29T23:31:56Z"
"2020-11-10T09:26:23Z"
"2020-11-10T09:26:23Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/780.diff", "html_url": "https://github.com/huggingface/datasets/pull/780", "merged_at": "2020-11-10T09:26:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/780.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/780" }
This pull request adds the ASNQ dataset. It is a dataset for answer sentence selection derived from the Google Natural Questions (NQ) dataset (Kwiatkowski et al. 2019). The dataset details can be found in the paper at https://arxiv.org/abs/1911.04118 The dataset is authored by Siddhant Garg, Thuy Vu and Alessandro Moschitti. _Please note that I have no affiliation with the authors._ Repo: https://github.com/alexa/wqa_tanda
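A hedged usage sketch, assuming the feature names settled on in the review thread (`question`, `sentence`, a pos/neg `label`, plus the two boolean flags `sentence_in_long_answer` and `short_answer_in_sentence`):

```python
from datasets import load_dataset

# Load ASNQ and inspect one example; field names follow the review discussion.
asnq = load_dataset("asnq")
example = asnq["train"][0]
print(example["question"], example["sentence"], example["label"])
```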
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/780/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/780/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1930
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1930/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1930/comments
https://api.github.com/repos/huggingface/datasets/issues/1930/events
https://github.com/huggingface/datasets/pull/1930
814,055,198
MDExOlB1bGxSZXF1ZXN0NTc4MTAwNzI0
1,930
updated the wino_bias dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/22306304?v=4", "events_url": "https://api.github.com/users/JieyuZhao/events{/privacy}", "followers_url": "https://api.github.com/users/JieyuZhao/followers", "following_url": "https://api.github.com/users/JieyuZhao/following{/other_user}", "gists_url": "https://api.github.com/users/JieyuZhao/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/JieyuZhao", "id": 22306304, "login": "JieyuZhao", "node_id": "MDQ6VXNlcjIyMzA2MzA0", "organizations_url": "https://api.github.com/users/JieyuZhao/orgs", "received_events_url": "https://api.github.com/users/JieyuZhao/received_events", "repos_url": "https://api.github.com/users/JieyuZhao/repos", "site_admin": false, "starred_url": "https://api.github.com/users/JieyuZhao/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JieyuZhao/subscriptions", "type": "User", "url": "https://api.github.com/users/JieyuZhao" }
[]
closed
false
null
[]
null
[ "Hi @JieyuZhao ! Have you had a chance to add the different configurations ?\r\nThanks again for your help on this !", "> Hi @JieyuZhao ! Have you had a chance to add the different configurations ?\r\n> Thanks again for your help on this !\r\n\r\nHi @lhoestq Yes, I've updated the code. Now the configuration will have dev/test splits.", "> Cool thanks !\r\n> This looks perfect this way.\r\n> \r\n> Now we just need to update the dataset_infos.json (it contains the metadata of the dataset) and add dummy data to be able to test this script automatically.\r\n> \r\n> To update the dataset_infos.json you just need delete the current one at `./datasets/wino_biais/dataset_infos.json`, and then run this command:\r\n> \r\n> ```\r\n> datasets-cli test ./datasets/wino_biais --save_infos --all_configs --ignore_verifications\r\n> ```\r\n> \r\n> To add the dummy data there's also a tool to add them automatically.\r\n> First delete the folder at `./datasets/wino_biais/dummy` and then run\r\n> \r\n> ```\r\n> datasets-cli dummy_data ./datasets/wino_biais --auto_generate --match_text_files \"*conll\" --n_lines 15\r\n> ```\r\n> \r\n> Let me know if you have questions :)\r\n> Also don't forget to run `make style` to format the code properly.\r\n\r\nThanks for the instruction! I've updated the metadata and the dummy data and also do the formatting. Please let me know if more is needed. :)" ]
"2021-02-23T03:07:40Z"
"2021-04-07T15:24:56Z"
"2021-04-07T15:24:56Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1930.diff", "html_url": "https://github.com/huggingface/datasets/pull/1930", "merged_at": "2021-04-07T15:24:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/1930.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1930" }
Updated the wino_bias.py script. - updated the data_url - added different configurations for different data splits - added the coreference_cluster to the data features
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1930/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1930/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1253
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1253/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1253/comments
https://api.github.com/repos/huggingface/datasets/issues/1253/events
https://github.com/huggingface/datasets/pull/1253
758,517,391
MDExOlB1bGxSZXF1ZXN0NTMzNjc4MDE1
1,253
add thainer
{ "avatar_url": "https://avatars.githubusercontent.com/u/15519308?v=4", "events_url": "https://api.github.com/users/cstorm125/events{/privacy}", "followers_url": "https://api.github.com/users/cstorm125/followers", "following_url": "https://api.github.com/users/cstorm125/following{/other_user}", "gists_url": "https://api.github.com/users/cstorm125/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/cstorm125", "id": 15519308, "login": "cstorm125", "node_id": "MDQ6VXNlcjE1NTE5MzA4", "organizations_url": "https://api.github.com/users/cstorm125/orgs", "received_events_url": "https://api.github.com/users/cstorm125/received_events", "repos_url": "https://api.github.com/users/cstorm125/repos", "site_admin": false, "starred_url": "https://api.github.com/users/cstorm125/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cstorm125/subscriptions", "type": "User", "url": "https://api.github.com/users/cstorm125" }
[]
closed
false
null
[]
null
[]
"2020-12-07T13:41:54Z"
"2020-12-08T14:44:49Z"
"2020-12-08T14:44:49Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1253.diff", "html_url": "https://github.com/huggingface/datasets/pull/1253", "merged_at": "2020-12-08T14:44:49Z", "patch_url": "https://github.com/huggingface/datasets/pull/1253.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1253" }
ThaiNER (v1.3) is a 6,456-sentence named entity recognition dataset created from expanding the 2,258-sentence [unnamed dataset](http://pioneer.chula.ac.th/~awirote/Data-Nutcha.zip) by [Tirasaroj and Aroonmanakun (2012)](http://pioneer.chula.ac.th/~awirote/publications/). It is used to train NER taggers in [PyThaiNLP](https://github.com/PyThaiNLP/pythainlp). The NER tags are annotated by [Tirasaroj and Aroonmanakun (2012)](http://pioneer.chula.ac.th/~awirote/publications/) for 2,258 sentences and the rest by [@wannaphong](https://github.com/wannaphong/). The POS tags are done by [PyThaiNLP](https://github.com/PyThaiNLP/pythainlp)'s `perceptron` engine trained on `orchid_ud`. [@wannaphong](https://github.com/wannaphong/) is now the only maintainer of this dataset.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1253/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1253/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1231
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1231/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1231/comments
https://api.github.com/repos/huggingface/datasets/issues/1231/events
https://github.com/huggingface/datasets/pull/1231
758,121,398
MDExOlB1bGxSZXF1ZXN0NTMzMzQzMzAz
1,231
Add Urdu Sentiment Corpus (USC)
{ "avatar_url": "https://avatars.githubusercontent.com/u/44389205?v=4", "events_url": "https://api.github.com/users/chaitnayabasava/events{/privacy}", "followers_url": "https://api.github.com/users/chaitnayabasava/followers", "following_url": "https://api.github.com/users/chaitnayabasava/following{/other_user}", "gists_url": "https://api.github.com/users/chaitnayabasava/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/chaitnayabasava", "id": 44389205, "login": "chaitnayabasava", "node_id": "MDQ6VXNlcjQ0Mzg5MjA1", "organizations_url": "https://api.github.com/users/chaitnayabasava/orgs", "received_events_url": "https://api.github.com/users/chaitnayabasava/received_events", "repos_url": "https://api.github.com/users/chaitnayabasava/repos", "site_admin": false, "starred_url": "https://api.github.com/users/chaitnayabasava/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/chaitnayabasava/subscriptions", "type": "User", "url": "https://api.github.com/users/chaitnayabasava" }
[]
closed
false
null
[]
null
[]
"2020-12-07T03:25:20Z"
"2020-12-07T18:05:16Z"
"2020-12-07T16:43:23Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1231.diff", "html_url": "https://github.com/huggingface/datasets/pull/1231", "merged_at": "2020-12-07T16:43:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/1231.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1231" }
@lhoestq Opened a clean PR containing only the relevant files. Old PR: #1140
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1231/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1231/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4406
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4406/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4406/comments
https://api.github.com/repos/huggingface/datasets/issues/4406/events
https://github.com/huggingface/datasets/pull/4406
1,248,626,622
PR_kwDODunzps44ePLU
4,406
Improve language tag for PIAF dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/58078086?v=4", "events_url": "https://api.github.com/users/lbourdois/events{/privacy}", "followers_url": "https://api.github.com/users/lbourdois/followers", "following_url": "https://api.github.com/users/lbourdois/following{/other_user}", "gists_url": "https://api.github.com/users/lbourdois/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lbourdois", "id": 58078086, "login": "lbourdois", "node_id": "MDQ6VXNlcjU4MDc4MDg2", "organizations_url": "https://api.github.com/users/lbourdois/orgs", "received_events_url": "https://api.github.com/users/lbourdois/received_events", "repos_url": "https://api.github.com/users/lbourdois/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lbourdois/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lbourdois/subscriptions", "type": "User", "url": "https://api.github.com/users/lbourdois" }
[]
closed
false
null
[]
null
[]
"2022-05-25T19:41:55Z"
"2022-05-27T14:51:23Z"
"2022-05-27T14:51:23Z"
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4406.diff", "html_url": "https://github.com/huggingface/datasets/pull/4406", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/4406.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4406" }
Hi, as pointed out by @lhoestq in this discussion (https://huggingface.co/datasets/asi/wikitext_fr/discussions/1), it is not yet possible to edit datasets outside of a namespace with the Hub PR feature, so changes have to go through GitHub. This modification should allow better referencing, since only the `xx` language tags are currently taken into account, not the `xx-xx` ones.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4406/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4406/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4986
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4986/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4986/comments
https://api.github.com/repos/huggingface/datasets/issues/4986/events
https://github.com/huggingface/datasets/pull/4986
1,375,895,035
PR_kwDODunzps4_GNSd
4,986
[doc] Fix broken snippet that had too many quotes
{ "avatar_url": "https://avatars.githubusercontent.com/u/37621491?v=4", "events_url": "https://api.github.com/users/tomaarsen/events{/privacy}", "followers_url": "https://api.github.com/users/tomaarsen/followers", "following_url": "https://api.github.com/users/tomaarsen/following{/other_user}", "gists_url": "https://api.github.com/users/tomaarsen/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tomaarsen", "id": 37621491, "login": "tomaarsen", "node_id": "MDQ6VXNlcjM3NjIxNDkx", "organizations_url": "https://api.github.com/users/tomaarsen/orgs", "received_events_url": "https://api.github.com/users/tomaarsen/received_events", "repos_url": "https://api.github.com/users/tomaarsen/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tomaarsen/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tomaarsen/subscriptions", "type": "User", "url": "https://api.github.com/users/tomaarsen" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Spent the day familiarising myself with the huggingface line of products, and happened to run into some small issues here and there. Magically, I've found exactly one small issue in `transformers`, one in `accelerate` and now one in `datasets`, hah!\r\n\r\nAs for this PR, the issue seems solved according to the [new PR documentation](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4986/en/process#map):\r\n![image](https://user-images.githubusercontent.com/37621491/190646405-6afa06fa-9eac-48f6-ab30-2677944fb7b6.png)\r\n" ]
"2022-09-16T12:41:07Z"
"2022-09-16T22:12:21Z"
"2022-09-16T17:32:14Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4986.diff", "html_url": "https://github.com/huggingface/datasets/pull/4986", "merged_at": "2022-09-16T17:32:14Z", "patch_url": "https://github.com/huggingface/datasets/pull/4986.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4986" }
Hello! ### Pull request overview * Fix broken snippet in https://huggingface.co/docs/datasets/main/en/process that has too many quotes ### Details The snippet in question can be found here: https://huggingface.co/docs/datasets/main/en/process#map This screenshot shows the issue, there is a quote too many, causing the snippet to be colored incorrectly: ![image](https://user-images.githubusercontent.com/37621491/190640627-f7587362-0e44-4464-a5d1-a0b98df6986f.png) The change speaks for itself. Thank you for the detailed documentation, by the way. - Tom Aarsen
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4986/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4986/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2812
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2812/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2812/comments
https://api.github.com/repos/huggingface/datasets/issues/2812/events
https://github.com/huggingface/datasets/issues/2812
972,936,889
MDU6SXNzdWU5NzI5MzY4ODk=
2,812
arXiv Dataset verification problem
{ "avatar_url": "https://avatars.githubusercontent.com/u/13485709?v=4", "events_url": "https://api.github.com/users/eladsegal/events{/privacy}", "followers_url": "https://api.github.com/users/eladsegal/followers", "following_url": "https://api.github.com/users/eladsegal/following{/other_user}", "gists_url": "https://api.github.com/users/eladsegal/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/eladsegal", "id": 13485709, "login": "eladsegal", "node_id": "MDQ6VXNlcjEzNDg1NzA5", "organizations_url": "https://api.github.com/users/eladsegal/orgs", "received_events_url": "https://api.github.com/users/eladsegal/received_events", "repos_url": "https://api.github.com/users/eladsegal/repos", "site_admin": false, "starred_url": "https://api.github.com/users/eladsegal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/eladsegal/subscriptions", "type": "User", "url": "https://api.github.com/users/eladsegal" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
open
false
null
[]
null
[]
"2021-08-17T18:01:48Z"
"2022-01-19T14:15:35Z"
null
CONTRIBUTOR
null
null
null
## Describe the bug `dataset_infos.json` for `arxiv_dataset` contains a fixed number of training examples; however, the data (downloaded from an external source) is updated every week with additional examples. Therefore, loading the dataset without `ignore_verifications=True` results in a verification error.
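A minimal sketch of the workaround the issue names, assuming `ignore_verifications=True` is acceptable for your use case; the `data_dir` value is hypothetical, in case the script expects a manually downloaded copy:

```python
from datasets import load_dataset

# Skip checksum/size verification, since the upstream dump grows every week.
ds = load_dataset("arxiv_dataset", data_dir="path/to/arxiv", ignore_verifications=True)
```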
{ "+1": 2, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/2812/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2812/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/4442
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4442/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4442/comments
https://api.github.com/repos/huggingface/datasets/issues/4442/events
https://github.com/huggingface/datasets/issues/4442
1,258,589,276
I_kwDODunzps5LBIxc
4,442
Dataset Viewer issue for amazon_polarity
{ "avatar_url": "https://avatars.githubusercontent.com/u/26859204?v=4", "events_url": "https://api.github.com/users/lewtun/events{/privacy}", "followers_url": "https://api.github.com/users/lewtun/followers", "following_url": "https://api.github.com/users/lewtun/following{/other_user}", "gists_url": "https://api.github.com/users/lewtun/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lewtun", "id": 26859204, "login": "lewtun", "node_id": "MDQ6VXNlcjI2ODU5MjA0", "organizations_url": "https://api.github.com/users/lewtun/orgs", "received_events_url": "https://api.github.com/users/lewtun/received_events", "repos_url": "https://api.github.com/users/lewtun/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lewtun/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lewtun/subscriptions", "type": "User", "url": "https://api.github.com/users/lewtun" }
[ { "color": "E5583E", "default": false, "description": "Related to the dataset viewer on huggingface.co", "id": 3470211881, "name": "dataset-viewer", "node_id": "LA_kwDODunzps7O1zsp", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset-viewer" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/1676121?v=4", "events_url": "https://api.github.com/users/severo/events{/privacy}", "followers_url": "https://api.github.com/users/severo/followers", "following_url": "https://api.github.com/users/severo/following{/other_user}", "gists_url": "https://api.github.com/users/severo/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/severo", "id": 1676121, "login": "severo", "node_id": "MDQ6VXNlcjE2NzYxMjE=", "organizations_url": "https://api.github.com/users/severo/orgs", "received_events_url": "https://api.github.com/users/severo/received_events", "repos_url": "https://api.github.com/users/severo/repos", "site_admin": false, "starred_url": "https://api.github.com/users/severo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/severo/subscriptions", "type": "User", "url": "https://api.github.com/users/severo" } ]
null
[ "Thanks, looking at it", "Not sure what happened 😬, but it's fixed" ]
"2022-06-02T19:18:38Z"
"2022-06-07T18:50:37Z"
"2022-06-07T18:50:37Z"
MEMBER
null
null
null
### Link https://huggingface.co/datasets/amazon_polarity/viewer/amazon_polarity/test ### Description For some reason the train split is OK but the test split is not for this dataset: ``` Server error Status code: 400 Exception: FileNotFoundError Message: [Errno 2] No such file or directory: '/cache/modules/datasets_modules/datasets/amazon_polarity/__init__.py' ``` ### Owner No
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4442/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4442/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5057
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5057/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5057/comments
https://api.github.com/repos/huggingface/datasets/issues/5057/events
https://github.com/huggingface/datasets/pull/5057
1,394,827,216
PR_kwDODunzps5AD4c6
5,057
Support `converters` in `CsvBuilder`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
"2022-10-03T14:23:21Z"
"2022-10-04T11:19:28Z"
"2022-10-04T11:17:32Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5057.diff", "html_url": "https://github.com/huggingface/datasets/pull/5057", "merged_at": "2022-10-04T11:17:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/5057.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5057" }
Add the `converters` param to `CsvBuilder`, to help in situations like [this one](https://discuss.huggingface.co/t/typeerror-in-load-dataset-related-to-a-sequence-of-strings/23545).
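A hedged usage sketch of the new parameter, assuming `converters` is forwarded to `pandas.read_csv` as in the linked thread; the file name, column name, and parsing function are illustrative:

```python
from datasets import load_dataset

# Parse a pipe-separated string column into a list of strings at load time.
ds = load_dataset(
    "csv",
    data_files="data.csv",                          # hypothetical file
    converters={"labels": lambda s: s.split("|")},  # hypothetical column and parser
)
```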
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5057/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5057/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1610
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1610/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1610/comments
https://api.github.com/repos/huggingface/datasets/issues/1610/events
https://github.com/huggingface/datasets/issues/1610
771,453,599
MDU6SXNzdWU3NzE0NTM1OTk=
1,610
shuffle does not accept seed
{ "avatar_url": "https://avatars.githubusercontent.com/u/6278280?v=4", "events_url": "https://api.github.com/users/rabeehk/events{/privacy}", "followers_url": "https://api.github.com/users/rabeehk/followers", "following_url": "https://api.github.com/users/rabeehk/following{/other_user}", "gists_url": "https://api.github.com/users/rabeehk/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/rabeehk", "id": 6278280, "login": "rabeehk", "node_id": "MDQ6VXNlcjYyNzgyODA=", "organizations_url": "https://api.github.com/users/rabeehk/orgs", "received_events_url": "https://api.github.com/users/rabeehk/received_events", "repos_url": "https://api.github.com/users/rabeehk/repos", "site_admin": false, "starred_url": "https://api.github.com/users/rabeehk/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rabeehk/subscriptions", "type": "User", "url": "https://api.github.com/users/rabeehk" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Hi, did you check the doc on `shuffle`?\r\nhttps://huggingface.co/docs/datasets/package_reference/main_classes.html?datasets.Dataset.shuffle#datasets.Dataset.shuffle", "Hi Thomas\r\nthanks for reponse, yes, I did checked it, but this does not work for me please see \r\n\r\n```\r\n(internship) rkarimi@italix17:/idiap/user/rkarimi/dev$ python \r\nPython 3.7.9 (default, Aug 31 2020, 12:42:55) \r\n[GCC 7.3.0] :: Anaconda, Inc. on linux\r\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\r\n>>> import datasets \r\n2020-12-20 01:48:50.766004: W tensorflow/stream_executor/platform/default/dso_loader.cc:60] Could not load dynamic library 'libcudart.so.11.0'; dlerror: libcudart.so.11.0: cannot open shared object file: No such file or directory\r\n2020-12-20 01:48:50.766029: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine.\r\n>>> data = datasets.load_dataset(\"scitail\", \"snli_format\")\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\ncahce dir /idiap/temp/rkarimi/cache_home_1/datasets\r\nReusing dataset scitail (/idiap/temp/rkarimi/cache_home_1/datasets/scitail/snli_format/1.1.0/fd8ccdfc3134ce86eb4ef10ba7f21ee2a125c946e26bb1dd3625fe74f48d3b90)\r\n>>> data.shuffle(seed=2)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\nTypeError: shuffle() got an unexpected keyword argument 'seed'\r\n\r\n```\r\n\r\ndatasets version\r\n`datasets 1.1.2 <pip>\r\n`\r\n", "Thanks for reporting ! \r\n\r\nIndeed it looks like an issue with `suffle` on `DatasetDict`. We're going to fix that.\r\nIn the meantime you can shuffle each split (train, validation, test) separately:\r\n```python\r\nshuffled_train_dataset = data[\"train\"].shuffle(seed=42)\r\n```\r\n" ]
"2020-12-19T20:59:39Z"
"2021-01-04T10:00:03Z"
"2021-01-04T10:00:03Z"
CONTRIBUTOR
null
null
null
Hi, I need to shuffle the dataset based on epoch+seed so it is consistent across the cores, but when I pass `seed` to `shuffle`, it is not accepted. Could you assist me with this? Thanks @lhoestq
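A minimal sketch of the per-split workaround suggested in the thread, assuming a `DatasetDict` loaded as in the report:

```python
from datasets import load_dataset

# DatasetDict.shuffle(seed=...) was not supported in datasets 1.1.2,
# so shuffle each split separately with an epoch-dependent seed.
data = load_dataset("scitail", "snli_format")
epoch, base_seed = 0, 42
shuffled = {split: ds.shuffle(seed=base_seed + epoch) for split, ds in data.items()}
```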
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1610/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1610/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/1804
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1804/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1804/comments
https://api.github.com/repos/huggingface/datasets/issues/1804/events
https://github.com/huggingface/datasets/pull/1804
798,483,881
MDExOlB1bGxSZXF1ZXN0NTY1MjkzMTc3
1,804
Add SICK dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/36051308?v=4", "events_url": "https://api.github.com/users/calpt/events{/privacy}", "followers_url": "https://api.github.com/users/calpt/followers", "following_url": "https://api.github.com/users/calpt/following{/other_user}", "gists_url": "https://api.github.com/users/calpt/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/calpt", "id": 36051308, "login": "calpt", "node_id": "MDQ6VXNlcjM2MDUxMzA4", "organizations_url": "https://api.github.com/users/calpt/orgs", "received_events_url": "https://api.github.com/users/calpt/received_events", "repos_url": "https://api.github.com/users/calpt/repos", "site_admin": false, "starred_url": "https://api.github.com/users/calpt/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/calpt/subscriptions", "type": "User", "url": "https://api.github.com/users/calpt" }
[]
closed
false
null
[]
null
[]
"2021-02-01T15:57:44Z"
"2021-02-05T17:46:28Z"
"2021-02-05T15:49:25Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1804.diff", "html_url": "https://github.com/huggingface/datasets/pull/1804", "merged_at": "2021-02-05T15:49:25Z", "patch_url": "https://github.com/huggingface/datasets/pull/1804.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1804" }
Adds the SICK dataset (http://marcobaroni.org/composes/sick.html). Closes #1772. Edit: also closes #1632, which is the original issue requesting the dataset. The newer one is a duplicate.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1804/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1804/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4992
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4992/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4992/comments
https://api.github.com/repos/huggingface/datasets/issues/4992/events
https://github.com/huggingface/datasets/pull/4992
1,379,031,842
PR_kwDODunzps4_QVw4
4,992
Support streaming iwslt2017 dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
"2022-09-20T08:35:41Z"
"2022-09-20T09:27:55Z"
"2022-09-20T09:15:24Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4992.diff", "html_url": "https://github.com/huggingface/datasets/pull/4992", "merged_at": "2022-09-20T09:15:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/4992.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4992" }
Support streaming iwslt2017 dataset. Once this PR is merged: - [x] Remove old ".tgz" data files from the Hub.
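A hedged sketch of the streaming access this PR enables; the config name below is an assumption chosen for illustration:

```python
from datasets import load_dataset

# Stream the dataset instead of downloading it fully.
ds = load_dataset("iwslt2017", "iwslt2017-de-en", streaming=True)  # config name is illustrative
print(next(iter(ds["train"])))
```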
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4992/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4992/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3685
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3685/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3685/comments
https://api.github.com/repos/huggingface/datasets/issues/3685/events
https://github.com/huggingface/datasets/pull/3685
1,126,240,444
PR_kwDODunzps4yLw3m
3,685
Add support for `Audio` and `Image` feature in `push_to_hub`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "> Cool thanks !\r\n> \r\n> Also cc @patrickvonplaten @anton-l it means that when calling push_to_hub, the audio bytes are embedded in the parquet files (we don't upload the audio files themselves)\r\n\r\nJust to verify quickly the size of the dataset doesn't change in this case no? E.g. if a dataset has say 20GB in size when stored in `.mp3` format it could have up to 100GB when stored in WAV. But since we are just taking the bytes here a 20GB .mp3 dataset would also have 20GB when stored in parquet no?", "@lhoestq I've addressed your comments. Additionally, I've modified `cast_storage` to account for possible null (`None`) values.\r\n\r\n@patrickvonplaten Yes, the dataset size stays the same (at least because Parquet files are compressed).", "Feel free to merge if it's all good to you :)" ]
"2022-02-07T16:47:16Z"
"2022-02-14T18:14:57Z"
"2022-02-14T18:04:58Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3685.diff", "html_url": "https://github.com/huggingface/datasets/pull/3685", "merged_at": "2022-02-14T18:04:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/3685.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3685" }
Add support for the `Audio` and the `Image` feature in `push_to_hub`. The idea is to remove local path information and store file content under "bytes" in the Arrow table before the push. My initial approach (https://github.com/huggingface/datasets/commit/34c652afeff9686b6b8bf4e703c84d2205d670aa) was to use a map transform similar to [`decode_nested_example`](https://github.com/huggingface/datasets/blob/5e0f6068741464f833ff1802e24ecc2064aaea9f/src/datasets/features/features.py#L1023-L1056) while having decoding turned off, but I wasn't satisfied with the code quality, so I ended up using the `temporary_assignment` decorator to override `cast_storage`, which allows me to directly modify the underlying storage (the final op is similar to `Dataset.cast`) and results in a much simpler code. Additionally, I added the `allow_cast` flag that can disable this behavior in the situations where it's not needed (e.g. the dataset is already in the correct format for the Hub, etc.) EDIT: `allow_cast` renamed to `embed_external_files`
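A minimal usage sketch under the PR's final naming (`embed_external_files`); the file path and repo id are hypothetical:

```python
from datasets import Audio, Dataset

# Local audio paths are replaced by embedded bytes before upload,
# so the pushed shards are self-contained.
ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]}).cast_column("audio", Audio())
ds.push_to_hub("username/my_audio_dataset", embed_external_files=True)
```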
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3685/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3685/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3702
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3702/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3702/comments
https://api.github.com/repos/huggingface/datasets/issues/3702/events
https://github.com/huggingface/datasets/pull/3702
1,130,666,707
PR_kwDODunzps4yahKc
3,702
Update data URL of lm1b dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/7105134?v=4", "events_url": "https://api.github.com/users/yazdanbakhsh/events{/privacy}", "followers_url": "https://api.github.com/users/yazdanbakhsh/followers", "following_url": "https://api.github.com/users/yazdanbakhsh/following{/other_user}", "gists_url": "https://api.github.com/users/yazdanbakhsh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/yazdanbakhsh", "id": 7105134, "login": "yazdanbakhsh", "node_id": "MDQ6VXNlcjcxMDUxMzQ=", "organizations_url": "https://api.github.com/users/yazdanbakhsh/orgs", "received_events_url": "https://api.github.com/users/yazdanbakhsh/received_events", "repos_url": "https://api.github.com/users/yazdanbakhsh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/yazdanbakhsh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yazdanbakhsh/subscriptions", "type": "User", "url": "https://api.github.com/users/yazdanbakhsh" }
[ { "color": "0e8a16", "default": false, "description": "Contribution to a dataset script", "id": 4564477500, "name": "dataset contribution", "node_id": "LA_kwDODunzps8AAAABEBBmPA", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20contribution" } ]
closed
false
null
[]
null
[ "Hi ! I'm getting some 503 from both the http and https addresses. Do you think we could host this data somewhere else ? (please check if there is a license and if it allows redistribution)", "Both HTTP and HTTPS links are working now.\r\n\r\nWe are closing this PR." ]
"2022-02-10T18:46:30Z"
"2022-09-23T11:52:39Z"
"2022-09-23T11:52:39Z"
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3702.diff", "html_url": "https://github.com/huggingface/datasets/pull/3702", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/3702.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3702" }
The HTTP address doesn't work anymore.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3702/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3702/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3427
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3427/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3427/comments
https://api.github.com/repos/huggingface/datasets/issues/3427/events
https://github.com/huggingface/datasets/pull/3427
1,078,782,159
PR_kwDODunzps4vxb_y
3,427
Add The Pile Enron Emails subset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[]
"2021-12-13T17:14:16Z"
"2021-12-14T17:30:59Z"
"2021-12-14T17:30:57Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3427.diff", "html_url": "https://github.com/huggingface/datasets/pull/3427", "merged_at": "2021-12-14T17:30:55Z", "patch_url": "https://github.com/huggingface/datasets/pull/3427.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3427" }
Add: - Enron Emails subset of The Pile: "enron_emails" config Closes bigscience-workshop/data_tooling#310. CC: @StellaAthena
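A hedged usage sketch for the new subset, using the config name given in this PR:

```python
from datasets import load_dataset

# Load only the Enron Emails subset of The Pile.
enron = load_dataset("the_pile", "enron_emails")
```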
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3427/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3427/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2570
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2570/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2570/comments
https://api.github.com/repos/huggingface/datasets/issues/2570/events
https://github.com/huggingface/datasets/pull/2570
933,402,521
MDExOlB1bGxSZXF1ZXN0NjgwNjEzNzc0
2,570
Minor fix docs format for bertscore
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[]
"2021-06-30T07:42:12Z"
"2021-06-30T15:31:01Z"
"2021-06-30T15:31:01Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2570.diff", "html_url": "https://github.com/huggingface/datasets/pull/2570", "merged_at": "2021-06-30T15:31:01Z", "patch_url": "https://github.com/huggingface/datasets/pull/2570.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2570" }
Minor fix docs format for bertscore: - link to README - format of KWARGS_DESCRIPTION
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2570/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2570/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5608
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5608/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5608/comments
https://api.github.com/repos/huggingface/datasets/issues/5608/events
https://github.com/huggingface/datasets/issues/5608
1,609,996,563
I_kwDODunzps5f9pkT
5,608
audiofolder only creates a dataset of 13 rows (files) when the data folder it's reading from has 20,000 mp3 files.
{ "avatar_url": "https://avatars.githubusercontent.com/u/107211437?v=4", "events_url": "https://api.github.com/users/jcho19/events{/privacy}", "followers_url": "https://api.github.com/users/jcho19/followers", "following_url": "https://api.github.com/users/jcho19/following{/other_user}", "gists_url": "https://api.github.com/users/jcho19/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jcho19", "id": 107211437, "login": "jcho19", "node_id": "U_kgDOBmPqrQ", "organizations_url": "https://api.github.com/users/jcho19/orgs", "received_events_url": "https://api.github.com/users/jcho19/received_events", "repos_url": "https://api.github.com/users/jcho19/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jcho19/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jcho19/subscriptions", "type": "User", "url": "https://api.github.com/users/jcho19" }
[]
closed
false
null
[]
null
[ "Hi!\r\n\r\n> naming convention of mp3 files\r\n\r\nYes, this could be the problem. MP3 files should end with `.mp3`/`.MP3` to be recognized as audio files.\r\n\r\nIf the file names are not the culprit, can you paste the audio folder's directory structure to help us reproduce the error (e.g., by running the `tree \"x\"` command)?", "Hi! I'm sorry, I don't want to reveal my entire dataset, but here's a snippet (all of the mp3 files below are some of the ones not being recognized by audiofolder. Also, for another dataset, audiofolder loaded zero mp3 files because \"train\" was in the name of one of the mp3 files. \r\nmy_dataset\r\n├── data\r\n│   ├── VHA_Innovation_Stories_-_Day_2-123.mp3\r\n│   ├── VHA_Innovation_Stories_-_Day_2-124.mp3\r\n│   ├── ASSOCIATION_OF_GENERAL_PRACTITIONERS_OF_JAMAICA_NEPHROLOGY_CONFERENCE_-_JULY_3,_2022-93.mp3\r\n│   ├── ASSOCIATION_OF_GENERAL_PRACTITIONERS_OF_JAMAICA_NEPHROLOGY_CONFERENCE_-_JULY_3,_2022-94.mp3\r\n│   ├── ASSOCIATION_OF_GENERAL_PRACTITIONERS_OF_JAMAICA_NEPHROLOGY_CONFERENCE_-_JULY_3,_2022-95.mp3\r\n│   ├── Your_Impact\\357\\274\\232_Neurosurgery_equipment-5.mp3\r\n│   └── Your_Impact\\357\\274\\232_Neurosurgery_equipment-6.mp3\r\n└── metadata.csv\r\n\r\nHere's a few of the 13 files recognized by the dataset:\r\nBritish_Heart_Foundation_-_Your_guide_to_a_Coronary_Angiogram,_a_test_for_heart_disease-1.mp3\r\nBritish_Heart_Foundation_-_Your_guide_to_a_Coronary_Angiogram,_a_test_for_heart_disease-2.mp3\r\nBritish_Heart_Foundation_-_Your_guide_to_a_Coronary_Angiogram,_a_test_for_heart_disease-3.mp3\r\nIVP_⧸_IVU_test_Procedure_for_Kidneys_intravenous_pyelogram_-_medical_radiology_X-ray_ivp-1.mp3\r\nIVP_⧸_IVU_test_Procedure_for_Kidneys_intravenous_pyelogram_-_medical_radiology_X-ray_ivp-2.mp3" ]
"2023-03-05T00:14:45Z"
"2023-03-12T00:02:57Z"
"2023-03-12T00:02:57Z"
NONE
null
null
null
### Describe the bug `x = load_dataset("audiofolder", data_dir="x")` When running this, `x` is a dataset of 13 rows (files) when it should be 20,000 rows (files), as the data_dir "x" has 20,000 mp3 files. Does anyone know what could possibly cause this (naming convention of the mp3 files, etc.)? ### Steps to reproduce the bug `x = load_dataset("audiofolder", data_dir="x")` ### Expected behavior `x = load_dataset("audiofolder", data_dir="x")` should create a dataset of 20,000 rows (files). ### Environment info - `datasets` version: 2.9.0 - Platform: Linux-3.10.0-1160.80.1.el7.x86_64-x86_64-with-glibc2.17 - Python version: 3.9.16 - PyArrow version: 11.0.0 - Pandas version: 1.5.3
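A diagnostic sketch based on the maintainer's hint that AudioFolder matches files by extension; the directory name mirrors the report and is otherwise hypothetical:

```python
from pathlib import Path

# Count how many files under the data dir actually end in .mp3;
# files with any other suffix are skipped by AudioFolder.
files = [p for p in Path("x").rglob("*") if p.is_file() and p.name != "metadata.csv"]
mp3s = [p for p in files if p.suffix.lower() == ".mp3"]
print(f"{len(mp3s)} of {len(files)} files end in .mp3")
```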
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5608/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5608/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/1633
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1633/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1633/comments
https://api.github.com/repos/huggingface/datasets/issues/1633/events
https://github.com/huggingface/datasets/issues/1633
774,422,603
MDU6SXNzdWU3NzQ0MjI2MDM=
1,633
social_i_qa wrong format of labels
{ "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/ghost", "id": 10137, "login": "ghost", "node_id": "MDQ6VXNlcjEwMTM3", "organizations_url": "https://api.github.com/users/ghost/orgs", "received_events_url": "https://api.github.com/users/ghost/received_events", "repos_url": "https://api.github.com/users/ghost/repos", "site_admin": false, "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "type": "User", "url": "https://api.github.com/users/ghost" }
[]
closed
false
null
[]
null
[ "@lhoestq, should I raise a PR for this? Just a minor change while reading labels text file", "Sure feel free to open a PR thanks !" ]
"2020-12-24T13:11:54Z"
"2020-12-30T17:18:49Z"
"2020-12-30T17:18:49Z"
NONE
null
null
null
Hi, there is an extra "\n" in the labels of the social_i_qa dataset. No big deal, but I was wondering if you could remove it to make it consistent: the label is currently '1\n' instead of '1'. Thanks. ``` >>> import datasets >>> from datasets import load_dataset >>> dataset = load_dataset( ... 'social_i_qa') cahce dir /julia/cache/datasets Downloading: 4.72kB [00:00, 3.52MB/s] cahce dir /julia/cache/datasets Downloading: 2.19kB [00:00, 1.81MB/s] Using custom data configuration default Reusing dataset social_i_qa (/julia/datasets/social_i_qa/default/0.1.0/4a4190cc2d2482d43416c2167c0c5dccdd769d4482e84893614bd069e5c3ba06) >>> dataset['train'][0] {'answerA': 'like attending', 'answerB': 'like staying home', 'answerC': 'a good friend to have', 'context': 'Cameron decided to have a barbecue and gathered her friends together.', 'label': '1\n', 'question': 'How would Others feel as a result?'} ```
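A minimal client-side sketch, assuming you want to normalize the label until it is fixed upstream:

```python
from datasets import load_dataset

# Strip the stray trailing "\n" from the label column of every split.
dataset = load_dataset("social_i_qa")
dataset = dataset.map(lambda ex: {"label": ex["label"].strip()})
```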
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1633/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1633/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/3942
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3942/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3942/comments
https://api.github.com/repos/huggingface/datasets/issues/3942/events
https://github.com/huggingface/datasets/issues/3942
1,171,177,122
I_kwDODunzps5Fzr6i
3,942
reddit_tifu dataset: Checksums didn't match for dataset source files
{ "avatar_url": "https://avatars.githubusercontent.com/u/8507585?v=4", "events_url": "https://api.github.com/users/XingxingZhang/events{/privacy}", "followers_url": "https://api.github.com/users/XingxingZhang/followers", "following_url": "https://api.github.com/users/XingxingZhang/following{/other_user}", "gists_url": "https://api.github.com/users/XingxingZhang/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/XingxingZhang", "id": 8507585, "login": "XingxingZhang", "node_id": "MDQ6VXNlcjg1MDc1ODU=", "organizations_url": "https://api.github.com/users/XingxingZhang/orgs", "received_events_url": "https://api.github.com/users/XingxingZhang/received_events", "repos_url": "https://api.github.com/users/XingxingZhang/repos", "site_admin": false, "starred_url": "https://api.github.com/users/XingxingZhang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/XingxingZhang/subscriptions", "type": "User", "url": "https://api.github.com/users/XingxingZhang" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists", "id": 1935892865, "name": "duplicate", "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate" } ]
closed
false
null
[]
null
[ "Hi @XingxingZhang, \r\n\r\nWe have already fixed this. You should update `datasets` version to at least 1.18.4:\r\n```shell\r\npip install -U datasets\r\n```\r\nAnd then force the redownload:\r\n```python\r\nload_dataset(\"...\", download_mode=\"force_redownload\")\r\n```\r\n\r\nDuplicate of:\r\n- #3773", "thanks @albertvillanova . by upgrading to 1.18.4 and using `load_dataset(\"...\", download_mode=\"force_redownload\")` fixed \r\n the bug.\r\n\r\nusing the following as you suggested in another thread can also fixed the bug\r\n```\r\npip install git+https://github.com/huggingface/datasets#egg=datasets\r\n```\r\n", "The latter solution (installing from GitHub) was proposed because the fix was not released yet. But last week we made the 1.18.4 patch release (with the fix), so no longer necessary to install from GitHub.\r\n\r\nYou can now install from PyPI, as usual:\r\n```shell\r\npip install -U datasets\r\n```\r\n" ]
"2022-03-16T15:23:30Z"
"2022-03-16T15:57:43Z"
"2022-03-16T15:39:25Z"
NONE
null
null
null
## Describe the bug Loading the reddit_tifu dataset throws the exception "Checksums didn't match for dataset source files". ## Steps to reproduce the bug ```python import datasets from datasets import load_dataset print(datasets.__version__) # load_dataset('billsum') load_dataset('reddit_tifu', 'short') ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 1.17.0 - Platform: macOS - Python version: Python 3.7.6 - PyArrow version: 3.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3942/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3942/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/1202
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1202/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1202/comments
https://api.github.com/repos/huggingface/datasets/issues/1202/events
https://github.com/huggingface/datasets/pull/1202
757,934,408
MDExOlB1bGxSZXF1ZXN0NTMzMjAyNjE0
1,202
Medical question pairs
{ "avatar_url": "https://avatars.githubusercontent.com/u/46425391?v=4", "events_url": "https://api.github.com/users/tuner007/events{/privacy}", "followers_url": "https://api.github.com/users/tuner007/followers", "following_url": "https://api.github.com/users/tuner007/following{/other_user}", "gists_url": "https://api.github.com/users/tuner007/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/tuner007", "id": 46425391, "login": "tuner007", "node_id": "MDQ6VXNlcjQ2NDI1Mzkx", "organizations_url": "https://api.github.com/users/tuner007/orgs", "received_events_url": "https://api.github.com/users/tuner007/received_events", "repos_url": "https://api.github.com/users/tuner007/repos", "site_admin": false, "starred_url": "https://api.github.com/users/tuner007/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tuner007/subscriptions", "type": "User", "url": "https://api.github.com/users/tuner007" }
[]
closed
false
null
[]
null
[]
"2020-12-06T14:09:07Z"
"2020-12-06T17:41:28Z"
"2020-12-06T17:41:28Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1202.diff", "html_url": "https://github.com/huggingface/datasets/pull/1202", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1202.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1202" }
This dataset consists of 3048 similar and dissimilar medical question pairs, hand-generated and labeled by Curai's doctors. Dataset: https://github.com/curai/medical-question-pair-dataset Paper: https://drive.google.com/file/d/1CHPGBXkvZuZc8hpr46HeHU6U6jnVze-s/view **No splits added**
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1202/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1202/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4080
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4080/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4080/comments
https://api.github.com/repos/huggingface/datasets/issues/4080/events
https://github.com/huggingface/datasets/issues/4080
1,189,667,296
I_kwDODunzps5G6OHg
4,080
NonMatchingChecksumError for downloading conll2012_ontonotesv5 dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/17963619?v=4", "events_url": "https://api.github.com/users/richarddwang/events{/privacy}", "followers_url": "https://api.github.com/users/richarddwang/followers", "following_url": "https://api.github.com/users/richarddwang/following{/other_user}", "gists_url": "https://api.github.com/users/richarddwang/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/richarddwang", "id": 17963619, "login": "richarddwang", "node_id": "MDQ6VXNlcjE3OTYzNjE5", "organizations_url": "https://api.github.com/users/richarddwang/orgs", "received_events_url": "https://api.github.com/users/richarddwang/received_events", "repos_url": "https://api.github.com/users/richarddwang/repos", "site_admin": false, "starred_url": "https://api.github.com/users/richarddwang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/richarddwang/subscriptions", "type": "User", "url": "https://api.github.com/users/richarddwang" }
[ { "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists", "id": 1935892865, "name": "duplicate", "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate" }, { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "Hi @richarddwang,\r\n\r\n\r\nIndeed, we have recently updated the loading script of that dataset (and fixed that bug as well):\r\n- #4002\r\n\r\nThat fix will be available in our next `datasets` library release. In the meantime, you can incorporate that fix by:\r\n- installing `datasets` from our GitHub repo:\r\n```bash\r\npip install git+https://github.com/huggingface/datasets#egg=datasets\r\n```\r\n- forcing the data files to be redownloaded\r\n```python\r\nds = load_dataset('conll2012_ontonotesv5', 'english_v4', split=\"test\", download_mode=\"force_redownload\")\r\n```\r\n\r\nFeel free to re-open this issue if the problem persists. \r\n\r\nDuplicate of:\r\n- #4031" ]
"2022-04-01T11:34:28Z"
"2022-04-01T13:59:10Z"
"2022-04-01T13:59:10Z"
CONTRIBUTOR
null
null
null
## Steps to reproduce the bug ```python datasets.load_dataset("conll2012_ontonotesv5", "english_v12") ``` ## Actual results ``` Downloading builder script: 32.2kB [00:00, 9.72MB/s] Downloading metadata: 20.0kB [00:00, 10.4MB/s] Downloading and preparing dataset conll2012_ontonotesv5/english_v12 (download: 174.83 MiB, generated: 204.29 MiB, post-processed: Unknown size , total: 379.12 MiB) to ... Traceback (most recent call last): [315/390] File "/home/yisiang/lgtn/conll2012/run.py", line 86, in <module> train() File "/home/yisiang/lgtn/conll2012/run.py", line 65, in train trainer.fit(model, datamodule=dm) File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 740, in fit self._call_and_handle_interrupt( File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 685, in _call_and_handle_inte rrupt return trainer_fn(*args, **kwargs) File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 777, in _fit_impl self._run(model, ckpt_path=ckpt_path) File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/pytorch_lightning/trainer/trainer.py", line 1131, in _run self._data_connector.prepare_data() File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/pytorch_lightning/trainer/connectors/data_connector.py", line 154, in pre pare_data self.trainer.datamodule.prepare_data() File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/pytorch_lightning/core/datamodule.py", line 474, in wrapped_fn fn(*args, **kwargs) File "/home/yisiang/lgtn/_abstract_task/data.py", line 43, in prepare_data raw_dsets = datasets.load_dataset(**load_dataset_kwargs) File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/datasets/load.py", line 1687, in load_dataset builder_instance.download_and_prepare( File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/datasets/builder.py", line 605, in download_and_prepare self._download_and_prepare( File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/datasets/builder.py", line 1104, in _download_and_prepare super()._download_and_prepare(dl_manager, verify_infos, check_duplicate_keys=verify_infos) File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/datasets/builder.py", line 676, in _download_and_prepare verify_checksums( File "/home/yisiang/miniconda3/envs/ai/lib/python3.9/site-packages/datasets/utils/info_utils.py", line 40, in verify_checksums raise NonMatchingChecksumError(error_msg + str(bad_urls)) datasets.utils.info_utils.NonMatchingChecksumError: Checksums didn't match for dataset source files: ['https://md-datasets-cache-zipfiles-prod.s3.eu-west-1.amazonaws.com/zmycy7t9h9-1.zip'] ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.0.0
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4080/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4080/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/285
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/285/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/285/comments
https://api.github.com/repos/huggingface/datasets/issues/285/events
https://github.com/huggingface/datasets/pull/285
641,360,702
MDExOlB1bGxSZXF1ZXN0NDM2NjAyMjk4
285
Consistent formatting of citations
{ "avatar_url": "https://avatars.githubusercontent.com/u/38249783?v=4", "events_url": "https://api.github.com/users/mariamabarham/events{/privacy}", "followers_url": "https://api.github.com/users/mariamabarham/followers", "following_url": "https://api.github.com/users/mariamabarham/following{/other_user}", "gists_url": "https://api.github.com/users/mariamabarham/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariamabarham", "id": 38249783, "login": "mariamabarham", "node_id": "MDQ6VXNlcjM4MjQ5Nzgz", "organizations_url": "https://api.github.com/users/mariamabarham/orgs", "received_events_url": "https://api.github.com/users/mariamabarham/received_events", "repos_url": "https://api.github.com/users/mariamabarham/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariamabarham/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariamabarham/subscriptions", "type": "User", "url": "https://api.github.com/users/mariamabarham" }
[]
closed
false
null
[]
null
[ "Circle CI shuold be green :-) " ]
"2020-06-18T16:25:23Z"
"2020-06-22T08:09:25Z"
"2020-06-22T08:09:24Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/285.diff", "html_url": "https://github.com/huggingface/datasets/pull/285", "merged_at": "2020-06-22T08:09:23Z", "patch_url": "https://github.com/huggingface/datasets/pull/285.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/285" }
#283
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/285/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/285/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6424
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6424/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6424/comments
https://api.github.com/repos/huggingface/datasets/issues/6424/events
https://github.com/huggingface/datasets/pull/6424
1,995,224,516
PR_kwDODunzps5fiwDC
6,424
[docs] troubleshooting guide
{ "avatar_url": "https://avatars.githubusercontent.com/u/1065417?v=4", "events_url": "https://api.github.com/users/MKhalusova/events{/privacy}", "followers_url": "https://api.github.com/users/MKhalusova/followers", "following_url": "https://api.github.com/users/MKhalusova/following{/other_user}", "gists_url": "https://api.github.com/users/MKhalusova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MKhalusova", "id": 1065417, "login": "MKhalusova", "node_id": "MDQ6VXNlcjEwNjU0MTc=", "organizations_url": "https://api.github.com/users/MKhalusova/orgs", "received_events_url": "https://api.github.com/users/MKhalusova/received_events", "repos_url": "https://api.github.com/users/MKhalusova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MKhalusova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MKhalusova/subscriptions", "type": "User", "url": "https://api.github.com/users/MKhalusova" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6424). All of your documentation changes will be reflected on that endpoint.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005323 / 0.011353 (-0.006030) | 0.003560 / 0.011008 (-0.007448) | 0.062572 / 0.038508 (0.024064) | 0.049549 / 0.023109 (0.026440) | 0.236522 / 0.275898 (-0.039376) | 0.260601 / 0.323480 (-0.062879) | 0.002887 / 0.007986 (-0.005099) | 0.003225 / 0.004328 (-0.001103) | 0.048210 / 0.004250 (0.043960) | 0.038783 / 0.037052 (0.001731) | 0.242506 / 0.258489 (-0.015983) | 0.273906 / 0.293841 (-0.019935) | 0.027202 / 0.128546 (-0.101344) | 0.010577 / 0.075646 (-0.065069) | 0.211669 / 0.419271 (-0.207603) | 0.035727 / 0.043533 (-0.007806) | 0.242303 / 0.255139 (-0.012836) | 0.260468 / 0.283200 (-0.022732) | 0.020109 / 0.141683 (-0.121573) | 1.089603 / 1.452155 (-0.362552) | 1.149899 / 1.492716 (-0.342817) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.088768 / 0.018006 (0.070761) | 0.300300 / 0.000490 (0.299810) | 0.000212 / 0.000200 (0.000013) | 0.000050 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018758 / 0.037411 (-0.018653) | 0.060097 / 0.014526 (0.045571) | 0.074060 / 0.176557 (-0.102496) | 0.119977 / 0.737135 (-0.617158) | 0.075298 / 0.296338 (-0.221040) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.278640 / 0.215209 (0.063431) | 2.715574 / 2.077655 (0.637919) | 1.466644 / 1.504120 (-0.037476) | 1.344470 / 1.541195 (-0.196725) | 1.386984 / 1.468490 (-0.081506) | 0.575796 / 4.584777 (-4.008981) | 2.392324 / 3.745712 (-1.353388) | 2.826284 / 5.269862 (-2.443578) | 1.758997 / 4.565676 (-2.806679) | 0.062474 / 0.424275 (-0.361801) | 0.004930 / 0.007607 (-0.002678) | 0.332595 / 0.226044 (0.106551) | 3.240076 / 2.268929 (0.971147) | 1.785283 / 55.444624 (-53.659341) | 1.527594 / 6.876477 (-5.348882) | 1.562840 / 2.142072 (-0.579233) | 0.655474 / 4.805227 (-4.149754) | 0.116682 / 6.500664 (-6.383983) | 0.042664 / 0.075469 (-0.032805) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.936306 / 1.841788 (-0.905481) | 11.561239 / 8.074308 (3.486931) | 10.341918 / 10.191392 (0.150526) | 0.140602 / 0.680424 (-0.539822) | 0.013857 / 0.534201 (-0.520344) | 0.294241 / 0.579283 (-0.285042) | 0.268359 / 0.434364 (-0.166005) | 0.326344 / 0.540337 (-0.213993) | 0.430936 / 1.386936 (-0.956000) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005197 / 0.011353 (-0.006156) | 0.003543 / 0.011008 (-0.007465) | 0.049051 / 0.038508 (0.010542) | 0.052742 / 0.023109 (0.029633) | 0.277032 / 0.275898 (0.001134) | 0.300799 / 0.323480 (-0.022681) | 0.003922 / 0.007986 (-0.004064) | 0.002573 / 0.004328 (-0.001755) | 0.047270 / 0.004250 (0.043019) | 0.039782 / 0.037052 (0.002730) | 0.282780 / 0.258489 (0.024291) | 0.308858 / 0.293841 (0.015017) | 0.028641 / 0.128546 (-0.099905) | 0.010516 / 0.075646 (-0.065131) | 0.056367 / 0.419271 (-0.362904) | 0.032346 / 0.043533 (-0.011186) | 0.277591 / 0.255139 (0.022452) | 0.298539 / 0.283200 (0.015339) | 0.018168 / 0.141683 (-0.123515) | 1.104331 / 1.452155 (-0.347823) | 1.187691 / 1.492716 (-0.305025) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.089511 / 0.018006 (0.071505) | 0.301309 / 0.000490 (0.300820) | 0.000213 / 0.000200 (0.000013) | 0.000049 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021466 / 0.037411 (-0.015945) | 0.069917 / 0.014526 (0.055391) | 0.081105 / 0.176557 (-0.095452) | 0.119619 / 0.737135 (-0.617516) | 0.083928 / 0.296338 (-0.212410) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.296471 / 0.215209 (0.081262) | 2.912139 / 2.077655 (0.834484) | 1.588861 / 1.504120 (0.084741) | 1.452148 / 1.541195 (-0.089047) | 1.475388 / 1.468490 (0.006898) | 0.555779 / 4.584777 (-4.028998) | 2.425599 / 3.745712 (-1.320113) | 2.792848 / 5.269862 (-2.477013) | 1.718757 / 4.565676 (-2.846919) | 0.077687 / 0.424275 (-0.346588) | 0.007522 / 0.007607 (-0.000085) | 0.348254 / 0.226044 (0.122210) | 3.439315 / 2.268929 (1.170386) | 1.925907 / 55.444624 (-53.518717) | 1.646163 / 6.876477 (-5.230314) | 1.662148 / 2.142072 (-0.479924) | 0.637277 / 4.805227 (-4.167950) | 0.116159 / 6.500664 (-6.384505) | 0.041518 / 0.075469 (-0.033952) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.966358 / 1.841788 (-0.875430) | 12.125201 / 8.074308 (4.050892) | 10.629939 / 10.191392 (0.438547) | 0.132439 / 0.680424 (-0.547984) | 0.015622 / 0.534201 (-0.518579) | 0.288824 / 0.579283 (-0.290459) | 0.277634 / 0.434364 (-0.156730) | 0.327200 / 0.540337 (-0.213138) | 0.549679 / 1.386936 (-0.837257) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#0850f663f5498e0f296461e99a345dfd65e3358f \"CML watermark\")\n" ]
"2023-11-15T17:28:14Z"
"2023-11-30T17:29:55Z"
"2023-11-30T17:23:46Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6424.diff", "html_url": "https://github.com/huggingface/datasets/pull/6424", "merged_at": "2023-11-30T17:23:46Z", "patch_url": "https://github.com/huggingface/datasets/pull/6424.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6424" }
Hi all! This is a PR adding a troubleshooting guide for Datasets docs. I went through the library's GitHub Issues and Forum questions and identified a few issues that are common enough that I think it would be valuable to include them in the troubleshooting guide. These are: - creating a dataset from a folder and not following the required format - authentication issues when using `push_to_hub` - `Too Many Requests` with `push_to_hub` - pickling issues when using `Dataset.from_generator()` There's also a section on asking for help. Please let me know if there are other common issues or advice that we can include here.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/6424/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6424/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4578
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4578/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4578/comments
https://api.github.com/repos/huggingface/datasets/issues/4578/events
https://github.com/huggingface/datasets/issues/4578
1,286,086,400
I_kwDODunzps5MqB8A
4,578
[Multi Configs] Use directories to differentiate between subsets/configurations
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[ "I want to be able to create folders in a model.", "How to set new split names, instead of train/test/validation? For example, I have a local dataset, consists of several subsets, named \"A\", \"B\", and \"C\". How can I create a huggingface dataset, with splits A/B/C ?\r\n\r\nThe document in https://huggingface.co/docs/datasets/dataset_script only tells me how to create datasets with subsets that is hosted on another server. How to do it if my datasets are local?", "> The document in https://huggingface.co/docs/datasets/dataset_script only tells me how to create datasets with subsets that is hosted on another server. How to do it if my datasets are local?\r\n\r\nIt works the same - you just need to use local paths instead of URLs" ]
"2022-06-27T16:55:11Z"
"2023-06-14T15:43:05Z"
null
MEMBER
null
null
null
Currently, to define several subsets/configurations of your dataset, you need to use a dataset script. However, it would be nice to have a no-code way to do this. For example, we could specify different configurations of a dataset (for example, if a dataset contains different languages) with one directory per configuration. These structures are not supported right now, but it would be nice to have them: ``` my_dataset_repository/ ├── README.md ├── en/ │ ├── train.csv │ └── test.csv └── fr/ ├── train.csv └── test.csv ``` Or with one directory per split: ``` my_dataset_repository/ ├── README.md ├── en/ │ ├── train/ │ │ ├── shard_0.csv │ │ └── shard_1.csv │ └── test/ │ ├── shard_0.csv │ └── shard_1.csv └── fr/ ├── train/ │ ├── shard_0.csv │ └── shard_1.csv └── test/ ├── shard_0.csv └── shard_1.csv ``` cc @stevhliu @albertvillanova This can be specified in the README as YAML with ``` configs: - config_name: en data_dir: en - config_name: fr data_dir: fr ```
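A usage sketch for the proposal above, assuming the `configs` YAML is honored (as it is in recent `datasets` releases); `my_dataset_repository` is the hypothetical repo from the example trees.

```python
from datasets import load_dataset

# each configuration name resolves to its own data directory
ds_en = load_dataset("my_dataset_repository", "en")  # reads files under en/
ds_fr = load_dataset("my_dataset_repository", "fr")  # reads files under fr/
```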
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 5, "heart": 9, "hooray": 0, "laugh": 0, "rocket": 5, "total_count": 19, "url": "https://api.github.com/repos/huggingface/datasets/issues/4578/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4578/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6457
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6457/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6457/comments
https://api.github.com/repos/huggingface/datasets/issues/6457/events
https://github.com/huggingface/datasets/issues/6457
2,015,650,563
I_kwDODunzps54JGMD
6,457
`TypeError`: huggingface_hub.hf_file_system.HfFileSystem.find() got multiple values for keyword argument 'maxdepth'
{ "avatar_url": "https://avatars.githubusercontent.com/u/79070834?v=4", "events_url": "https://api.github.com/users/wasertech/events{/privacy}", "followers_url": "https://api.github.com/users/wasertech/followers", "following_url": "https://api.github.com/users/wasertech/following{/other_user}", "gists_url": "https://api.github.com/users/wasertech/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/wasertech", "id": 79070834, "login": "wasertech", "node_id": "MDQ6VXNlcjc5MDcwODM0", "organizations_url": "https://api.github.com/users/wasertech/orgs", "received_events_url": "https://api.github.com/users/wasertech/received_events", "repos_url": "https://api.github.com/users/wasertech/repos", "site_admin": false, "starred_url": "https://api.github.com/users/wasertech/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/wasertech/subscriptions", "type": "User", "url": "https://api.github.com/users/wasertech" }
[]
closed
false
null
[]
null
[ "Updating `fsspec>=2023.10.0` did solve the issue.", "May be it should be pinned somewhere?", "> Maybe this should go in datasets directly... anyways you can easily fix this error by updating datasets>=2.15.1.dev0.\r\n\r\n@lhoestq @mariosasko for what I understand this is a bug fixed in `datasets` already, right? No need to do anything in `huggingface_hub`?", "I've opened a PR with a fix in `huggingface_hub`: https://github.com/huggingface/huggingface_hub/pull/1875", "Thanks! PR is merged and will be shipped in next release of `huggingface_hub`." ]
"2023-11-29T01:57:36Z"
"2023-11-29T15:39:03Z"
"2023-11-29T02:02:38Z"
NONE
null
null
null
### Describe the bug Please see https://github.com/huggingface/huggingface_hub/issues/1872 ### Steps to reproduce the bug Please see https://github.com/huggingface/huggingface_hub/issues/1872 ### Expected behavior Please see https://github.com/huggingface/huggingface_hub/issues/1872 ### Environment info Please see https://github.com/huggingface/huggingface_hub/issues/1872
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6457/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6457/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2453
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2453/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2453/comments
https://api.github.com/repos/huggingface/datasets/issues/2453/events
https://github.com/huggingface/datasets/pull/2453
913,729,258
MDExOlB1bGxSZXF1ZXN0NjYzNzE3NTk2
2,453
Keep original features order
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
{ "closed_at": "2021-07-09T05:50:07Z", "closed_issues": 12, "created_at": "2021-05-31T16:13:06Z", "creator": { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }, "description": "Next minor release", "due_on": "2021-07-08T07:00:00Z", "html_url": "https://github.com/huggingface/datasets/milestone/5", "id": 6808903, "labels_url": "https://api.github.com/repos/huggingface/datasets/milestones/5/labels", "node_id": "MDk6TWlsZXN0b25lNjgwODkwMw==", "number": 5, "open_issues": 0, "state": "closed", "title": "1.9", "updated_at": "2021-07-12T14:12:00Z", "url": "https://api.github.com/repos/huggingface/datasets/milestones/5" }
[ "The arrow writer was supposing that the columns were always in the sorted order. I just pushed a fix to reorder the arrays accordingly to the schema. It was failing for many datasets like squad", "and obviously it broke everything", "Feel free to revert my commit. I can investigate this in the coming days", "@lhoestq I do not understand when you say:\r\n> It was failing for many datasets like squad\r\n\r\nAll the tests were green after my last commit.", "> All the tests were green after my last commit.\r\n\r\nYes but loading the actual squad dataset was failing :/\r\n" ]
"2021-06-07T16:26:38Z"
"2021-06-15T18:05:36Z"
"2021-06-15T15:43:48Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2453.diff", "html_url": "https://github.com/huggingface/datasets/pull/2453", "merged_at": "2021-06-15T15:43:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/2453.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2453" }
When loading a Dataset from a JSON file whose column names are not sorted alphabetically, we should get the same column name order, whether we pass features (in the same order as in the file) or not. I found this issue while working on #2366.
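A minimal sketch of the behavior this PR enforces (the file name and schema are hypothetical): the column order should follow the file/features rather than being sorted alphabetically.

```python
from datasets import load_dataset, Features, Value

# hypothetical data.json whose columns appear in non-alphabetical order
features = Features({"b": Value("string"), "a": Value("int32")})
ds = load_dataset("json", data_files="data.json", features=features, split="train")
assert ds.column_names == ["b", "a"]  # same order with or without `features`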
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2453/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2453/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1076
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1076/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1076/comments
https://api.github.com/repos/huggingface/datasets/issues/1076/events
https://github.com/huggingface/datasets/pull/1076
756,584,328
MDExOlB1bGxSZXF1ZXN0NTMyMTExNDU5
1,076
quac quac / coin coin
{ "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VictorSanh", "id": 16107619, "login": "VictorSanh", "node_id": "MDQ6VXNlcjE2MTA3NjE5", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "repos_url": "https://api.github.com/users/VictorSanh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "type": "User", "url": "https://api.github.com/users/VictorSanh" }
[]
closed
false
null
[]
null
[ "pan" ]
"2020-12-03T20:55:29Z"
"2020-12-04T16:36:39Z"
"2020-12-04T09:15:20Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1076.diff", "html_url": "https://github.com/huggingface/datasets/pull/1076", "merged_at": "2020-12-04T09:15:20Z", "patch_url": "https://github.com/huggingface/datasets/pull/1076.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1076" }
Add QUAC (Question Answering in Context). I linearized most of the dictionaries to lists and referenced the authors' datasheet for the dataset card. 🦆🦆🦆 Coin coin
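A quick usage sketch (not from the PR itself), assuming the merged script keeps the linearized list fields such as `questions`:

```python
from datasets import load_dataset

quac = load_dataset("quac", split="train")
print(quac[0]["questions"][:2])  # per-dialog fields linearized to lists
```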
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1076/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1076/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3627
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3627/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3627/comments
https://api.github.com/repos/huggingface/datasets/issues/3627/events
https://github.com/huggingface/datasets/pull/3627
1,113,556,837
PR_kwDODunzps4xitGe
3,627
Fix host URL in The Pile datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "We should also update the `bookcorpusopen` download url (see #3561) , no? ", "For `the_pile_openwebtext2` and `the_pile_stack_exchange` I did not regenerate the JSON files, but instead I just changed the download_checksums URL. ", "Seems like the mystic URL is now broken and the original should be used. ", "Also if I git clone and edit the repo or reset it before this PR it is still trying to pull using mystic? Why is this? " ]
"2022-01-25T08:11:28Z"
"2022-07-20T20:54:42Z"
"2022-02-14T08:40:58Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/3627.diff", "html_url": "https://github.com/huggingface/datasets/pull/3627", "merged_at": "2022-02-14T08:40:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/3627.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/3627" }
This PR fixes the host URL in The Pile datasets, now that their data has been mirrored on another server. Fixes #3626.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3627/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3627/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6098
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6098/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6098/comments
https://api.github.com/repos/huggingface/datasets/issues/6098/events
https://github.com/huggingface/datasets/pull/6098
1,827,655,071
PR_kwDODunzps5WuCn1
6,098
Expanduser in save_to_disk()
{ "avatar_url": "https://avatars.githubusercontent.com/u/51715864?v=4", "events_url": "https://api.github.com/users/Unknown3141592/events{/privacy}", "followers_url": "https://api.github.com/users/Unknown3141592/followers", "following_url": "https://api.github.com/users/Unknown3141592/following{/other_user}", "gists_url": "https://api.github.com/users/Unknown3141592/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/Unknown3141592", "id": 51715864, "login": "Unknown3141592", "node_id": "MDQ6VXNlcjUxNzE1ODY0", "organizations_url": "https://api.github.com/users/Unknown3141592/orgs", "received_events_url": "https://api.github.com/users/Unknown3141592/received_events", "repos_url": "https://api.github.com/users/Unknown3141592/repos", "site_admin": false, "starred_url": "https://api.github.com/users/Unknown3141592/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Unknown3141592/subscriptions", "type": "User", "url": "https://api.github.com/users/Unknown3141592" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "> I am not sure why the case distinction between local and remote filesystems is even necessary for DatasetDict when saving to disk. Imo this could be removed (leaving only fs.makedirs(dataset_dict_path, exist_ok=True)).\r\n\r\nIndeed. But it's better to address this in a separate PR.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007696 / 0.011353 (-0.003656) | 0.004497 / 0.011008 (-0.006511) | 0.099302 / 0.038508 (0.060794) | 0.083360 / 0.023109 (0.060251) | 0.393483 / 0.275898 (0.117585) | 0.450505 / 0.323480 (0.127025) | 0.004610 / 0.007986 (-0.003376) | 0.003637 / 0.004328 (-0.000692) | 0.075752 / 0.004250 (0.071501) | 0.064034 / 0.037052 (0.026982) | 0.397785 / 0.258489 (0.139296) | 0.462948 / 0.293841 (0.169107) | 0.035902 / 0.128546 (-0.092644) | 0.009640 / 0.075646 (-0.066007) | 0.342299 / 0.419271 (-0.076973) | 0.059586 / 0.043533 (0.016053) | 0.404918 / 0.255139 (0.149779) | 0.440889 / 0.283200 (0.157690) | 0.028981 / 0.141683 (-0.112702) | 1.775380 / 1.452155 (0.323226) | 1.866663 / 1.492716 (0.373946) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.249080 / 0.018006 (0.231074) | 0.456460 / 0.000490 (0.455970) | 0.028145 / 0.000200 (0.027945) | 0.000402 / 0.000054 (0.000347) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.030373 / 0.037411 (-0.007038) | 0.088562 / 0.014526 (0.074036) | 0.122837 / 0.176557 (-0.053720) | 0.167122 / 0.737135 (-0.570014) | 0.103953 / 0.296338 (-0.192385) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted 
numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.431714 / 0.215209 (0.216505) | 4.182224 / 2.077655 (2.104570) | 2.025650 / 1.504120 (0.521530) | 1.838905 / 1.541195 (0.297710) | 1.868710 / 1.468490 (0.400219) | 0.538422 / 4.584777 (-4.046355) | 4.038941 / 3.745712 (0.293228) | 3.717695 / 5.269862 (-1.552166) | 2.313197 / 4.565676 (-2.252479) | 0.061060 / 0.424275 (-0.363215) | 0.008248 / 0.007607 (0.000641) | 0.497438 / 0.226044 (0.271394) | 4.946663 / 2.268929 (2.677734) | 2.571841 / 55.444624 (-52.872784) | 2.155894 / 6.876477 (-4.720583) | 2.183180 / 2.142072 (0.041107) | 0.639810 / 4.805227 (-4.165417) | 0.153273 / 6.500664 (-6.347391) | 0.068606 / 0.075469 (-0.006863) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.376152 / 1.841788 (-0.465635) | 20.747088 / 8.074308 (12.672780) | 15.200311 / 10.191392 (5.008919) | 0.166380 / 0.680424 (-0.514043) | 0.021417 / 0.534201 (-0.512784) | 0.435677 / 0.579283 (-0.143606) | 0.460412 / 0.434364 (0.026048) | 0.509978 / 0.540337 (-0.030359) | 0.702506 / 1.386936 (-0.684430) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.007378 / 0.011353 (-0.003975) | 0.003938 / 0.011008 (-0.007070) | 0.067095 / 0.038508 (0.028587) | 0.082252 / 0.023109 (0.059143) | 0.420317 / 0.275898 (0.144419) | 0.477496 / 0.323480 (0.154017) | 0.006259 / 0.007986 (-0.001727) | 0.003513 / 0.004328 (-0.000816) | 0.072107 / 0.004250 (0.067856) | 0.061737 / 0.037052 (0.024684) | 0.444142 / 0.258489 (0.185653) | 0.488926 / 0.293841 (0.195085) | 0.033623 / 0.128546 (-0.094923) | 0.008091 / 0.075646 (-0.067555) | 0.073997 / 0.419271 (-0.345274) | 0.051295 / 0.043533 (0.007762) | 0.442551 / 0.255139 (0.187412) | 0.462713 / 0.283200 (0.179513) | 0.023115 / 0.141683 (-0.118568) | 1.645759 / 1.452155 (0.193604) | 1.758121 / 1.492716 (0.265405) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows 
| get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.233450 / 0.018006 (0.215444) | 0.445384 / 0.000490 (0.444894) | 0.006412 / 0.000200 (0.006212) | 0.000111 / 0.000054 (0.000056) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.032446 / 0.037411 (-0.004965) | 0.098515 / 0.014526 (0.083989) | 0.109095 / 0.176557 (-0.067462) | 0.167645 / 0.737135 (-0.569490) | 0.110403 / 0.296338 (-0.185936) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.470189 / 0.215209 (0.254980) | 4.663224 / 2.077655 (2.585569) | 2.504474 / 1.504120 (1.000354) | 2.282867 / 1.541195 (0.741673) | 2.331598 / 1.468490 (0.863108) | 0.554421 / 4.584777 (-4.030356) | 4.078657 / 3.745712 (0.332945) | 3.516339 / 5.269862 (-1.753523) | 2.239134 / 4.565676 (-2.326542) | 0.062690 / 0.424275 (-0.361585) | 0.008406 / 0.007607 (0.000799) | 0.533827 / 0.226044 (0.307782) | 5.423984 / 2.268929 (3.155055) | 2.972784 / 55.444624 (-52.471840) | 2.699056 / 6.876477 (-4.177421) | 2.844403 / 2.142072 (0.702331) | 0.639194 / 4.805227 (-4.166033) | 0.142097 / 6.500664 (-6.358567) | 0.064646 / 0.075469 (-0.010823) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.544640 / 1.841788 (-0.297148) | 21.453429 / 8.074308 (13.379121) | 15.610723 / 10.191392 (5.419331) | 0.207796 / 0.680424 (-0.472628) | 0.021912 / 0.534201 (-0.512289) | 0.430472 / 0.579283 (-0.148811) | 0.467530 / 0.434364 (0.033166) | 0.541339 / 0.540337 (0.001002) | 0.721976 / 1.386936 (-0.664960) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#691c4bc65888005f3aadea5c104fdbc87694882d \"CML watermark\")\n" ]
"2023-07-29T20:50:45Z"
"2023-10-27T14:14:11Z"
"2023-10-27T14:04:36Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6098.diff", "html_url": "https://github.com/huggingface/datasets/pull/6098", "merged_at": "2023-10-27T14:04:36Z", "patch_url": "https://github.com/huggingface/datasets/pull/6098.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6098" }
Fixes #5651. The same problem occurs when loading from disk, so I fixed it there too. I am not sure why the case distinction between local and remote filesystems is even necessary for `DatasetDict` when saving to disk. IMO this could be removed (leaving only `fs.makedirs(dataset_dict_path, exist_ok=True)`).
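A minimal sketch of what the fix addresses (assumed behavior, not the exact patch): a `~`-prefixed path should be expanded to the user's home directory instead of creating a literal `~` directory.

```python
import os
from datasets import Dataset

ds = Dataset.from_dict({"a": [1, 2, 3]})
# before this fix, the user prefix had to be expanded manually;
# the PR makes save_to_disk handle the expansion itself
ds.save_to_disk(os.path.expanduser("~/my_dataset"))
```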
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6098/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6098/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/493
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/493/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/493/comments
https://api.github.com/repos/huggingface/datasets/issues/493/events
https://github.com/huggingface/datasets/pull/493
676,527,351
MDExOlB1bGxSZXF1ZXN0NDY1ODIxOTA0
493
Fix wmt zh-en url
{ "avatar_url": "https://avatars.githubusercontent.com/u/6045025?v=4", "events_url": "https://api.github.com/users/sshleifer/events{/privacy}", "followers_url": "https://api.github.com/users/sshleifer/followers", "following_url": "https://api.github.com/users/sshleifer/following{/other_user}", "gists_url": "https://api.github.com/users/sshleifer/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/sshleifer", "id": 6045025, "login": "sshleifer", "node_id": "MDQ6VXNlcjYwNDUwMjU=", "organizations_url": "https://api.github.com/users/sshleifer/orgs", "received_events_url": "https://api.github.com/users/sshleifer/received_events", "repos_url": "https://api.github.com/users/sshleifer/repos", "site_admin": false, "starred_url": "https://api.github.com/users/sshleifer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sshleifer/subscriptions", "type": "User", "url": "https://api.github.com/users/sshleifer" }
[]
closed
false
null
[]
null
[ "this doesn't work. I can decompress the file after download locally." ]
"2020-08-11T02:14:52Z"
"2020-08-11T02:22:28Z"
"2020-08-11T02:22:12Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/493.diff", "html_url": "https://github.com/huggingface/datasets/pull/493", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/493.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/493" }
I verified that ``` wget https://stuncorpusprod.blob.core.windows.net/corpusfiles/UNv1.0.en-zh.tar.gz.00 ``` runs in 2 minutes.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/493/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/493/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5642
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5642/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5642/comments
https://api.github.com/repos/huggingface/datasets/issues/5642/events
https://github.com/huggingface/datasets/pull/5642
1,626,043,177
PR_kwDODunzps5MIjw9
5,642
Bump hfh to 0.11.0
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006334 / 0.011353 (-0.005018) | 0.004447 / 0.011008 (-0.006561) | 0.099287 / 0.038508 (0.060779) | 0.027426 / 0.023109 (0.004317) | 0.322638 / 0.275898 (0.046740) | 0.370501 / 0.323480 (0.047021) | 0.004775 / 0.007986 (-0.003210) | 0.003289 / 0.004328 (-0.001040) | 0.076531 / 0.004250 (0.072280) | 0.037485 / 0.037052 (0.000432) | 0.335634 / 0.258489 (0.077145) | 0.384031 / 0.293841 (0.090190) | 0.031258 / 0.128546 (-0.097288) | 0.011619 / 0.075646 (-0.064027) | 0.326309 / 0.419271 (-0.092963) | 0.042513 / 0.043533 (-0.001020) | 0.340817 / 0.255139 (0.085678) | 0.369846 / 0.283200 (0.086646) | 0.084904 / 0.141683 (-0.056779) | 1.481739 / 1.452155 (0.029584) | 1.566593 / 1.492716 (0.073877) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.186424 / 0.018006 (0.168418) | 0.400879 / 0.000490 (0.400389) | 0.003520 / 0.000200 (0.003320) | 0.000079 / 0.000054 (0.000024) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023287 / 0.037411 (-0.014124) | 0.097767 / 0.014526 (0.083241) | 0.103271 / 0.176557 (-0.073286) | 0.165414 / 0.737135 (-0.571722) | 0.106437 / 0.296338 (-0.189901) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.422711 / 0.215209 (0.207502) | 4.221382 / 2.077655 (2.143727) | 
1.906807 / 1.504120 (0.402687) | 1.709595 / 1.541195 (0.168400) | 1.720452 / 1.468490 (0.251962) | 0.699477 / 4.584777 (-3.885300) | 3.415840 / 3.745712 (-0.329873) | 2.835669 / 5.269862 (-2.434192) | 1.501775 / 4.565676 (-3.063901) | 0.082896 / 0.424275 (-0.341379) | 0.012855 / 0.007607 (0.005248) | 0.514373 / 0.226044 (0.288329) | 5.190000 / 2.268929 (2.921071) | 2.302539 / 55.444624 (-53.142086) | 1.963410 / 6.876477 (-4.913067) | 2.020944 / 2.142072 (-0.121128) | 0.805919 / 4.805227 (-3.999308) | 0.150604 / 6.500664 (-6.350060) | 0.065977 / 0.075469 (-0.009492) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.206487 / 1.841788 (-0.635300) | 13.631513 / 8.074308 (5.557205) | 13.800258 / 10.191392 (3.608866) | 0.146914 / 0.680424 (-0.533509) | 0.016454 / 0.534201 (-0.517747) | 0.377752 / 0.579283 (-0.201532) | 0.384312 / 0.434364 (-0.050052) | 0.434912 / 0.540337 (-0.105425) | 0.522507 / 1.386936 (-0.864429) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006328 / 0.011353 (-0.005025) | 0.004406 / 0.011008 (-0.006602) | 0.077951 / 0.038508 (0.039443) | 0.026716 / 0.023109 (0.003607) | 0.337303 / 0.275898 (0.061405) | 0.372036 / 0.323480 (0.048556) | 0.004800 / 0.007986 (-0.003185) | 0.003153 / 0.004328 (-0.001175) | 0.076823 / 0.004250 (0.072573) | 0.035873 / 0.037052 (-0.001179) | 0.340243 / 0.258489 (0.081754) | 0.380183 / 0.293841 (0.086342) | 0.032185 / 0.128546 (-0.096361) | 0.011545 / 0.075646 (-0.064101) | 0.086887 / 0.419271 (-0.332384) | 0.041560 / 0.043533 (-0.001973) | 0.338716 / 0.255139 (0.083577) | 0.363080 / 0.283200 (0.079881) | 0.088375 / 0.141683 (-0.053308) | 1.499004 / 1.452155 (0.046850) | 1.585904 / 1.492716 (0.093188) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.211645 / 0.018006 (0.193639) | 0.403707 / 0.000490 (0.403218) | 0.000415 / 0.000200 (0.000215) | 0.000058 / 0.000054 (0.000004) |\n\n### Benchmark: 
benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024972 / 0.037411 (-0.012440) | 0.097996 / 0.014526 (0.083470) | 0.105941 / 0.176557 (-0.070616) | 0.155521 / 0.737135 (-0.581615) | 0.108246 / 0.296338 (-0.188092) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.442316 / 0.215209 (0.227107) | 4.417977 / 2.077655 (2.340322) | 2.078324 / 1.504120 (0.574205) | 1.863678 / 1.541195 (0.322483) | 1.917149 / 1.468490 (0.448659) | 0.697628 / 4.584777 (-3.887149) | 3.412810 / 3.745712 (-0.332902) | 1.866473 / 5.269862 (-3.403389) | 1.155923 / 4.565676 (-3.409754) | 0.082831 / 0.424275 (-0.341444) | 0.012367 / 0.007607 (0.004760) | 0.540018 / 0.226044 (0.313974) | 5.420472 / 2.268929 (3.151544) | 2.508540 / 55.444624 (-52.936084) | 2.166397 / 6.876477 (-4.710080) | 2.153486 / 2.142072 (0.011414) | 0.804860 / 4.805227 (-4.000367) | 0.151178 / 6.500664 (-6.349486) | 0.067870 / 0.075469 (-0.007599) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.310387 / 1.841788 (-0.531400) | 13.908916 / 8.074308 (5.834608) | 14.136895 / 10.191392 (3.945503) | 0.139389 / 0.680424 (-0.541035) | 0.016687 / 0.534201 (-0.517514) | 0.379624 / 0.579283 (-0.199659) | 0.382634 / 0.434364 (-0.051730) | 0.439632 / 0.540337 (-0.100706) | 0.524913 / 1.386936 (-0.862023) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f8f2143b4ed39b58ed415029e7838d767662da91 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006365 / 0.011353 (-0.004988) | 0.004457 / 0.011008 (-0.006551) | 0.097989 / 0.038508 (0.059481) | 0.027686 / 0.023109 (0.004577) | 0.357412 / 0.275898 (0.081514) | 0.368573 / 0.323480 (0.045093) | 0.004859 / 0.007986 (-0.003127) | 0.003262 / 0.004328 (-0.001066) | 0.076487 / 0.004250 (0.072237) | 0.035526 / 0.037052 (-0.001527) | 0.332862 / 0.258489 (0.074373) | 0.369334 / 0.293841 (0.075493) | 0.030750 / 0.128546 (-0.097796) | 0.011503 / 0.075646 (-0.064143) | 0.323289 / 0.419271 (-0.095982) | 0.042302 / 0.043533 (-0.001231) | 0.334009 / 0.255139 (0.078870) | 0.354150 / 0.283200 (0.070951) | 0.082895 / 0.141683 (-0.058788) | 1.499727 / 1.452155 (0.047572) | 1.574123 / 1.492716 (0.081407) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.192583 / 0.018006 (0.174577) | 0.408136 / 0.000490 (0.407646) | 0.001272 / 0.000200 (0.001072) | 0.000070 / 0.000054 (0.000015) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022883 / 0.037411 (-0.014528) | 0.095710 / 0.014526 (0.081185) | 0.106545 / 0.176557 (-0.070011) | 0.165784 / 0.737135 (-0.571352) | 0.108594 / 0.296338 (-0.187744) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.429483 / 0.215209 (0.214274) | 4.292338 / 2.077655 (2.214683) | 1.917759 / 1.504120 (0.413639) | 1.711489 / 1.541195 (0.170294) | 1.735668 / 1.468490 (0.267178) | 0.707602 / 4.584777 (-3.877175) | 3.369643 / 3.745712 (-0.376070) | 1.874517 / 5.269862 (-3.395344) | 1.248560 / 4.565676 (-3.317117) | 0.083247 / 0.424275 (-0.341028) | 0.012606 / 0.007607 (0.004999) | 0.519342 / 0.226044 (0.293297) | 5.225462 / 2.268929 (2.956533) | 2.433230 / 55.444624 (-53.011394) | 2.006005 / 6.876477 (-4.870471) | 2.093156 / 2.142072 (-0.048916) | 0.809372 / 4.805227 (-3.995855) | 0.151691 / 6.500664 (-6.348973) | 0.066680 / 0.075469 (-0.008789) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.226283 / 1.841788 (-0.615505) | 13.604338 / 8.074308 (5.530030) | 13.953245 / 10.191392 (3.761853) | 0.132904 / 0.680424 (-0.547520) | 0.016420 / 0.534201 (-0.517781) | 0.395316 / 0.579283 (-0.183967) | 0.385003 / 0.434364 (-0.049361) | 0.483303 / 0.540337 
(-0.057034) | 0.578459 / 1.386936 (-0.808477) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006218 / 0.011353 (-0.005135) | 0.004451 / 0.011008 (-0.006557) | 0.076892 / 0.038508 (0.038384) | 0.027017 / 0.023109 (0.003908) | 0.356976 / 0.275898 (0.081078) | 0.396083 / 0.323480 (0.072603) | 0.005510 / 0.007986 (-0.002476) | 0.003265 / 0.004328 (-0.001063) | 0.075771 / 0.004250 (0.071521) | 0.037117 / 0.037052 (0.000064) | 0.362181 / 0.258489 (0.103692) | 0.401771 / 0.293841 (0.107931) | 0.032062 / 0.128546 (-0.096484) | 0.011453 / 0.075646 (-0.064194) | 0.085773 / 0.419271 (-0.333498) | 0.041679 / 0.043533 (-0.001854) | 0.355120 / 0.255139 (0.099981) | 0.390170 / 0.283200 (0.106970) | 0.088210 / 0.141683 (-0.053473) | 1.526434 / 1.452155 (0.074279) | 1.586019 / 1.492716 (0.093302) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.196836 / 0.018006 (0.178830) | 0.401161 / 0.000490 (0.400671) | 0.002880 / 0.000200 (0.002680) | 0.000080 / 0.000054 (0.000025) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024445 / 0.037411 (-0.012966) | 0.100187 / 0.014526 (0.085661) | 0.106391 / 0.176557 (-0.070165) | 0.159764 / 0.737135 (-0.577372) | 0.109828 / 0.296338 (-0.186511) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.444228 / 0.215209 (0.229018) | 4.420769 / 2.077655 (2.343114) | 2.069437 / 1.504120 (0.565318) | 1.862587 / 1.541195 (0.321392) | 1.934627 
/ 1.468490 (0.466137) | 0.699681 / 4.584777 (-3.885095) | 3.352540 / 3.745712 (-0.393172) | 2.613172 / 5.269862 (-2.656689) | 1.445116 / 4.565676 (-3.120561) | 0.083086 / 0.424275 (-0.341189) | 0.012715 / 0.007607 (0.005108) | 0.537450 / 0.226044 (0.311405) | 5.403052 / 2.268929 (3.134123) | 2.506703 / 55.444624 (-52.937921) | 2.170198 / 6.876477 (-4.706279) | 2.201909 / 2.142072 (0.059837) | 0.799555 / 4.805227 (-4.005672) | 0.150825 / 6.500664 (-6.349839) | 0.067234 / 0.075469 (-0.008235) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.293097 / 1.841788 (-0.548691) | 13.817133 / 8.074308 (5.742825) | 14.247231 / 10.191392 (4.055839) | 0.128422 / 0.680424 (-0.552002) | 0.016541 / 0.534201 (-0.517660) | 0.382466 / 0.579283 (-0.196817) | 0.380560 / 0.434364 (-0.053804) | 0.439061 / 0.540337 (-0.101276) | 0.521865 / 1.386936 (-0.865071) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#69e60be438c334919f590512fd664436bd6b3667 \"CML watermark\")\n", "I also took the liberty of removing `_hf_hub_fixes.py` completely :)\r\n\r\n> Do you think this is really necessary and convenient? I would naively say that 5% of the users is not a negligible number...\r\n\r\nI think it's ok. Most of them are using old versions of `datasets` anyway.\r\n\r\n", "merging, but lmk if you have other concerns", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006810 / 0.011353 (-0.004543) | 0.004683 / 0.011008 (-0.006325) | 0.100889 / 0.038508 (0.062381) | 0.030135 / 0.023109 (0.007026) | 0.356407 / 0.275898 (0.080509) | 0.389175 / 0.323480 (0.065695) | 0.005358 / 0.007986 (-0.002627) | 0.004760 / 0.004328 (0.000432) | 0.075904 / 0.004250 (0.071654) | 0.040341 / 0.037052 (0.003288) | 0.357363 / 0.258489 (0.098874) | 0.394185 / 0.293841 (0.100344) | 0.031322 / 0.128546 (-0.097224) | 0.011636 / 0.075646 (-0.064010) | 0.327327 / 0.419271 (-0.091944) | 0.042494 / 0.043533 (-0.001039) | 0.338079 / 0.255139 (0.082940) | 0.363388 / 0.283200 (0.080189) | 0.087102 / 0.141683 (-0.054581) | 1.505686 / 
1.452155 (0.053531) | 1.562112 / 1.492716 (0.069396) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.203630 / 0.018006 (0.185624) | 0.425986 / 0.000490 (0.425496) | 0.003786 / 0.000200 (0.003586) | 0.000071 / 0.000054 (0.000017) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.024138 / 0.037411 (-0.013274) | 0.101752 / 0.014526 (0.087226) | 0.105436 / 0.176557 (-0.071121) | 0.165385 / 0.737135 (-0.571750) | 0.114510 / 0.296338 (-0.181828) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.447561 / 0.215209 (0.232352) | 4.449212 / 2.077655 (2.371557) | 2.169472 / 1.504120 (0.665352) | 1.989025 / 1.541195 (0.447831) | 2.036267 / 1.468490 (0.567776) | 0.698647 / 4.584777 (-3.886130) | 3.483281 / 3.745712 (-0.262431) | 1.949306 / 5.269862 (-3.320555) | 1.290313 / 4.565676 (-3.275363) | 0.083079 / 0.424275 (-0.341196) | 0.012759 / 0.007607 (0.005152) | 0.540944 / 0.226044 (0.314899) | 5.473391 / 2.268929 (3.204463) | 2.632037 / 55.444624 (-52.812587) | 2.327396 / 6.876477 (-4.549081) | 2.428880 / 2.142072 (0.286808) | 0.808918 / 4.805227 (-3.996309) | 0.153283 / 6.500664 (-6.347381) | 0.068325 / 0.075469 (-0.007145) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.212527 / 1.841788 (-0.629260) | 14.306444 / 8.074308 (6.232136) | 14.904980 / 10.191392 (4.713588) | 0.142796 / 0.680424 (-0.537628) | 0.016829 / 0.534201 (-0.517372) | 0.384806 / 0.579283 (-0.194477) | 0.390505 / 0.434364 (-0.043859) | 0.441734 / 0.540337 (-0.098603) | 0.526159 / 1.386936 (-0.860777) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy 
after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006950 / 0.011353 (-0.004403) | 0.004647 / 0.011008 (-0.006362) | 0.078925 / 0.038508 (0.040417) | 0.028081 / 0.023109 (0.004971) | 0.343420 / 0.275898 (0.067522) | 0.380567 / 0.323480 (0.057087) | 0.005286 / 0.007986 (-0.002700) | 0.004816 / 0.004328 (0.000487) | 0.077332 / 0.004250 (0.073081) | 0.042131 / 0.037052 (0.005078) | 0.345371 / 0.258489 (0.086882) | 0.390232 / 0.293841 (0.096392) | 0.032395 / 0.128546 (-0.096152) | 0.011669 / 0.075646 (-0.063978) | 0.087649 / 0.419271 (-0.331622) | 0.042465 / 0.043533 (-0.001068) | 0.342863 / 0.255139 (0.087724) | 0.368947 / 0.283200 (0.085748) | 0.091725 / 0.141683 (-0.049958) | 1.477435 / 1.452155 (0.025280) | 1.563449 / 1.492716 (0.070733) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.208016 / 0.018006 (0.190010) | 0.428387 / 0.000490 (0.427898) | 0.000443 / 0.000200 (0.000243) | 0.000060 / 0.000054 (0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.026963 / 0.037411 (-0.010449) | 0.103854 / 0.014526 (0.089328) | 0.109068 / 0.176557 (-0.067488) | 0.160107 / 0.737135 (-0.577028) | 0.112843 / 0.296338 (-0.183496) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.437161 / 0.215209 (0.221952) | 4.396178 / 2.077655 (2.318523) | 2.067597 / 1.504120 (0.563477) | 1.875247 / 1.541195 (0.334053) | 1.962451 / 1.468490 (0.493961) | 0.701427 / 4.584777 (-3.883350) | 3.459564 / 3.745712 (-0.286148) | 1.959482 / 5.269862 (-3.310380) | 1.191866 / 4.565676 (-3.373810) | 0.083243 / 0.424275 (-0.341032) | 0.012740 / 0.007607 (0.005133) | 0.535236 / 0.226044 (0.309191) | 5.351715 / 2.268929 (3.082786) | 2.490868 / 55.444624 (-52.953756) | 2.195680 / 6.876477 (-4.680797) | 2.233854 / 2.142072 (0.091781) | 0.809041 / 4.805227 (-3.996187) | 0.151498 / 6.500664 (-6.349166) | 0.068297 / 0.075469 (-0.007172) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.303596 / 1.841788 
(-0.538192) | 14.712746 / 8.074308 (6.638438) | 14.778412 / 10.191392 (4.587020) | 0.147093 / 0.680424 (-0.533331) | 0.017105 / 0.534201 (-0.517096) | 0.381687 / 0.579283 (-0.197596) | 0.402435 / 0.434364 (-0.031929) | 0.453538 / 0.540337 (-0.086800) | 0.538866 / 1.386936 (-0.848070) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#10f637c3a598c8042865b31f779e315a3da5337e \"CML watermark\")\n" ]
"2023-03-15T18:26:07Z"
"2023-03-20T12:34:09Z"
"2023-03-20T12:26:58Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5642.diff", "html_url": "https://github.com/huggingface/datasets/pull/5642", "merged_at": "2023-03-20T12:26:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/5642.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5642" }
to fix errors like ``` requests.exceptions.HTTPError: 400 Client Error: Bad Request for url: https://hub-ci.huggingface.co/api/datasets/__DUMMY_TRANSFORMERS_USER__/... ``` (e.g. from this [failing CI](https://github.com/huggingface/datasets/actions/runs/4428956210/jobs/7769160997)). 0.11.0 is the current minimum version in `transformers`. Around 5% of users are currently using versions `<0.11.0`.
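As a hedged illustration of what enforcing such a minimum version can look like at import time — this is not necessarily how `datasets` implements it, and the message text is made up:

```python
# A hypothetical import-time guard for huggingface_hub>=0.11.0.
import importlib.metadata

from packaging import version

hfh_version = version.parse(importlib.metadata.version("huggingface_hub"))
if hfh_version < version.parse("0.11.0"):
    raise ImportError(
        f"huggingface_hub>=0.11.0 is required, found {hfh_version}; "
        "run `pip install -U huggingface_hub`."
    )
```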
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5642/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5642/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/643
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/643/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/643/comments
https://api.github.com/repos/huggingface/datasets/issues/643/events
https://github.com/huggingface/datasets/issues/643
704,477,164
MDU6SXNzdWU3MDQ0NzcxNjQ=
643
Caching processed dataset at wrong folder
{ "avatar_url": "https://avatars.githubusercontent.com/u/3653789?v=4", "events_url": "https://api.github.com/users/mrm8488/events{/privacy}", "followers_url": "https://api.github.com/users/mrm8488/followers", "following_url": "https://api.github.com/users/mrm8488/following{/other_user}", "gists_url": "https://api.github.com/users/mrm8488/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mrm8488", "id": 3653789, "login": "mrm8488", "node_id": "MDQ6VXNlcjM2NTM3ODk=", "organizations_url": "https://api.github.com/users/mrm8488/orgs", "received_events_url": "https://api.github.com/users/mrm8488/received_events", "repos_url": "https://api.github.com/users/mrm8488/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mrm8488/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mrm8488/subscriptions", "type": "User", "url": "https://api.github.com/users/mrm8488" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Thanks for reporting !\r\nIt uses a temporary file to write the data.\r\nHowever it looks like the temporary file is not placed in the right directory during the processing", "Well actually I just tested and the temporary file is placed in the same directory, so it should work as expected.\r\nWhich version of `datasets` are you using ?", "`datasets-1.0.1`\r\nHere you can reproduce it here:\r\nhttps://colab.research.google.com/drive/1O0KcepTFsmpkBbrbLLMq42iwTKmQh8d5?usp=sharing\r\n", "It looks like a pyarrow issue with google colab.\r\nFor some reason this code increases the disk usage of google colab while it actually writes into google drive:\r\n\r\n```python\r\nimport pyarrow as pa\r\n\r\nstream = pa.OSFile(\"/content/drive/My Drive/path/to/file.arrow\", \"wb\")\r\nwriter = pa.RecordBatchStreamWriter(stream, schema=pa.schema({\"text\": pa.string()}))\r\nwriter.write_table(pa.Table.from_pydict({\"text\": [\"a\"*511 + \"\\n\"] * ((1 << 30) // 512)})) # 1GiB\r\nwriter.close()\r\nstream.close()\r\n```\r\n\r\nMoreover if I `rm` the file on google drive, it frees disk space on google colab.", "It looks like replacing `pa.OSFile` by `open` fixes it, I'm going to open a PR", "Ok. Thank you so much!", "Actually I did more tests it doesn't >.<\r\nI'll let you know if I find a way to fix that", "Actually I also have the issue when writing a regular text file\r\n\r\n```python\r\nf = open(\"/content/drive/My Drive/path/to/file\", \"w\")\r\nf.write((\"a\"*511 + \"\\n\") * ((1 << 30) // 512)) # 1GiB\r\nf.close()\r\n```\r\n\r\nIs that supposed to happen ?", "The code you wrote should write a 1GB file in the Google Drive folder. Doesn't it? ", "Yes it does, but the disk usage of google colab also increases by 1GB", "I could check it and as you say as I write to te Drive disk the colab disk also increases...", "To reproduce it: \r\n```bash\r\n!df -h | grep sda1\r\n```\r\n```python\r\nf = open(\"/content/drive/My Drive/test_to_remove.txt\", \"w\")\r\nf.write((\"a\"*511 + \"\\n\") * ((1 << 30) // 512)) # 1GiB\r\nf.write((\"a\"*511 + \"\\n\") * ((1 << 30) // 512)) # 1GiB\r\nf.close()\r\n```\r\n```bash\r\n!ls -lh /content/drive/My\\ Drive/test_to_remove.txt\r\n\r\n!df -h | grep sda1\r\n\r\n!rm -rf /content/drive/My\\ Drive/test_to_remove.txt\r\n\r\n```\r\n[Colab](https://colab.research.google.com/drive/1D0UiweCYQwwWZ65EEhuqqbaDDbhJYXfm?usp=sharing)\r\n\r\n\r\n", "Apparently, Colab uses a local cache of the data files read/written from Google Drive. See:\r\n- https://github.com/googlecolab/colabtools/issues/2087#issuecomment-860818457\r\n- https://github.com/googlecolab/colabtools/issues/1915#issuecomment-804234540\r\n- https://github.com/googlecolab/colabtools/issues/2147#issuecomment-885052636" ]
"2020-09-18T15:41:26Z"
"2022-02-16T14:53:29Z"
"2022-02-16T14:53:29Z"
CONTRIBUTOR
null
null
null
Hi guys, I run this on my Colab (PRO): ```python from datasets import load_dataset dataset = load_dataset('text', data_files='/content/corpus.txt', cache_dir='/content/drive/My Drive', split='train') def encode(examples): return tokenizer(examples['text'], truncation=True, padding='max_length') dataset = dataset.map(encode, batched=True) ``` The file is about 4 GB, so I cannot process it on the Colab HD because there is not enough space. So I decided to mount my Google Drive fs and do it there. The dataset is cached in the right place, but processing it (applying the `encode` function) seems to use a different folder, because the Colab HD starts to grow and it crashes, even though it should all happen in the Drive fs. What drives me crazy is that it prints that it is processing/encoding the dataset in the right folder: ``` Testing the mapped function outputs Testing finished, running the mapping function on the dataset Caching processed dataset at /content/drive/My Drive/text/default-ad3e69d6242ee916/0.0.0/7e13bc0fa76783d4ef197f079dc8acfe54c3efda980f2c9adfab046ede2f0ff7/cache-b16341780a59747d.arrow ```
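One way to pin the processed Arrow file to an explicit location is the `cache_file_name` argument of `Dataset.map`. A minimal sketch using the names from the report above — whether this avoids the Colab local-disk growth is not guaranteed:

```python
# A minimal sketch: write the processed cache file to an explicit path
# instead of relying on the default cache location next to the dataset.
dataset = dataset.map(
    encode,
    batched=True,
    cache_file_name="/content/drive/My Drive/cache-encoded.arrow",
)
```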
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/643/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/643/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5679
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5679/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5679/comments
https://api.github.com/repos/huggingface/datasets/issues/5679/events
https://github.com/huggingface/datasets/issues/5679
1,645,184,622
I_kwDODunzps5iD4Zu
5,679
Allow load_dataset to take a working dir for intermediate data
{ "avatar_url": "https://avatars.githubusercontent.com/u/38018689?v=4", "events_url": "https://api.github.com/users/lu-wang-dl/events{/privacy}", "followers_url": "https://api.github.com/users/lu-wang-dl/followers", "following_url": "https://api.github.com/users/lu-wang-dl/following{/other_user}", "gists_url": "https://api.github.com/users/lu-wang-dl/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lu-wang-dl", "id": 38018689, "login": "lu-wang-dl", "node_id": "MDQ6VXNlcjM4MDE4Njg5", "organizations_url": "https://api.github.com/users/lu-wang-dl/orgs", "received_events_url": "https://api.github.com/users/lu-wang-dl/received_events", "repos_url": "https://api.github.com/users/lu-wang-dl/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lu-wang-dl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lu-wang-dl/subscriptions", "type": "User", "url": "https://api.github.com/users/lu-wang-dl" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
open
false
null
[]
null
[ "Hi ! AFAIK a dataset must be present on a local disk to be able to efficiently memory map the datasets Arrow files. What makes you think that it is possible to load from a cloud storage and have good performance ?\r\n\r\nAnyway it's already possible to download_and_prepare a dataset as Arrow files in a cloud storage with:\r\n```python\r\nbuilder = load_dataset_builder(..., cache_dir=\"/temp/dir\")\r\nbuilder.download_and_prepare(\"/cloud_dir\")\r\n```\r\n\r\nbut then \r\n```python\r\nds = builder.as_dataset()\r\n```\r\nwould fail if \"/cloud_dir\" is not a local directory.", "In my use case, I am trying to mount the S3 bucket as local system with S3FS-FUSE / [goofys](https://github.com/kahing/goofys). I want to use S3 to save the download data and save checkpoint for training for persistent. Setting the s3 location as cache directory is not fast enough. That is why I want to set a work directory for temp data for memory map and only save the final result to s3 cache. ", "You can try setting `HF_DATASETS_DOWNLOADED_DATASETS_PATH` and `HF_DATASETS_EXTRACTED_DATASETS_PATH` to S3, and `HF_DATASETS_CACHE` to your local disk.\r\n\r\nThis way all your downloaded and extracted data are on your mounted S3, but the datasets Arrow files are on your local disk", "If we hope to also persist the Arrow files on the mounted S3 but work with the efficiency of local disk, is there any recommended way to do this, other than copying the Arrow files from local disk to S3?" ]
"2023-03-29T07:21:09Z"
"2023-04-12T22:30:25Z"
null
NONE
null
null
null
### Feature request As a user, I can set a working dir for intermediate data creation. The processed files will be moved to the cache dir, like ``` load_dataset(…, working_dir="/temp/dir", cache_dir="/cloud_dir") ``` ### Motivation This will help the use case of using datasets with cloud storage as the cache. It will help boost performance. ### Your contribution I can provide a PR to fix this if the proposal seems reasonable.
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/5679/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5679/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/1428
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1428/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1428/comments
https://api.github.com/repos/huggingface/datasets/issues/1428/events
https://github.com/huggingface/datasets/pull/1428
760,736,726
MDExOlB1bGxSZXF1ZXN0NTM1NTE4MzIy
1,428
Add twi wordsim353
{ "avatar_url": "https://avatars.githubusercontent.com/u/23586676?v=4", "events_url": "https://api.github.com/users/dadelani/events{/privacy}", "followers_url": "https://api.github.com/users/dadelani/followers", "following_url": "https://api.github.com/users/dadelani/following{/other_user}", "gists_url": "https://api.github.com/users/dadelani/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/dadelani", "id": 23586676, "login": "dadelani", "node_id": "MDQ6VXNlcjIzNTg2Njc2", "organizations_url": "https://api.github.com/users/dadelani/orgs", "received_events_url": "https://api.github.com/users/dadelani/received_events", "repos_url": "https://api.github.com/users/dadelani/repos", "site_admin": false, "starred_url": "https://api.github.com/users/dadelani/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dadelani/subscriptions", "type": "User", "url": "https://api.github.com/users/dadelani" }
[]
closed
false
null
[]
null
[]
"2020-12-09T22:59:19Z"
"2020-12-11T13:57:32Z"
"2020-12-11T13:57:32Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1428.diff", "html_url": "https://github.com/huggingface/datasets/pull/1428", "merged_at": "2020-12-11T13:57:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/1428.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1428" }
Add twi WordSim 353
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1428/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1428/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/953
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/953/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/953/comments
https://api.github.com/repos/huggingface/datasets/issues/953/events
https://github.com/huggingface/datasets/pull/953
754,359,942
MDExOlB1bGxSZXF1ZXN0NTMwMjczMzg5
953
added health_fact dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/19718818?v=4", "events_url": "https://api.github.com/users/bhavitvyamalik/events{/privacy}", "followers_url": "https://api.github.com/users/bhavitvyamalik/followers", "following_url": "https://api.github.com/users/bhavitvyamalik/following{/other_user}", "gists_url": "https://api.github.com/users/bhavitvyamalik/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bhavitvyamalik", "id": 19718818, "login": "bhavitvyamalik", "node_id": "MDQ6VXNlcjE5NzE4ODE4", "organizations_url": "https://api.github.com/users/bhavitvyamalik/orgs", "received_events_url": "https://api.github.com/users/bhavitvyamalik/received_events", "repos_url": "https://api.github.com/users/bhavitvyamalik/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bhavitvyamalik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bhavitvyamalik/subscriptions", "type": "User", "url": "https://api.github.com/users/bhavitvyamalik" }
[]
closed
false
null
[]
null
[ "Hi @lhoestq,\r\nInitially I tried int(-1) only in place of nan labels and missing values but I kept on getting this error ```pyarrow.lib.ArrowTypeError: Expected bytes, got a 'int' object``` maybe because I'm sending int values (-1) to objects which are string type" ]
"2020-12-01T12:37:44Z"
"2020-12-01T23:11:33Z"
"2020-12-01T23:11:33Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/953.diff", "html_url": "https://github.com/huggingface/datasets/pull/953", "merged_at": "2020-12-01T23:11:33Z", "patch_url": "https://github.com/huggingface/datasets/pull/953.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/953" }
Added dataset Explainable Fact-Checking for Public Health Claims (dataset_id: health_fact)
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/953/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/953/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2129
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2129/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2129/comments
https://api.github.com/repos/huggingface/datasets/issues/2129/events
https://github.com/huggingface/datasets/issues/2129
843,033,656
MDU6SXNzdWU4NDMwMzM2NTY=
2,129
How to train BERT model with next sentence prediction?
{ "avatar_url": "https://avatars.githubusercontent.com/u/836541?v=4", "events_url": "https://api.github.com/users/jnishi/events{/privacy}", "followers_url": "https://api.github.com/users/jnishi/followers", "following_url": "https://api.github.com/users/jnishi/following{/other_user}", "gists_url": "https://api.github.com/users/jnishi/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/jnishi", "id": 836541, "login": "jnishi", "node_id": "MDQ6VXNlcjgzNjU0MQ==", "organizations_url": "https://api.github.com/users/jnishi/orgs", "received_events_url": "https://api.github.com/users/jnishi/received_events", "repos_url": "https://api.github.com/users/jnishi/repos", "site_admin": false, "starred_url": "https://api.github.com/users/jnishi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/jnishi/subscriptions", "type": "User", "url": "https://api.github.com/users/jnishi" }
[]
closed
false
null
[]
null
[ "Hi !\r\nWe're not using `TextDatasetForNextSentencePrediction` in `datasets`.\r\nAlthough you can probably use the `TextDatasetForNextSentencePrediction.create_examples_from_document` on a dataset to prepare it for next sentence prediction.", "Thanks.\r\n\r\nDo you mean that `TextDatasetForNextSentencePrediction.create_exapmles_from_document` can be applied to dataset object other than `TextDatasetForNextSentencePrediction` e.g. a `Dataset` object which is loaded by `datasets.load_dataset`?", "It would probably require a bit of tweaking, but you can apply it to a dataset, yes.\r\nThis should give you a new dataset with sentence pairs you can train a model on.\r\n\r\nYou can find the documentation about dataset processing here:\r\nhttps://huggingface.co/docs/datasets/processing.html#processing-data-with-map", "Thank you for detail information.\r\n\r\nI'll try to apply `create_examples_from_document` to `Dataset` object.\r\n" ]
"2021-03-29T06:48:03Z"
"2021-04-01T04:58:40Z"
"2021-04-01T04:58:40Z"
NONE
null
null
null
Hello. I'm trying to pretrain the BERT model with next sentence prediction. Is there any function that supports next sentence prediction like `TextDatasetForNextSentencePrediction` from `huggingface/transformers`?
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2129/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2129/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5647
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5647/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5647/comments
https://api.github.com/repos/huggingface/datasets/issues/5647/events
https://github.com/huggingface/datasets/issues/5647
1,628,225,544
I_kwDODunzps5hDMAI
5,647
Make all print statements optional
{ "avatar_url": "https://avatars.githubusercontent.com/u/49101362?v=4", "events_url": "https://api.github.com/users/gagan3012/events{/privacy}", "followers_url": "https://api.github.com/users/gagan3012/followers", "following_url": "https://api.github.com/users/gagan3012/following{/other_user}", "gists_url": "https://api.github.com/users/gagan3012/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gagan3012", "id": 49101362, "login": "gagan3012", "node_id": "MDQ6VXNlcjQ5MTAxMzYy", "organizations_url": "https://api.github.com/users/gagan3012/orgs", "received_events_url": "https://api.github.com/users/gagan3012/received_events", "repos_url": "https://api.github.com/users/gagan3012/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gagan3012/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gagan3012/subscriptions", "type": "User", "url": "https://api.github.com/users/gagan3012" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" } ]
closed
false
null
[]
null
[ "related to #5444 ", "We now log these messages instead of printing them (addressed in #6019), so I'm closing this issue." ]
"2023-03-16T20:30:07Z"
"2023-07-21T14:20:25Z"
"2023-07-21T14:20:24Z"
NONE
null
null
null
### Feature request Make all print statements optional to speed up development. ### Motivation I'm loading multiple tiny datasets, and all the print statements make the loading slower. ### Your contribution I can help contribute.
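Since these messages are now emitted through the `datasets` logger (per the comment above), they can be silenced with the logging utilities. A minimal sketch — the data file is hypothetical:

```python
# A minimal sketch: lower the datasets logging verbosity before loading.
import datasets

datasets.logging.set_verbosity_error()  # only show errors; set_verbosity_warning() is less strict

ds = datasets.load_dataset("text", data_files="tiny.txt")  # hypothetical tiny dataset
```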
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5647/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5647/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5346
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5346/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5346/comments
https://api.github.com/repos/huggingface/datasets/issues/5346/events
https://github.com/huggingface/datasets/issues/5346
1,486,884,983
I_kwDODunzps5YoBB3
5,346
[Quick poll] Give your opinion on the future of the Hugging Face Open Source ecosystem!
{ "avatar_url": "https://avatars.githubusercontent.com/u/30755778?v=4", "events_url": "https://api.github.com/users/LysandreJik/events{/privacy}", "followers_url": "https://api.github.com/users/LysandreJik/followers", "following_url": "https://api.github.com/users/LysandreJik/following{/other_user}", "gists_url": "https://api.github.com/users/LysandreJik/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LysandreJik", "id": 30755778, "login": "LysandreJik", "node_id": "MDQ6VXNlcjMwNzU1Nzc4", "organizations_url": "https://api.github.com/users/LysandreJik/orgs", "received_events_url": "https://api.github.com/users/LysandreJik/received_events", "repos_url": "https://api.github.com/users/LysandreJik/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LysandreJik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LysandreJik/subscriptions", "type": "User", "url": "https://api.github.com/users/LysandreJik" }
[]
closed
false
null
[]
null
[ "As the survey is finished, can we close this issue, @LysandreJik ?", "Yes! I'll post a public summary on the forums shortly.", "Is the summary available? I would be interested in reading your findings." ]
"2022-12-09T14:48:02Z"
"2023-06-02T20:24:44Z"
"2023-01-25T19:35:40Z"
MEMBER
null
null
null
Thanks to all of you, Datasets is just about to pass 15k stars! Since the last survey, a lot has happened: the [diffusers](https://github.com/huggingface/diffusers), [evaluate](https://github.com/huggingface/evaluate) and [skops](https://github.com/skops-dev/skops) libraries were born. `timm` joined the Hugging Face ecosystem. There were 25 new releases of `transformers`, 21 new releases of `datasets`, 13 new releases of `accelerate`. If you have a couple of minutes and want to participate in shaping the future of the ecosystem, please share your thoughts: [**hf.co/oss-survey**](https://docs.google.com/forms/d/e/1FAIpQLSf4xFQKtpjr6I_l7OfNofqiR8s-WG6tcNbkchDJJf5gYD72zQ/viewform?usp=sf_link) (please reply in the above feedback form rather than to this thread) Thank you all on behalf of the HuggingFace team! 🤗
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 3, "total_count": 3, "url": "https://api.github.com/repos/huggingface/datasets/issues/5346/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5346/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2821
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2821/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2821/comments
https://api.github.com/repos/huggingface/datasets/issues/2821/events
https://github.com/huggingface/datasets/issues/2821
975,556,032
MDU6SXNzdWU5NzU1NTYwMzI=
2,821
Cannot load linnaeus dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/48327001?v=4", "events_url": "https://api.github.com/users/NielsRogge/events{/privacy}", "followers_url": "https://api.github.com/users/NielsRogge/followers", "following_url": "https://api.github.com/users/NielsRogge/following{/other_user}", "gists_url": "https://api.github.com/users/NielsRogge/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/NielsRogge", "id": 48327001, "login": "NielsRogge", "node_id": "MDQ6VXNlcjQ4MzI3MDAx", "organizations_url": "https://api.github.com/users/NielsRogge/orgs", "received_events_url": "https://api.github.com/users/NielsRogge/received_events", "repos_url": "https://api.github.com/users/NielsRogge/repos", "site_admin": false, "starred_url": "https://api.github.com/users/NielsRogge/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/NielsRogge/subscriptions", "type": "User", "url": "https://api.github.com/users/NielsRogge" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[ "Thanks for reporting ! #2852 fixed this error\r\n\r\nWe'll do a new release of `datasets` soon :)" ]
"2021-08-20T12:15:15Z"
"2021-08-31T13:13:02Z"
"2021-08-31T13:12:09Z"
CONTRIBUTOR
null
null
null
## Describe the bug The [linnaeus](https://huggingface.co/datasets/linnaeus) dataset cannot be loaded. To reproduce: ``` from datasets import load_dataset datasets = load_dataset("linnaeus") ``` This results in: ``` Downloading and preparing dataset linnaeus/linnaeus (download: 17.36 MiB, generated: 8.74 MiB, post-processed: Unknown size, total: 26.10 MiB) to /root/.cache/huggingface/datasets/linnaeus/linnaeus/1.0.0/2ff05dbc256108233262f596e09e322dbc3db067202de14286913607cd9cb704... --------------------------------------------------------------------------- ConnectionError Traceback (most recent call last) <ipython-input-4-7ef3a88f6276> in <module>() 1 from datasets import load_dataset 2 ----> 3 datasets = load_dataset("linnaeus") 11 frames /usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py in get_from_cache(url, cache_dir, force_download, proxies, etag_timeout, resume_download, user_agent, local_files_only, use_etag, max_retries, use_auth_token) 603 raise FileNotFoundError("Couldn't find file at {}".format(url)) 604 _raise_if_offline_mode_is_enabled(f"Tried to reach {url}") --> 605 raise ConnectionError("Couldn't reach {}".format(url)) 606 607 # Try a second time ConnectionError: Couldn't reach https://drive.google.com/u/0/uc?id=1OletxmPYNkz2ltOr9pyT0b0iBtUWxslh&export=download/ ```
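While the underlying cause here was a broken download URL (fixed by #2852, per the comment above), transient `ConnectionError`s of this shape can be worked around with a small retry loop — a hedged sketch, not part of the library:

```python
# Hedged sketch: retry a flaky dataset download with exponential backoff.
# This helps only with transient network failures, not with dead URLs.
import time
from datasets import load_dataset

for attempt in range(3):
    try:
        linnaeus = load_dataset("linnaeus")
        break
    except ConnectionError:
        time.sleep(2 ** attempt)  # back off: 1s, 2s, 4s
else:
    raise RuntimeError("linnaeus could not be downloaded after 3 attempts")
```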
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2821/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2821/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2973
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2973/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2973/comments
https://api.github.com/repos/huggingface/datasets/issues/2973/events
https://github.com/huggingface/datasets/pull/2973
1,007,894,592
PR_kwDODunzps4sTRvk
2,973
Fix JSON metadata of masakhaner dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[]
"2021-09-27T09:09:08Z"
"2021-09-27T12:59:59Z"
"2021-09-27T12:59:59Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2973.diff", "html_url": "https://github.com/huggingface/datasets/pull/2973", "merged_at": "2021-09-27T12:59:58Z", "patch_url": "https://github.com/huggingface/datasets/pull/2973.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2973" }
Fix #2971.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2973/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2973/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3240
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3240/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3240/comments
https://api.github.com/repos/huggingface/datasets/issues/3240/events
https://github.com/huggingface/datasets/issues/3240
1,048,376,021
I_kwDODunzps4-fPLV
3,240
Couldn't reach data file for disaster_response_messages
{ "avatar_url": "https://avatars.githubusercontent.com/u/81331791?v=4", "events_url": "https://api.github.com/users/pandya6988/events{/privacy}", "followers_url": "https://api.github.com/users/pandya6988/followers", "following_url": "https://api.github.com/users/pandya6988/following{/other_user}", "gists_url": "https://api.github.com/users/pandya6988/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/pandya6988", "id": 81331791, "login": "pandya6988", "node_id": "MDQ6VXNlcjgxMzMxNzkx", "organizations_url": "https://api.github.com/users/pandya6988/orgs", "received_events_url": "https://api.github.com/users/pandya6988/received_events", "repos_url": "https://api.github.com/users/pandya6988/repos", "site_admin": false, "starred_url": "https://api.github.com/users/pandya6988/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pandya6988/subscriptions", "type": "User", "url": "https://api.github.com/users/pandya6988" }
[ { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
null
[]
null
[ "It looks like the dataset isn't available anymore on appen.com\r\n\r\nThe CSV files appear to still be available at https://www.kaggle.com/landlord/multilingual-disaster-response-messages though. It says that the data are under the CC0 license so I guess we can host the dataset elsewhere instead ?" ]
"2021-11-09T09:26:42Z"
"2021-12-14T14:38:29Z"
"2021-12-14T14:38:29Z"
NONE
null
null
null
## Describe the bug The following command gives a ConnectionError. ## Steps to reproduce the bug ```python disaster = load_dataset('disaster_response_messages') ``` ## Error ``` ConnectionError: Couldn't reach https://datasets.appen.com/appen_datasets/disaster_response_data/disaster_response_messages_training.csv ``` ## Expected results It should load the dataset without an error. ## Actual results Specify the actual results or traceback. ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: - Platform: Google Colab - Python version: 3.7 - PyArrow version:
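Until the dataset is rehosted, the CSVs from the Kaggle mirror mentioned in the comment above can be loaded directly — a hedged sketch; the local file names are assumptions about what the Kaggle archive contains:

```python
# Hedged workaround sketch: load the CSV files by hand after downloading them
# from the Kaggle mirror. File names below are assumptions — adjust as needed.
from datasets import load_dataset

disaster = load_dataset(
    "csv",
    data_files={
        "train": "disaster_response_messages_training.csv",
        "validation": "disaster_response_messages_validation.csv",
        "test": "disaster_response_messages_test.csv",
    },
)
```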
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3240/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3240/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2352
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2352/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2352/comments
https://api.github.com/repos/huggingface/datasets/issues/2352/events
https://github.com/huggingface/datasets/pull/2352
889,810,100
MDExOlB1bGxSZXF1ZXN0NjQyOTI4NTgz
2,352
Set to_json default to JSON lines
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "This is perfect, @albertvillanova - thank you! Tested it to work.\r\n\r\nMight it be a good idea to document the args to `to_json`?\r\n\r\nand also even a very basic progress bar? took 10min for 8M large records for `openwebtext` so perhaps some indication of it's being alive every min or so?", "@lhoestq I added tests for both `lines` and `orient`." ]
"2021-05-12T08:19:25Z"
"2021-05-21T09:01:14Z"
"2021-05-21T09:01:13Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2352.diff", "html_url": "https://github.com/huggingface/datasets/pull/2352", "merged_at": "2021-05-21T09:01:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/2352.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2352" }
With this PR, the method `Dataset.to_json`: - is added to the docs - defaults to JSON lines
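In practice the change looks like this — a short sketch; the dataset name is arbitrary, and passing `lines`/`orient` through to pandas is assumed from the tests this PR adds:

```python
from datasets import load_dataset

ds = load_dataset("ag_news", split="test")
ds.to_json("ag_news_test.jsonl")  # JSON Lines output, the new default

# A single JSON document can still be requested explicitly,
# using a pandas-style orient:
ds.to_json("ag_news_test.json", lines=False, orient="records")
```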
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 1, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2352/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2352/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/104
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/104/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/104/comments
https://api.github.com/repos/huggingface/datasets/issues/104/events
https://github.com/huggingface/datasets/pull/104
618,277,081
MDExOlB1bGxSZXF1ZXN0NDE4MDMzOTY0
104
Add trivia_q
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[]
closed
false
null
[]
null
[]
"2020-05-14T14:27:19Z"
"2020-07-12T05:34:20Z"
"2020-05-14T20:23:32Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/104.diff", "html_url": "https://github.com/huggingface/datasets/pull/104", "merged_at": "2020-05-14T20:23:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/104.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/104" }
Currently tested only for one config so that the tests pass. More dummy data needs to be added later.
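For reference, a hedged usage sketch once the remaining configs are in — `"rc"` (reading comprehension) is one of the TriviaQA configurations, but treat the config and field names as assumptions here:

```python
from datasets import load_dataset

# "rc" is the reading-comprehension configuration of TriviaQA
# (config and field names assumed, not fixed by this PR).
trivia = load_dataset("trivia_qa", "rc", split="train")
print(trivia[0]["question"])
```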
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/104/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/104/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4128
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4128/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4128/comments
https://api.github.com/repos/huggingface/datasets/issues/4128/events
https://github.com/huggingface/datasets/pull/4128
1,197,326,311
PR_kwDODunzps4138I6
4,128
More robust `cast_to_python_objects` in `TypedSequence`
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
"2022-04-08T13:33:35Z"
"2022-04-13T14:07:41Z"
"2022-04-13T14:01:16Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4128.diff", "html_url": "https://github.com/huggingface/datasets/pull/4128", "merged_at": "2022-04-13T14:01:16Z", "patch_url": "https://github.com/huggingface/datasets/pull/4128.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4128" }
Adds a fallback that runs an expensive version of `cast_to_python_objects`, which exhaustively checks entire lists, to avoid the `ArrowInvalid: Could not convert` error in `TypedSequence`. Currently, this error can happen when only some images are decoded in `map`: `cast_to_python_objects` fails to recognize that it needs to cast `PIL.Image` objects that are not at the beginning of the sequence, and stops after the first image dictionary (e.g., if `data` is `[{"bytes": None, "path": "some path"}, PIL.Image(), ...]`). Fix #4124
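For context, a minimal sketch of the kind of mixed sequence described above (the file path is a placeholder):

```python
# Sketch of the failure mode: the first element is a plain image dict,
# so type-sniffing that stops at data[0] never sees the PIL image later on.
from PIL import Image

data = [
    {"bytes": None, "path": "some/path.png"},  # already-encoded image entry
    Image.new("RGB", (8, 8)),                  # decoded PIL image further down
]
# Before this PR, the cheap cast stopped after the first dict and pyarrow
# then failed with "ArrowInvalid: Could not convert ...".
```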
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4128/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4128/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1144
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1144/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1144/comments
https://api.github.com/repos/huggingface/datasets/issues/1144/events
https://github.com/huggingface/datasets/pull/1144
757,452,831
MDExOlB1bGxSZXF1ZXN0NTMyODI3OTI4
1,144
Add JFLEG
{ "avatar_url": "https://avatars.githubusercontent.com/u/22435209?v=4", "events_url": "https://api.github.com/users/j-chim/events{/privacy}", "followers_url": "https://api.github.com/users/j-chim/followers", "following_url": "https://api.github.com/users/j-chim/following{/other_user}", "gists_url": "https://api.github.com/users/j-chim/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/j-chim", "id": 22435209, "login": "j-chim", "node_id": "MDQ6VXNlcjIyNDM1MjA5", "organizations_url": "https://api.github.com/users/j-chim/orgs", "received_events_url": "https://api.github.com/users/j-chim/received_events", "repos_url": "https://api.github.com/users/j-chim/repos", "site_admin": false, "starred_url": "https://api.github.com/users/j-chim/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/j-chim/subscriptions", "type": "User", "url": "https://api.github.com/users/j-chim" }
[]
closed
false
null
[]
null
[ "Hi @j-chim ! You're right it does feel redundant: your option works better, but I'd even suggest having the references in a Sequence feature, which you can declare as:\r\n```\r\n\t features=datasets.Features(\r\n {\r\n \"sentence\": datasets.Value(\"string\"),\r\n \"corrections\": datasets.Sequence(datasets.Value(\"string\")),\r\n }\r\n ),\r\n```\r\n\r\nTo create the dummy data, you just need to tell the generator which files it should use, which you can do with:\r\n`python datasets-cli dummy_data datasets/<your-dataset-folder> --auto_generate --match_text_files \"train*,dev*,test*\"`\r\n", "Many thanks for this @yjernite! I've incorporated your feedback and sorted out the dummy data." ]
"2020-12-04T22:36:38Z"
"2020-12-06T18:16:04Z"
"2020-12-06T18:16:04Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1144.diff", "html_url": "https://github.com/huggingface/datasets/pull/1144", "merged_at": "2020-12-06T18:16:04Z", "patch_url": "https://github.com/huggingface/datasets/pull/1144.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1144" }
This PR adds [JFLEG](https://www.aclweb.org/anthology/E17-2037/), an English grammatical error correction benchmark. The tests were successful on real data, although it would be great if I could get some guidance on the **dummy data**. Basically, **for each source sentence there are 4 possible gold standard target sentences**. The original dataset comprises files in a flat structure, labelled by split then by source/target (e.g., dev.src, dev.ref0, ..., dev.ref3). I'm not sure of the best way to add this. I imagine I can treat each distinct source-target pair as its own split? But having so many copies of the source sentence feels redundant, and it would make it less convenient for end users who might want to access multiple gold standard targets simultaneously.
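With the `Sequence` layout suggested in the review, usage ends up looking like this — a hedged sketch; the split name follows JFLEG's dev/test file naming and is an assumption:

```python
from datasets import load_dataset

jfleg = load_dataset("jfleg", split="validation")  # split name assumed
example = jfleg[0]
print(example["sentence"])     # the (possibly ungrammatical) source sentence
print(example["corrections"])  # the 4 gold-standard corrected references
```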
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1144/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1144/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6473
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6473/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6473/comments
https://api.github.com/repos/huggingface/datasets/issues/6473/events
https://github.com/huggingface/datasets/pull/6473
2,026,495,084
PR_kwDODunzps5hMbvz
6,473
Fix CI quality
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6473). All of your documentation changes will be reflected on that endpoint.", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005270 / 0.011353 (-0.006083) | 0.003471 / 0.011008 (-0.007537) | 0.061942 / 0.038508 (0.023434) | 0.052671 / 0.023109 (0.029562) | 0.250541 / 0.275898 (-0.025357) | 0.270677 / 0.323480 (-0.052803) | 0.002933 / 0.007986 (-0.005053) | 0.003264 / 0.004328 (-0.001064) | 0.048055 / 0.004250 (0.043804) | 0.037459 / 0.037052 (0.000407) | 0.254926 / 0.258489 (-0.003563) | 0.292547 / 0.293841 (-0.001294) | 0.027959 / 0.128546 (-0.100587) | 0.010762 / 0.075646 (-0.064884) | 0.204961 / 0.419271 (-0.214310) | 0.035488 / 0.043533 (-0.008045) | 0.254102 / 0.255139 (-0.001037) | 0.273654 / 0.283200 (-0.009546) | 0.018126 / 0.141683 (-0.123556) | 1.082330 / 1.452155 (-0.369825) | 1.147179 / 1.492716 (-0.345538) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093223 / 0.018006 (0.075217) | 0.301912 / 0.000490 (0.301422) | 0.000219 / 0.000200 (0.000019) | 0.000051 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018407 / 0.037411 (-0.019004) | 0.060412 / 0.014526 (0.045886) | 0.074063 / 0.176557 (-0.102494) | 0.118743 / 0.737135 (-0.618392) | 0.076484 / 0.296338 (-0.219854) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289929 / 0.215209 (0.074720) | 2.825096 / 2.077655 (0.747442) | 1.511444 / 1.504120 (0.007324) | 1.394812 / 1.541195 (-0.146383) | 1.419751 / 1.468490 (-0.048739) | 0.569995 / 4.584777 (-4.014782) | 2.402586 / 3.745712 (-1.343126) | 2.826223 / 5.269862 (-2.443639) | 1.751554 / 4.565676 (-2.814123) | 0.064266 / 0.424275 (-0.360009) | 0.005047 / 0.007607 (-0.002561) | 0.341513 / 0.226044 (0.115469) | 3.372106 / 2.268929 (1.103177) | 1.872693 / 55.444624 (-53.571931) | 1.588200 / 6.876477 (-5.288276) | 1.630800 / 2.142072 (-0.511272) | 0.654266 / 4.805227 (-4.150961) | 0.124292 / 6.500664 (-6.376372) | 0.042876 / 0.075469 (-0.032593) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.948406 / 1.841788 (-0.893382) | 11.652947 / 8.074308 (3.578639) | 10.218195 / 10.191392 (0.026803) | 0.128447 / 0.680424 (-0.551976) | 0.014092 / 0.534201 (-0.520109) | 0.287631 / 0.579283 (-0.291652) | 0.264843 / 0.434364 (-0.169521) | 0.329997 / 0.540337 (-0.210340) | 0.439597 / 1.386936 (-0.947339) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005418 / 0.011353 (-0.005935) | 0.003589 / 0.011008 (-0.007419) | 0.050074 / 0.038508 (0.011566) | 0.052566 / 0.023109 (0.029456) | 0.293447 / 0.275898 (0.017549) | 0.320518 / 0.323480 (-0.002962) | 0.004094 / 0.007986 (-0.003892) | 0.002690 / 0.004328 (-0.001639) | 0.048200 / 0.004250 (0.043949) | 0.040692 / 0.037052 (0.003640) | 0.297086 / 0.258489 (0.038597) | 0.323827 / 0.293841 (0.029986) | 0.029511 / 0.128546 (-0.099035) | 0.011079 / 0.075646 (-0.064568) | 0.058562 / 0.419271 (-0.360709) | 0.032897 / 0.043533 (-0.010636) | 0.297244 / 0.255139 (0.042105) | 0.316812 / 0.283200 (0.033612) | 0.018468 / 0.141683 (-0.123215) | 1.140948 / 1.452155 (-0.311207) | 1.195453 / 1.492716 (-0.297263) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.092677 / 0.018006 (0.074671) | 0.300775 / 0.000490 (0.300285) | 0.000225 / 0.000200 (0.000025) | 0.000054 / 0.000054 (0.000000) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021617 / 0.037411 (-0.015794) | 0.077135 / 0.014526 (0.062610) | 0.079848 / 0.176557 (-0.096709) | 0.118475 / 0.737135 (-0.618661) | 0.081174 / 0.296338 (-0.215164) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.294424 / 0.215209 (0.079215) | 2.863989 / 2.077655 (0.786334) | 1.590604 / 1.504120 (0.086484) | 1.474345 / 1.541195 (-0.066849) | 1.482120 / 1.468490 (0.013630) | 0.567829 / 4.584777 (-4.016948) | 2.493782 / 3.745712 (-1.251930) | 2.823460 / 5.269862 (-2.446402) | 1.732677 / 4.565676 (-2.833000) | 0.065518 / 0.424275 (-0.358757) | 0.004923 / 0.007607 (-0.002684) | 0.349313 / 0.226044 (0.123268) | 3.428618 / 2.268929 (1.159689) | 1.970641 / 55.444624 (-53.473983) | 1.655884 / 6.876477 (-5.220593) | 1.657151 / 2.142072 (-0.484921) | 0.661208 / 4.805227 (-4.144019) | 0.119129 / 6.500664 (-6.381535) | 0.040770 / 0.075469 (-0.034699) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.964865 / 1.841788 (-0.876923) | 12.050218 / 8.074308 (3.975910) | 10.458749 / 10.191392 (0.267357) | 0.141856 / 0.680424 (-0.538568) | 0.015091 / 0.534201 (-0.519109) | 0.288897 / 0.579283 (-0.290387) | 0.275343 / 0.434364 (-0.159021) | 0.328363 / 0.540337 (-0.211975) | 0.579243 / 1.386936 (-0.807693) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f7721021e284859ea0952444bae6300a0d00794f \"CML watermark\")\n" ]
"2023-12-05T15:36:23Z"
"2023-12-05T18:14:50Z"
"2023-12-05T18:08:41Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6473.diff", "html_url": "https://github.com/huggingface/datasets/pull/6473", "merged_at": "2023-12-05T18:08:41Z", "patch_url": "https://github.com/huggingface/datasets/pull/6473.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6473" }
Fix #6472.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6473/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6473/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4119
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4119/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4119/comments
https://api.github.com/repos/huggingface/datasets/issues/4119/events
https://github.com/huggingface/datasets/pull/4119
1,195,641,298
PR_kwDODunzps41yXHF
4,119
Hotfix failing CI tests on Windows
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._" ]
"2022-04-07T07:38:46Z"
"2022-04-07T09:47:24Z"
"2022-04-07T07:57:13Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4119.diff", "html_url": "https://github.com/huggingface/datasets/pull/4119", "merged_at": "2022-04-07T07:57:13Z", "patch_url": "https://github.com/huggingface/datasets/pull/4119.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4119" }
This PR applies a hotfix for our CI Windows tests: https://app.circleci.com/pipelines/github/huggingface/datasets/11092/workflows/9cfdb1dd-0fec-4fe0-8122-5f533192ebdc/jobs/67414 Fix #4118. I guess this issue is related to this PR: - huggingface/huggingface_hub#815
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/4119/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4119/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4296
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4296/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4296/comments
https://api.github.com/repos/huggingface/datasets/issues/4296/events
https://github.com/huggingface/datasets/pull/4296
1,229,554,645
PR_kwDODunzps43foZ-
4,296
Fix URL query parameters in compression hop path when streaming
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
open
false
null
[]
null
[ "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_4296). All of your documentation changes will be reflected on that endpoint." ]
"2022-05-09T11:18:22Z"
"2022-07-06T15:19:53Z"
null
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4296.diff", "html_url": "https://github.com/huggingface/datasets/pull/4296", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/4296.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4296" }
Fix #3488.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4296/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4296/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/6301
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6301/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6301/comments
https://api.github.com/repos/huggingface/datasets/issues/6301/events
https://github.com/huggingface/datasets/pull/6301
1,940,183,999
PR_kwDODunzps5cpPVh
6,301
Unpin `tensorflow` maximum version
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[]
closed
false
null
[]
null
[ "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006663 / 0.011353 (-0.004690) | 0.004091 / 0.011008 (-0.006918) | 0.084954 / 0.038508 (0.046445) | 0.071869 / 0.023109 (0.048760) | 0.314706 / 0.275898 (0.038808) | 0.352794 / 0.323480 (0.029314) | 0.004027 / 0.007986 (-0.003959) | 0.003371 / 0.004328 (-0.000957) | 0.065456 / 0.004250 (0.061205) | 0.055828 / 0.037052 (0.018775) | 0.316502 / 0.258489 (0.058013) | 0.377979 / 0.293841 (0.084138) | 0.030870 / 0.128546 (-0.097676) | 0.008616 / 0.075646 (-0.067030) | 0.288625 / 0.419271 (-0.130646) | 0.052314 / 0.043533 (0.008781) | 0.322725 / 0.255139 (0.067586) | 0.351810 / 0.283200 (0.068611) | 0.025726 / 0.141683 (-0.115957) | 1.439308 / 1.452155 (-0.012847) | 1.524484 / 1.492716 (0.031768) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.235212 / 0.018006 (0.217206) | 0.444926 / 0.000490 (0.444437) | 0.009887 / 0.000200 (0.009687) | 0.000402 / 0.000054 (0.000347) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.028956 / 0.037411 (-0.008455) | 0.084401 / 0.014526 (0.069875) | 0.339686 / 0.176557 (0.163130) | 0.186785 / 0.737135 (-0.550350) | 0.195017 / 0.296338 (-0.101322) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.405480 / 0.215209 (0.190271) | 4.024315 / 2.077655 (1.946661) | 2.056398 / 1.504120 (0.552278) | 1.912099 / 1.541195 (0.370904) | 1.950119 / 1.468490 
(0.481629) | 0.486071 / 4.584777 (-4.098706) | 3.578501 / 3.745712 (-0.167211) | 3.268980 / 5.269862 (-2.000881) | 2.018114 / 4.565676 (-2.547563) | 0.057440 / 0.424275 (-0.366835) | 0.007281 / 0.007607 (-0.000326) | 0.474760 / 0.226044 (0.248716) | 4.746908 / 2.268929 (2.477979) | 2.550111 / 55.444624 (-52.894513) | 2.171932 / 6.876477 (-4.704544) | 2.392235 / 2.142072 (0.250162) | 0.585940 / 4.805227 (-4.219287) | 0.136445 / 6.500664 (-6.364219) | 0.062125 / 0.075469 (-0.013344) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.270763 / 1.841788 (-0.571025) | 19.213516 / 8.074308 (11.139208) | 13.992620 / 10.191392 (3.801228) | 0.167356 / 0.680424 (-0.513068) | 0.018261 / 0.534201 (-0.515940) | 0.392489 / 0.579283 (-0.186794) | 0.418845 / 0.434364 (-0.015519) | 0.461824 / 0.540337 (-0.078513) | 0.649661 / 1.386936 (-0.737275) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006675 / 0.011353 (-0.004678) | 0.003913 / 0.011008 (-0.007096) | 0.064943 / 0.038508 (0.026435) | 0.072426 / 0.023109 (0.049317) | 0.400785 / 0.275898 (0.124887) | 0.434359 / 0.323480 (0.110879) | 0.005370 / 0.007986 (-0.002616) | 0.003290 / 0.004328 (-0.001038) | 0.065035 / 0.004250 (0.060785) | 0.054924 / 0.037052 (0.017872) | 0.404442 / 0.258489 (0.145953) | 0.439027 / 0.293841 (0.145186) | 0.032467 / 0.128546 (-0.096080) | 0.008565 / 0.075646 (-0.067081) | 0.070653 / 0.419271 (-0.348619) | 0.048034 / 0.043533 (0.004501) | 0.400869 / 0.255139 (0.145730) | 0.423048 / 0.283200 (0.139848) | 0.022757 / 0.141683 (-0.118926) | 1.516956 / 1.452155 (0.064801) | 1.581599 / 1.492716 (0.088883) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.214761 / 0.018006 (0.196755) | 0.440921 / 0.000490 (0.440431) | 0.007538 / 0.000200 (0.007338) | 0.000087 / 0.000054 (0.000033) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split 
|\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.032313 / 0.037411 (-0.005099) | 0.091365 / 0.014526 (0.076839) | 0.106665 / 0.176557 (-0.069891) | 0.158637 / 0.737135 (-0.578498) | 0.104894 / 0.296338 (-0.191445) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.432995 / 0.215209 (0.217786) | 4.339911 / 2.077655 (2.262256) | 2.313139 / 1.504120 (0.809019) | 2.142552 / 1.541195 (0.601357) | 2.279275 / 1.468490 (0.810785) | 0.501133 / 4.584777 (-4.083644) | 3.696160 / 3.745712 (-0.049552) | 3.341886 / 5.269862 (-1.927976) | 2.105972 / 4.565676 (-2.459705) | 0.059268 / 0.424275 (-0.365008) | 0.007568 / 0.007607 (-0.000039) | 0.512546 / 0.226044 (0.286502) | 5.130219 / 2.268929 (2.861290) | 2.808292 / 55.444624 (-52.636332) | 2.478721 / 6.876477 (-4.397755) | 2.679341 / 2.142072 (0.537269) | 0.599022 / 4.805227 (-4.206206) | 0.143761 / 6.500664 (-6.356903) | 0.062061 / 0.075469 (-0.013409) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.430507 / 1.841788 (-0.411281) | 20.458085 / 8.074308 (12.383777) | 15.268356 / 10.191392 (5.076964) | 0.163359 / 0.680424 (-0.517065) | 0.020908 / 0.534201 (-0.513293) | 0.396870 / 0.579283 (-0.182413) | 0.432630 / 0.434364 (-0.001733) | 0.475909 / 0.540337 (-0.064429) | 0.681031 / 1.386936 (-0.705905) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#fd1dd6aa4c7fa7744c1c1f877573ff59f1529292 \"CML watermark\")\n", "CI failures are unrelated", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | 
write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005815 / 0.011353 (-0.005538) | 0.003419 / 0.011008 (-0.007589) | 0.080286 / 0.038508 (0.041778) | 0.056487 / 0.023109 (0.033377) | 0.304414 / 0.275898 (0.028516) | 0.341039 / 0.323480 (0.017559) | 0.004392 / 0.007986 (-0.003594) | 0.002852 / 0.004328 (-0.001477) | 0.062339 / 0.004250 (0.058089) | 0.044683 / 0.037052 (0.007630) | 0.311651 / 0.258489 (0.053162) | 0.357249 / 0.293841 (0.063409) | 0.027300 / 0.128546 (-0.101246) | 0.007963 / 0.075646 (-0.067683) | 0.261948 / 0.419271 (-0.157323) | 0.044952 / 0.043533 (0.001419) | 0.309990 / 0.255139 (0.054851) | 0.340735 / 0.283200 (0.057536) | 0.020786 / 0.141683 (-0.120897) | 1.471378 / 1.452155 (0.019224) | 1.517260 / 1.492716 (0.024543) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.245447 / 0.018006 (0.227441) | 0.418967 / 0.000490 (0.418477) | 0.007039 / 0.000200 (0.006840) | 0.000196 / 0.000054 (0.000142) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022880 / 0.037411 (-0.014532) | 0.071862 / 0.014526 (0.057337) | 0.083009 / 0.176557 (-0.093547) | 0.143414 / 0.737135 (-0.593722) | 0.082896 / 0.296338 (-0.213442) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.390645 / 0.215209 (0.175436) | 3.888104 / 2.077655 (1.810450) | 1.859572 / 1.504120 (0.355452) | 1.683803 / 1.541195 (0.142608) | 1.697902 / 1.468490 (0.229412) | 0.499537 / 4.584777 (-4.085239) | 3.015832 / 3.745712 (-0.729881) | 2.805696 / 5.269862 (-2.464166) | 1.830408 / 4.565676 (-2.735268) | 0.058191 / 0.424275 (-0.366085) | 0.006357 / 0.007607 (-0.001250) | 0.462486 / 0.226044 (0.236442) | 4.634951 / 2.268929 (2.366022) | 2.309364 / 55.444624 (-53.135260) | 1.979521 / 6.876477 (-4.896956) | 2.080011 / 2.142072 (-0.062062) | 0.593086 / 4.805227 (-4.212141) | 0.124856 / 6.500664 (-6.375808) | 0.060172 / 0.075469 (-0.015297) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.251439 / 1.841788 (-0.590349) | 17.068999 / 8.074308 (8.994691) | 13.527209 / 10.191392 (3.335817) | 0.146636 / 0.680424 (-0.533788) | 0.016866 / 0.534201 (-0.517335) | 0.333202 / 0.579283 (-0.246081) | 0.360444 / 0.434364 (-0.073920) | 0.388378 / 0.540337 (-0.151959) | 0.530519 / 1.386936 (-0.856417) 
|\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006043 / 0.011353 (-0.005310) | 0.003612 / 0.011008 (-0.007396) | 0.062644 / 0.038508 (0.024135) | 0.056104 / 0.023109 (0.032995) | 0.446328 / 0.275898 (0.170430) | 0.478044 / 0.323480 (0.154564) | 0.004641 / 0.007986 (-0.003345) | 0.002896 / 0.004328 (-0.001432) | 0.062344 / 0.004250 (0.058093) | 0.046339 / 0.037052 (0.009287) | 0.454866 / 0.258489 (0.196377) | 0.484242 / 0.293841 (0.190401) | 0.028602 / 0.128546 (-0.099944) | 0.008075 / 0.075646 (-0.067571) | 0.067980 / 0.419271 (-0.351291) | 0.041339 / 0.043533 (-0.002194) | 0.452911 / 0.255139 (0.197772) | 0.474180 / 0.283200 (0.190981) | 0.019395 / 0.141683 (-0.122288) | 1.432161 / 1.452155 (-0.019993) | 1.505800 / 1.492716 (0.013083) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.216983 / 0.018006 (0.198977) | 0.406232 / 0.000490 (0.405743) | 0.005101 / 0.000200 (0.004902) | 0.000077 / 0.000054 (0.000022) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.026295 / 0.037411 (-0.011116) | 0.080490 / 0.014526 (0.065964) | 0.088105 / 0.176557 (-0.088451) | 0.143294 / 0.737135 (-0.593841) | 0.089125 / 0.296338 (-0.207213) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.465512 / 0.215209 (0.250302) | 4.648656 / 2.077655 (2.571002) | 2.598225 / 1.504120 (1.094105) | 2.409588 / 1.541195 (0.868393) | 2.513745 / 1.468490 (1.045255) | 0.507425 / 4.584777 
(-4.077352) | 3.130164 / 3.745712 (-0.615548) | 2.836817 / 5.269862 (-2.433045) | 1.836029 / 4.565676 (-2.729647) | 0.058829 / 0.424275 (-0.365446) | 0.006551 / 0.007607 (-0.001056) | 0.537892 / 0.226044 (0.311848) | 5.401079 / 2.268929 (3.132150) | 3.019817 / 55.444624 (-52.424807) | 2.695131 / 6.876477 (-4.181346) | 2.805321 / 2.142072 (0.663248) | 0.595681 / 4.805227 (-4.209546) | 0.124368 / 6.500664 (-6.376296) | 0.060712 / 0.075469 (-0.014757) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.361508 / 1.841788 (-0.480279) | 17.811373 / 8.074308 (9.737065) | 14.482705 / 10.191392 (4.291313) | 0.153193 / 0.680424 (-0.527231) | 0.018347 / 0.534201 (-0.515854) | 0.330900 / 0.579283 (-0.248383) | 0.374948 / 0.434364 (-0.059416) | 0.385615 / 0.540337 (-0.154722) | 0.568077 / 1.386936 (-0.818859) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#18ef408c21f8efbb2142f050a691b5c916455af3 \"CML watermark\")\n" ]
"2023-10-12T14:58:07Z"
"2023-10-12T15:58:20Z"
"2023-10-12T15:49:54Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6301.diff", "html_url": "https://github.com/huggingface/datasets/pull/6301", "merged_at": "2023-10-12T15:49:54Z", "patch_url": "https://github.com/huggingface/datasets/pull/6301.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6301" }
Removes the temporary pin introduced in #6264
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/6301/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6301/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/2192
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2192/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2192/comments
https://api.github.com/repos/huggingface/datasets/issues/2192/events
https://github.com/huggingface/datasets/pull/2192
853,547,910
MDExOlB1bGxSZXF1ZXN0NjExNjE5NTY0
2,192
Fix typo in huggingface hub
{ "avatar_url": "https://avatars.githubusercontent.com/u/30755778?v=4", "events_url": "https://api.github.com/users/LysandreJik/events{/privacy}", "followers_url": "https://api.github.com/users/LysandreJik/followers", "following_url": "https://api.github.com/users/LysandreJik/following{/other_user}", "gists_url": "https://api.github.com/users/LysandreJik/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/LysandreJik", "id": 30755778, "login": "LysandreJik", "node_id": "MDQ6VXNlcjMwNzU1Nzc4", "organizations_url": "https://api.github.com/users/LysandreJik/orgs", "received_events_url": "https://api.github.com/users/LysandreJik/received_events", "repos_url": "https://api.github.com/users/LysandreJik/repos", "site_admin": false, "starred_url": "https://api.github.com/users/LysandreJik/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LysandreJik/subscriptions", "type": "User", "url": "https://api.github.com/users/LysandreJik" }
[]
closed
false
null
[]
null
[]
"2021-04-08T14:42:24Z"
"2021-04-08T15:47:41Z"
"2021-04-08T15:47:40Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2192.diff", "html_url": "https://github.com/huggingface/datasets/pull/2192", "merged_at": "2021-04-08T15:47:40Z", "patch_url": "https://github.com/huggingface/datasets/pull/2192.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2192" }
pip knows how to resolve to `huggingface_hub`, but conda doesn't! The `packaging` dependency is also required for the build to complete.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2192/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2192/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1137
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1137/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1137/comments
https://api.github.com/repos/huggingface/datasets/issues/1137/events
https://github.com/huggingface/datasets/pull/1137
757,358,145
MDExOlB1bGxSZXF1ZXN0NTMyNzQ4NDAx
1,137
add wmt mlqe 2020 shared task
{ "avatar_url": "https://avatars.githubusercontent.com/u/16107619?v=4", "events_url": "https://api.github.com/users/VictorSanh/events{/privacy}", "followers_url": "https://api.github.com/users/VictorSanh/followers", "following_url": "https://api.github.com/users/VictorSanh/following{/other_user}", "gists_url": "https://api.github.com/users/VictorSanh/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/VictorSanh", "id": 16107619, "login": "VictorSanh", "node_id": "MDQ6VXNlcjE2MTA3NjE5", "organizations_url": "https://api.github.com/users/VictorSanh/orgs", "received_events_url": "https://api.github.com/users/VictorSanh/received_events", "repos_url": "https://api.github.com/users/VictorSanh/repos", "site_admin": false, "starred_url": "https://api.github.com/users/VictorSanh/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/VictorSanh/subscriptions", "type": "User", "url": "https://api.github.com/users/VictorSanh" }
[]
closed
false
null
[]
null
[ "re-created in #1218 because this was too messy" ]
"2020-12-04T19:45:34Z"
"2020-12-06T19:59:44Z"
"2020-12-06T19:53:46Z"
MEMBER
null
1
{ "diff_url": "https://github.com/huggingface/datasets/pull/1137.diff", "html_url": "https://github.com/huggingface/datasets/pull/1137", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1137.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1137" }
First commit for Shared Task 1 (wmt_mlqe_task1) of WMT20 MLQE (quality estimation of machine translation). Note that I copied the tags in the README for only one of the 7 configurations: `en-de`. There is one configuration for each pair of languages.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1137/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1137/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/171
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/171/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/171/comments
https://api.github.com/repos/huggingface/datasets/issues/171/events
https://github.com/huggingface/datasets/pull/171
621,199,128
MDExOlB1bGxSZXF1ZXN0NDIwMjk0ODM0
171
fix squad metric format
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "One thing for SQuAD is that I wanted to be able to use the SQuAD dataset directly in the metrics and I'm not sure it will be possible with this format.\r\n\r\n(maybe it's not really possible in general though)", "This is kinda related to one thing I had in mind which is that we may want to be able to dump our model predictions in a `Dataset` as well so that we don't keep them in memory (and we can export them in a nice format later as well when we will have a serialization formats).\r\n\r\nMaybe this is overkill though, I haven't fully wraped my head around this.", "I'm also perfectly fine with merging this PR in the current state and working on a larger scope later.", "This is the format needed to run the official script directly. The format of the squad dataset is different from the input of the metric. \r\n\r\n> One thing for SQuAD is that I wanted to be able to use the SQuAD dataset directly in the metrics and I'm not sure it will be possible with this format.\r\n> \r\n> (maybe it's not really possible in general though)\r\n\r\nOk I see. I'll try to use the same format", "Ok with this update I changed the format to fit the squad dataset format.\r\nNow you can do:\r\n```python\r\nsquad_dset = nlp.load_dataset(\"squad\")\r\nsquad_metric = nlp.load_metric(\"/Users/quentinlhoest/Desktop/hf/nlp-bis/metrics/squad\")\r\npredictions = [\r\n {\"id\": v[\"id\"], \"prediction_text\": v[\"answers\"][\"text\"][0]} # take first possible answer\r\n for v in squad_dset[\"validation\"]\r\n]\r\nsquad_metric.compute(predictions, squad_dset[\"validation\"])\r\n```" ]
"2020-05-19T18:37:36Z"
"2020-05-22T13:36:50Z"
"2020-05-22T13:36:48Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/171.diff", "html_url": "https://github.com/huggingface/datasets/pull/171", "merged_at": "2020-05-22T13:36:48Z", "patch_url": "https://github.com/huggingface/datasets/pull/171.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/171" }
The format of the squad metric was wrong. This should fix #143 I tested with ```python3 predictions = [ {'id': '56be4db0acb8001400a502ec', 'prediction_text': 'Denver Broncos'} ] references = [ {'answers': [{'text': 'Denver Broncos'}], 'id': '56be4db0acb8001400a502ec'} ] ```
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/171/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/171/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5053
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5053/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5053/comments
https://api.github.com/repos/huggingface/datasets/issues/5053/events
https://github.com/huggingface/datasets/issues/5053
1,393,739,882
I_kwDODunzps5TEshq
5,053
Intermittent JSON parse error when streaming the Pile
{ "avatar_url": "https://avatars.githubusercontent.com/u/77788841?v=4", "events_url": "https://api.github.com/users/neelnanda-io/events{/privacy}", "followers_url": "https://api.github.com/users/neelnanda-io/followers", "following_url": "https://api.github.com/users/neelnanda-io/following{/other_user}", "gists_url": "https://api.github.com/users/neelnanda-io/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/neelnanda-io", "id": 77788841, "login": "neelnanda-io", "node_id": "MDQ6VXNlcjc3Nzg4ODQx", "organizations_url": "https://api.github.com/users/neelnanda-io/orgs", "received_events_url": "https://api.github.com/users/neelnanda-io/received_events", "repos_url": "https://api.github.com/users/neelnanda-io/repos", "site_admin": false, "starred_url": "https://api.github.com/users/neelnanda-io/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/neelnanda-io/subscriptions", "type": "User", "url": "https://api.github.com/users/neelnanda-io" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
open
false
null
[]
null
[ "Maybe #2838 can help. In this PR we allow to skip bad chunks of JSON data to not crash the training\r\n\r\nDid you have warning messages before the error ?\r\n\r\nsomething like this maybe ?\r\n```\r\n03/24/2022 02:19:46 - WARNING - datasets.utils.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [1/20]\r\n03/24/2022 02:20:01 - WARNING - datasets.utils.streaming_download_manager - Got disconnected from remote data host. Retrying in 5sec [2/20]\r\n03/24/2022 02:20:09 - ERROR - datasets.packaged_modules.json.json - Failed to read file 'gzip://file-000000000007.json::https://huggingface.co/datasets/lvwerra/codeparrot-clean-train/resolve/1d740acb9d09cf7a3307553323e2c677a6535407/file-000000000007.json.gz' with error <class 'pyarrow.lib.ArrowInvalid'>: JSON parse error: Invalid value. in row 0\r\n```", "Ah, thanks! I did get errors like that. Sad that PR wasn't merged in! \r\n\r\nI'm currently just downloading 200GB of the Pile locally to avoid streaming (I have space and it's faster anyway), but that's really useful! I can probably apply the dumb patch of just commenting out the bits that raise the JSON Parse Error lol, based on your code - if I continue the loop should it be fine?", "Yup you can get some inspiration from this PR. It simply ignores the bad chunks (a chunk is ~a few MBs of data).\r\nWe'll try to merge this PR soon" ]
"2022-10-02T11:56:46Z"
"2022-10-04T17:59:03Z"
null
NONE
null
null
null
## Describe the bug I have an intermittent error when streaming the Pile, where I get a JSON parse error which causes my program to crash. This is intermittent - when I rerun the program with the same random seed it does not crash in the same way. The exact point this happens also varied - it happened to me 11B tokens and 4 days into a training run, and now just happened 2 minutes into one, but I can't reliably reproduce it. I'm using a remote machine with 8 A6000 GPUs via runpod.io ## Expected results I have a DataLoader which can iterate through the whole Pile ## Actual results Stack trace: ``` Failed to read file 'zstd://12.jsonl::https://the-eye.eu/public/AI/pile/train/12.jsonl.zst' with error <class 'pyarrow.lib.ArrowInvalid'>: JSON parse error: Invalid value. in row 0 ``` I'm currently using HuggingFace accelerate, which also gave me the following stack trace, but I've also experienced this problem intermittently when using DataParallel, so I don't think it's to do with parallelisation ``` Traceback (most recent call last): File "ddp_script.py", line 1258, in <module> main() File "ddp_script.py", line 1143, in main for c, batch in tqdm.tqdm(enumerate(data_iter)): File "/opt/conda/lib/python3.7/site-packages/tqdm/std.py", line 1195, in __iter__ for obj in iterable: File "/opt/conda/lib/python3.7/site-packages/accelerate/data_loader.py", line 503, in __iter__ next_batch, next_batch_info, next_skip = self._fetch_batches(main_iterator) File "/opt/conda/lib/python3.7/site-packages/accelerate/data_loader.py", line 454, in _fetch_batches broadcast_object_list(batch_info) File "/opt/conda/lib/python3.7/site-packages/accelerate/utils/operations.py", line 333, in broadcast_object_list torch.distributed.broadcast_object_list(object_list, src=from_process) File "/opt/conda/lib/python3.7/site-packages/torch/distributed/distributed_c10d.py", line 1900, in broadcast_object_list object_list[i] = _tensor_to_object(obj_view, obj_size) File "/opt/conda/lib/python3.7/site-packages/torch/distributed/distributed_c10d.py", line 1571, in _tensor_to_object return _unpickler(io.BytesIO(buf)).load() _pickle.UnpicklingError: invalid load key, '@'. 
``` ## Steps to reproduce the bug ```python from datasets import load_dataset dataset = load_dataset( cfg["dataset_name"], streaming=True, split="train") dataset = dataset.remove_columns("meta") dataset = dataset.map(tokenize_and_concatenate, batched=True) dataset = dataset.with_format(type="torch") train_data_loader = DataLoader( dataset, batch_size=cfg["batch_size"], num_workers=3) for batch in train_data_loader: continue ``` `tokenize_and_concatenate` is a custom tokenization function I defined on the GPT-NeoX tokenizer to tokenize the text, separated by endoftext tokens, and reshape to have length batch_size, I don't think this is related to tokenization: ``` import numpy as np import einops import torch def tokenize_and_concatenate(examples): texts = examples["text"] full_text = tokenizer.eos_token.join(texts) div = 20 length = len(full_text) // div text_list = [full_text[i * length: (i + 1) * length] for i in range(div)] tokens = tokenizer(text_list, return_tensors="np", padding=True)[ "input_ids" ].flatten() tokens = tokens[tokens != tokenizer.pad_token_id] n = len(tokens) curr_batch_size = n // (seq_len - 1) tokens = tokens[: (seq_len - 1) * curr_batch_size] tokens = einops.rearrange( tokens, "(batch_size seq) -> batch_size seq", batch_size=curr_batch_size, seq=seq_len - 1, ) prefix = np.ones((curr_batch_size, 1), dtype=np.int64) * \ tokenizer.bos_token_id return { "text": np.concatenate([prefix, tokens], axis=1) } ``` ## Environment info <!-- You can run the command `datasets-cli env` and copy-and-paste its output below. --> - `datasets` version: 2.4.0 - Platform: Linux-5.4.0-105-generic-x86_64-with-debian-buster-sid - Python version: 3.7.13 - PyArrow version: 9.0.0 - Pandas version: 1.3.5 ZStandard data: Version: 0.18.0 Summary: Zstandard bindings for Python Home-page: https://github.com/indygreg/python-zstandard Author: Gregory Szorc Author-email: [email protected] License: BSD Location: /opt/conda/lib/python3.7/site-packages Requires: Required-by:
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5053/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5053/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/5197
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5197/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5197/comments
https://api.github.com/repos/huggingface/datasets/issues/5197/events
https://github.com/huggingface/datasets/pull/5197
1,434,676,150
PR_kwDODunzps5CI0Ac
5,197
[zstd] Use max window log size
{ "avatar_url": "https://avatars.githubusercontent.com/u/728699?v=4", "events_url": "https://api.github.com/users/reyoung/events{/privacy}", "followers_url": "https://api.github.com/users/reyoung/followers", "following_url": "https://api.github.com/users/reyoung/following{/other_user}", "gists_url": "https://api.github.com/users/reyoung/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/reyoung", "id": 728699, "login": "reyoung", "node_id": "MDQ6VXNlcjcyODY5OQ==", "organizations_url": "https://api.github.com/users/reyoung/orgs", "received_events_url": "https://api.github.com/users/reyoung/received_events", "repos_url": "https://api.github.com/users/reyoung/repos", "site_admin": false, "starred_url": "https://api.github.com/users/reyoung/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/reyoung/subscriptions", "type": "User", "url": "https://api.github.com/users/reyoung" }
[]
open
false
null
[]
null
[ "@albertvillanova Please take a review.", "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_5197). All of your documentation changes will be reflected on that endpoint." ]
"2022-11-03T13:35:58Z"
"2022-11-03T13:45:19Z"
null
NONE
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5197.diff", "html_url": "https://github.com/huggingface/datasets/pull/5197", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/5197.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5197" }
`ZstdDecompressor` has a `max_window_size` parameter that limits the maximum memory usage when decompressing zstd files. The default `max_window_size` is not large enough when files are compressed with the `zstd --ultra` flag. This PR changes `max_window_size` to zstd's maximum window size. Note that `zstd.WINDOWLOG_MAX` is the log2 of the maximum window size.
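For illustration, here is a minimal sketch of the idea using the `zstandard` package (the file name and chunk size are illustrative, not the actual code touched by this PR):

```python
import zstandard as zstd

# WINDOWLOG_MAX is the log2 of the largest window zstd supports,
# so the byte limit is 2 ** WINDOWLOG_MAX (e.g. 2 GiB when it is 31).
dctx = zstd.ZstdDecompressor(max_window_size=2 ** zstd.WINDOWLOG_MAX)

# Files produced with `zstd --ultra -22` can use windows larger than the
# decompressor's default limit, so this avoids the window-size error.
with open("data.jsonl.zst", "rb") as fh, dctx.stream_reader(fh) as reader:
    chunk = reader.read(65536)
```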
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5197/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5197/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/393
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/393/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/393/comments
https://api.github.com/repos/huggingface/datasets/issues/393/events
https://github.com/huggingface/datasets/pull/393
657,330,911
MDExOlB1bGxSZXF1ZXN0NDQ5NDY1MTAz
393
Fix extracted files directory for the DownloadManager
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[]
"2020-07-15T12:59:55Z"
"2020-07-17T17:02:16Z"
"2020-07-17T17:02:14Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/393.diff", "html_url": "https://github.com/huggingface/datasets/pull/393", "merged_at": "2020-07-17T17:02:14Z", "patch_url": "https://github.com/huggingface/datasets/pull/393.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/393" }
The cache dir was often cluttered by extracted files because of the download manager. For downloaded files, we are using the `downloads` directory to make things easier to navigate, but extracted files were still placed at the root of the cache directory. To fix that I changed the directory for extracted files to cache_dir/downloads/extracted.
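As a rough sketch of the resulting cache layout (paths are illustrative, not the exact constants used in the library):

```python
import os

# before: extracted files landed directly under cache_dir
cache_dir = os.path.expanduser("~/.cache/huggingface/datasets")

# after: they live next to the downloads they came from
downloads_dir = os.path.join(cache_dir, "downloads")       # downloaded files
extracted_dir = os.path.join(downloads_dir, "extracted")   # extracted files
```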
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/393/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/393/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/544
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/544/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/544/comments
https://api.github.com/repos/huggingface/datasets/issues/544/events
https://github.com/huggingface/datasets/pull/544
689,062,519
MDExOlB1bGxSZXF1ZXN0NDc2MTc4MDM2
544
[Distributed] Fix load_dataset error when multiprocessing + add test
{ "avatar_url": "https://avatars.githubusercontent.com/u/7353373?v=4", "events_url": "https://api.github.com/users/thomwolf/events{/privacy}", "followers_url": "https://api.github.com/users/thomwolf/followers", "following_url": "https://api.github.com/users/thomwolf/following{/other_user}", "gists_url": "https://api.github.com/users/thomwolf/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/thomwolf", "id": 7353373, "login": "thomwolf", "node_id": "MDQ6VXNlcjczNTMzNzM=", "organizations_url": "https://api.github.com/users/thomwolf/orgs", "received_events_url": "https://api.github.com/users/thomwolf/received_events", "repos_url": "https://api.github.com/users/thomwolf/repos", "site_admin": false, "starred_url": "https://api.github.com/users/thomwolf/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/thomwolf/subscriptions", "type": "User", "url": "https://api.github.com/users/thomwolf" }
[]
closed
false
null
[]
null
[]
"2020-08-31T09:30:10Z"
"2020-08-31T11:15:11Z"
"2020-08-31T11:15:10Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/544.diff", "html_url": "https://github.com/huggingface/datasets/pull/544", "merged_at": "2020-08-31T11:15:10Z", "patch_url": "https://github.com/huggingface/datasets/pull/544.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/544" }
Fix #543 + add test
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/544/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/544/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1223
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1223/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1223/comments
https://api.github.com/repos/huggingface/datasets/issues/1223/events
https://github.com/huggingface/datasets/pull/1223
758,022,208
MDExOlB1bGxSZXF1ZXN0NTMzMjY2MDc4
1,223
🇸🇪 Added Swedish Reviews dataset for sentiment classification in Sw…
{ "avatar_url": "https://avatars.githubusercontent.com/u/6556710?v=4", "events_url": "https://api.github.com/users/timpal0l/events{/privacy}", "followers_url": "https://api.github.com/users/timpal0l/followers", "following_url": "https://api.github.com/users/timpal0l/following{/other_user}", "gists_url": "https://api.github.com/users/timpal0l/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/timpal0l", "id": 6556710, "login": "timpal0l", "node_id": "MDQ6VXNlcjY1NTY3MTA=", "organizations_url": "https://api.github.com/users/timpal0l/orgs", "received_events_url": "https://api.github.com/users/timpal0l/received_events", "repos_url": "https://api.github.com/users/timpal0l/repos", "site_admin": false, "starred_url": "https://api.github.com/users/timpal0l/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/timpal0l/subscriptions", "type": "User", "url": "https://api.github.com/users/timpal0l" }
[]
closed
false
null
[]
null
[]
"2020-12-06T21:02:54Z"
"2020-12-08T10:54:56Z"
"2020-12-08T10:54:56Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1223.diff", "html_url": "https://github.com/huggingface/datasets/pull/1223", "merged_at": "2020-12-08T10:54:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/1223.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1223" }
perhaps: @lhoestq 🤗
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1223/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1223/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1883
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1883/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1883/comments
https://api.github.com/repos/huggingface/datasets/issues/1883/events
https://github.com/huggingface/datasets/pull/1883
808,750,623
MDExOlB1bGxSZXF1ZXN0NTczNzM2NTIz
1,883
Add not-in-place implementations for several dataset transforms
{ "avatar_url": "https://avatars.githubusercontent.com/u/33657802?v=4", "events_url": "https://api.github.com/users/SBrandeis/events{/privacy}", "followers_url": "https://api.github.com/users/SBrandeis/followers", "following_url": "https://api.github.com/users/SBrandeis/following{/other_user}", "gists_url": "https://api.github.com/users/SBrandeis/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/SBrandeis", "id": 33657802, "login": "SBrandeis", "node_id": "MDQ6VXNlcjMzNjU3ODAy", "organizations_url": "https://api.github.com/users/SBrandeis/orgs", "received_events_url": "https://api.github.com/users/SBrandeis/received_events", "repos_url": "https://api.github.com/users/SBrandeis/repos", "site_admin": false, "starred_url": "https://api.github.com/users/SBrandeis/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SBrandeis/subscriptions", "type": "User", "url": "https://api.github.com/users/SBrandeis" }
[]
closed
false
null
[]
null
[ "@lhoestq I am not sure how to test `dictionary_encode_column` (in-place version was not tested before)", "I can take a look at dictionary_encode_column tomorrow.\r\nAlthough it's likely that it doesn't work then. It was added at the beginning of the lib and never tested nor used afaik.", "Now let's update the documentation to use the new methods x)" ]
"2021-02-15T18:44:26Z"
"2021-02-24T14:54:49Z"
"2021-02-24T14:53:26Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1883.diff", "html_url": "https://github.com/huggingface/datasets/pull/1883", "merged_at": "2021-02-24T14:53:26Z", "patch_url": "https://github.com/huggingface/datasets/pull/1883.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1883" }
Should we deprecate in-place versions of such methods?
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1883/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1883/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4716
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4716/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4716/comments
https://api.github.com/repos/huggingface/datasets/issues/4716/events
https://github.com/huggingface/datasets/pull/4716
1,309,455,838
PR_kwDODunzps47pdbh
4,716
Support "tags" yaml tag
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "IMO `DatasetMetadata` shouldn't crash with attributes that it doesn't know, btw", "Yea this PR is mostly to have a validation that this field contains a list of strings.\r\n\r\nRegarding unknown fields, the tagging app currently returns an error if a field is unknown using the `DatasetMetadata`. We can change that though" ]
"2022-07-19T12:34:31Z"
"2022-07-20T13:44:50Z"
"2022-07-20T13:31:56Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4716.diff", "html_url": "https://github.com/huggingface/datasets/pull/4716", "merged_at": "2022-07-20T13:31:56Z", "patch_url": "https://github.com/huggingface/datasets/pull/4716.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4716" }
Added the "tags" YAML tag, so that users can specify data domain/topics keywords for dataset search
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4716/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4716/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/5606
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5606/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5606/comments
https://api.github.com/repos/huggingface/datasets/issues/5606/events
https://github.com/huggingface/datasets/issues/5606
1,608,911,632
I_kwDODunzps5f5gsQ
5,606
Add `Dataset.to_list` to the API
{ "avatar_url": "https://avatars.githubusercontent.com/u/47462742?v=4", "events_url": "https://api.github.com/users/mariosasko/events{/privacy}", "followers_url": "https://api.github.com/users/mariosasko/followers", "following_url": "https://api.github.com/users/mariosasko/following{/other_user}", "gists_url": "https://api.github.com/users/mariosasko/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mariosasko", "id": 47462742, "login": "mariosasko", "node_id": "MDQ6VXNlcjQ3NDYyNzQy", "organizations_url": "https://api.github.com/users/mariosasko/orgs", "received_events_url": "https://api.github.com/users/mariosasko/received_events", "repos_url": "https://api.github.com/users/mariosasko/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mariosasko/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mariosasko/subscriptions", "type": "User", "url": "https://api.github.com/users/mariosasko" }
[ { "color": "a2eeef", "default": true, "description": "New feature or request", "id": 1935892871, "name": "enhancement", "node_id": "MDU6TGFiZWwxOTM1ODkyODcx", "url": "https://api.github.com/repos/huggingface/datasets/labels/enhancement" }, { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/50972773?v=4", "events_url": "https://api.github.com/users/kyoto7250/events{/privacy}", "followers_url": "https://api.github.com/users/kyoto7250/followers", "following_url": "https://api.github.com/users/kyoto7250/following{/other_user}", "gists_url": "https://api.github.com/users/kyoto7250/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kyoto7250", "id": 50972773, "login": "kyoto7250", "node_id": "MDQ6VXNlcjUwOTcyNzcz", "organizations_url": "https://api.github.com/users/kyoto7250/orgs", "received_events_url": "https://api.github.com/users/kyoto7250/received_events", "repos_url": "https://api.github.com/users/kyoto7250/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kyoto7250/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kyoto7250/subscriptions", "type": "User", "url": "https://api.github.com/users/kyoto7250" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/50972773?v=4", "events_url": "https://api.github.com/users/kyoto7250/events{/privacy}", "followers_url": "https://api.github.com/users/kyoto7250/followers", "following_url": "https://api.github.com/users/kyoto7250/following{/other_user}", "gists_url": "https://api.github.com/users/kyoto7250/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/kyoto7250", "id": 50972773, "login": "kyoto7250", "node_id": "MDQ6VXNlcjUwOTcyNzcz", "organizations_url": "https://api.github.com/users/kyoto7250/orgs", "received_events_url": "https://api.github.com/users/kyoto7250/received_events", "repos_url": "https://api.github.com/users/kyoto7250/repos", "site_admin": false, "starred_url": "https://api.github.com/users/kyoto7250/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kyoto7250/subscriptions", "type": "User", "url": "https://api.github.com/users/kyoto7250" } ]
null
[ "Hello, I have an interest in this issue.\r\nIs the `Dataset.to_dict` you are describing correct in the code here?\r\n\r\nhttps://github.com/huggingface/datasets/blob/35b789e8f6826b6b5a6b48fcc2416c890a1f326a/src/datasets/arrow_dataset.py#L4633-L4667", "Yes, this is where `Dataset.to_dict` is defined.", "#self-assign" ]
"2023-03-03T16:17:10Z"
"2023-03-27T13:26:40Z"
"2023-03-27T13:26:40Z"
CONTRIBUTOR
null
null
null
Since there is `Dataset.from_list` in the API, we should also add `Dataset.to_list` to be consistent. Regarding the implementation, we can re-use `Dataset.to_dict`'s code and replace the `to_pydict` calls with `to_pylist`.
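For context, `pyarrow` already exposes both conversions, so the new method can mirror the existing one; a minimal sketch of the idea (not the actual implementation):

```python
import pyarrow as pa

table = pa.table({"a": [1, 2], "b": ["x", "y"]})

# column-oriented view, what Dataset.to_dict builds on
table.to_pydict()  # {'a': [1, 2], 'b': ['x', 'y']}

# row-oriented view, what Dataset.to_list would build on
table.to_pylist()  # [{'a': 1, 'b': 'x'}, {'a': 2, 'b': 'y'}]
```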
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5606/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5606/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/5651
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5651/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5651/comments
https://api.github.com/repos/huggingface/datasets/issues/5651/events
https://github.com/huggingface/datasets/issues/5651
1,631,967,509
I_kwDODunzps5hRdkV
5,651
expanduser in save_to_disk
{ "avatar_url": "https://avatars.githubusercontent.com/u/42400165?v=4", "events_url": "https://api.github.com/users/RmZeta2718/events{/privacy}", "followers_url": "https://api.github.com/users/RmZeta2718/followers", "following_url": "https://api.github.com/users/RmZeta2718/following{/other_user}", "gists_url": "https://api.github.com/users/RmZeta2718/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/RmZeta2718", "id": 42400165, "login": "RmZeta2718", "node_id": "MDQ6VXNlcjQyNDAwMTY1", "organizations_url": "https://api.github.com/users/RmZeta2718/orgs", "received_events_url": "https://api.github.com/users/RmZeta2718/received_events", "repos_url": "https://api.github.com/users/RmZeta2718/repos", "site_admin": false, "starred_url": "https://api.github.com/users/RmZeta2718/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/RmZeta2718/subscriptions", "type": "User", "url": "https://api.github.com/users/RmZeta2718" }
[ { "color": "7057ff", "default": true, "description": "Good for newcomers", "id": 1935892877, "name": "good first issue", "node_id": "MDU6TGFiZWwxOTM1ODkyODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/good%20first%20issue" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/35114142?v=4", "events_url": "https://api.github.com/users/benjaminbrown038/events{/privacy}", "followers_url": "https://api.github.com/users/benjaminbrown038/followers", "following_url": "https://api.github.com/users/benjaminbrown038/following{/other_user}", "gists_url": "https://api.github.com/users/benjaminbrown038/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/benjaminbrown038", "id": 35114142, "login": "benjaminbrown038", "node_id": "MDQ6VXNlcjM1MTE0MTQy", "organizations_url": "https://api.github.com/users/benjaminbrown038/orgs", "received_events_url": "https://api.github.com/users/benjaminbrown038/received_events", "repos_url": "https://api.github.com/users/benjaminbrown038/repos", "site_admin": false, "starred_url": "https://api.github.com/users/benjaminbrown038/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/benjaminbrown038/subscriptions", "type": "User", "url": "https://api.github.com/users/benjaminbrown038" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/35114142?v=4", "events_url": "https://api.github.com/users/benjaminbrown038/events{/privacy}", "followers_url": "https://api.github.com/users/benjaminbrown038/followers", "following_url": "https://api.github.com/users/benjaminbrown038/following{/other_user}", "gists_url": "https://api.github.com/users/benjaminbrown038/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/benjaminbrown038", "id": 35114142, "login": "benjaminbrown038", "node_id": "MDQ6VXNlcjM1MTE0MTQy", "organizations_url": "https://api.github.com/users/benjaminbrown038/orgs", "received_events_url": "https://api.github.com/users/benjaminbrown038/received_events", "repos_url": "https://api.github.com/users/benjaminbrown038/repos", "site_admin": false, "starred_url": "https://api.github.com/users/benjaminbrown038/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/benjaminbrown038/subscriptions", "type": "User", "url": "https://api.github.com/users/benjaminbrown038" } ]
null
[ "`save_to_disk` should indeed expand `~`. Marking it as a \"good first issue\".", "#self-assign\r\n\r\nFile path to code: \r\n\r\nhttps://github.com/huggingface/datasets/blob/2.13.0/src/datasets/arrow_dataset.py#L1364\r\n\r\n@RmZeta2718 I created a pull request for this issue. ", "Hello, \r\nIt says `save_to_disk` is deprecated in 2.8.0, so the alternative to this will be `storage_options`? \r\n\r\nhttps://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.save_to_disk", "@ashikshafi08 I think you misunderstood the warning. The method `save_to_disk` is not deprecated only the optional parameter `fs`.\r\nAlso @benjaminbrown038 as I cannot find your PR I would like to work on this if you don't mind.", "@mariosasko It's been several months and the PR is not reviewed. Could you please take a look? I assume this is not complicated and could be merged fairly soon." ]
"2023-03-20T12:02:18Z"
"2023-10-27T14:04:37Z"
"2023-10-27T14:04:37Z"
NONE
null
null
null
### Describe the bug save_to_disk() does not expand `~` 1. `dataset = load_dataset("any dataset")` 2. `dataset.save_to_disk("~/data")` 3. a folder named "~" is created in the current folder 4. FileNotFoundError is raised, because the expanded path does not exist (`/home/<user>/data`) related issue: https://github.com/huggingface/transformers/issues/10628 ### Steps to reproduce the bug As described above. ### Expected behavior The path should be expanded with expanduser. ### Environment info - datasets 2.10.1 - python 3.10
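A minimal sketch of the expected fix, assuming the path is expanded before any filesystem call (the helper name and exact place in `save_to_disk` are hypothetical):

```python
import os

def _resolve_dataset_path(dataset_path: str) -> str:
    # "~/data" -> "/home/<user>/data" instead of creating a literal "~" folder
    return os.path.expanduser(dataset_path)

assert not _resolve_dataset_path("~/data").startswith("~")
```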
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5651/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5651/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2226
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2226/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2226/comments
https://api.github.com/repos/huggingface/datasets/issues/2226/events
https://github.com/huggingface/datasets/issues/2226
859,720,302
MDU6SXNzdWU4NTk3MjAzMDI=
2,226
Batched map fails when removing all columns
{ "avatar_url": "https://avatars.githubusercontent.com/u/2743060?v=4", "events_url": "https://api.github.com/users/villmow/events{/privacy}", "followers_url": "https://api.github.com/users/villmow/followers", "following_url": "https://api.github.com/users/villmow/following{/other_user}", "gists_url": "https://api.github.com/users/villmow/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/villmow", "id": 2743060, "login": "villmow", "node_id": "MDQ6VXNlcjI3NDMwNjA=", "organizations_url": "https://api.github.com/users/villmow/orgs", "received_events_url": "https://api.github.com/users/villmow/received_events", "repos_url": "https://api.github.com/users/villmow/repos", "site_admin": false, "starred_url": "https://api.github.com/users/villmow/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/villmow/subscriptions", "type": "User", "url": "https://api.github.com/users/villmow" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" } ]
null
[ "I found the problem. I called `set_format` on some columns before. This makes it crash. Here is a complete example to reproduce:\r\n\r\n```python\r\nfrom datasets import load_dataset\r\nsst = load_dataset(\"sst\")\r\nsst.set_format(\"torch\", columns=[\"label\"], output_all_columns=True)\r\nds = sst[\"train\"]\r\n\r\n# crashes\r\nds.map(\r\n lambda x: {\"a\": list(range(20))},\r\n remove_columns=ds.column_names,\r\n load_from_cache_file=False,\r\n num_proc=1,\r\n batched=True,\r\n)\r\n```", "Thanks for reporting and for providing this code to reproduce the issue, this is really helpful !", "I merged a fix, it should work on `master` now :)\r\nWe'll do a new release soon !" ]
"2021-04-16T11:17:01Z"
"2022-10-05T17:32:15Z"
"2022-10-05T17:32:15Z"
NONE
null
null
null
Hi @lhoestq , I'm hijacking this issue, because I'm currently trying to do the approach you recommend: > Currently the optimal setup for single-column computations is probably to do something like > > ```python > result = dataset.map(f, input_columns="my_col", remove_columns=dataset.column_names) > ``` Here is my code (see the edit below, in which I added a simplified version). This is the error: ```bash pyarrow.lib.ArrowInvalid: Column 1 named tokens expected length 8964 but got length 1000 ``` I wonder why this error occurs when I delete every column? Can you give me a hint? ### Edit: I preprocessed my dataset before (using map with the features argument) and saved it to disk. May this be part of the error? I can iterate over the complete dataset and print every sample before calling map. There seems to be no other problem with the dataset. I tried to simplify the code that crashes: ```python # works log.debug(dataset.column_names) log.debug(dataset) for i, sample in enumerate(dataset): log.debug(i, sample) # crashes counted_dataset = dataset.map( lambda x: {"a": list(range(20))}, input_columns=column, remove_columns=dataset.column_names, load_from_cache_file=False, num_proc=num_workers, batched=True, ) ``` ``` pyarrow.lib.ArrowInvalid: Column 1 named tokens expected length 20 but got length 1000 ``` Edit2: May this be a problem with a schema I set when preprocessing the dataset before? I tried to add the `features` argument to the function and then I get a new error: ```python # crashes counted_dataset = dataset.map( lambda x: {"a": list(range(20))}, input_columns=column, remove_columns=dataset.column_names, load_from_cache_file=False, num_proc=num_workers, batched=True, features=datasets.Features( { "a": datasets.Sequence(datasets.Value("int32")) } ) ) ``` ``` File "env/lib/python3.8/site-packages/datasets/arrow_dataset.py", line 1704, in _map_single writer.write_batch(batch) File "env/lib/python3.8/site-packages/datasets/arrow_writer.py", line 312, in write_batch col_type = schema.field(col).type if schema is not None else None File "pyarrow/types.pxi", line 1341, in pyarrow.lib.Schema.field KeyError: 'Column tokens does not exist in schema' ``` _Originally posted by @villmow in https://github.com/huggingface/datasets/issues/2193#issuecomment-820230874_
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2226/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2226/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/2811
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2811/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2811/comments
https://api.github.com/repos/huggingface/datasets/issues/2811/events
https://github.com/huggingface/datasets/pull/2811
972,522,480
MDExOlB1bGxSZXF1ZXN0NzE0MTAzNDIy
2,811
Fix stream oscar
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[]
closed
false
null
[]
null
[ "One additional note: if we can try to not change the code of oscar.py too often, I'm sure users that have it in their cache directory will be happy to not have to redownload it every time they update the library ;)\r\n\r\n(since changing the code changes the cache directory of the dataset)", "I don't think this is confusing for users because users don't even know we have patched `open`. The only thing users care is that if the pass `streaming=True`, they want to be able to load the dataset in streaming mode.\r\n\r\nI don't see any other dataset where patching `open` with `fsspec.open`+`compression` is an \"underlying issue\". Are there other datasets where this is an issue?\r\n\r\nThe only dataset where this was an issue is in oscar and the issue is indeed due to the additional `open` you added inside `zip.open`.", "Closing this one since https://github.com/huggingface/datasets/pull/2822 reverted the change of behavior of `open`" ]
"2021-08-17T10:10:59Z"
"2021-08-26T10:26:15Z"
"2021-08-26T10:26:14Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2811.diff", "html_url": "https://github.com/huggingface/datasets/pull/2811", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/2811.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2811" }
Previously, an additional `open` was added to oscar to make it stream-compatible: 587bbb94e891b22863b312b99696e32708c379f4. It was argued that this might be problematic: https://github.com/huggingface/datasets/pull/2786#discussion_r690045921 This PR: - removes that additional `open` - patches `gzip.open` with `xopen` + `compression="gzip"`
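For reference, a minimal sketch of the `fsspec` pattern this patch relies on (the URL is illustrative):

```python
import fsspec

# Stream-decompress a remote gzip file without downloading it first,
# which is what replacing gzip.open with xopen(..., compression="gzip") amounts to.
with fsspec.open("https://example.com/shard-0.txt.gz", "rt", compression="gzip") as f:
    first_line = f.readline()
```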
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2811/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2811/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4282
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4282/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4282/comments
https://api.github.com/repos/huggingface/datasets/issues/4282/events
https://github.com/huggingface/datasets/pull/4282
1,225,616,545
PR_kwDODunzps43TZYL
4,282
Don't do unnecessary list type casting to avoid replacing None values by empty lists
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "Quick question about the message in the warning. You say \"will be fixed in a future major version\" but don't you mean \"will raise an error in a future major version\"?", "Right ! Good catch, thanks, I updated the message to say \"will raise an error in a future major version\"" ]
"2022-05-04T16:37:01Z"
"2022-05-06T10:43:58Z"
"2022-05-06T10:37:00Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4282.diff", "html_url": "https://github.com/huggingface/datasets/pull/4282", "merged_at": "2022-05-06T10:37:00Z", "patch_url": "https://github.com/huggingface/datasets/pull/4282.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4282" }
In certain cases, `None` values are replaced by empty lists when casting feature types. It happens every time you cast an array of nested lists like [None, [0, 1, 2, 3]] to a different type (to change the integer precision, for example). In that case you'd get [[], [0, 1, 2, 3]]. This issue comes from PyArrow; see the discussion in https://github.com/huggingface/datasets/issues/3676 This issue also happens when no type casting is needed, because casting is supposed to be a no-op in this case. But as https://github.com/huggingface/datasets/issues/3676 showed, that's not the case, and `None` values are replaced by empty lists even if we cast to the exact same type. In this PR I just work around this bug in the case where no type casting is needed. In particular, I call `pa.ListArray.from_arrays` only when necessary. I also added a warning when some `None` values are effectively replaced by empty lists. I wanted to raise an error in this case, but maybe we should wait for a major update to do so. This PR fixes this particular case, which occurs in `run_qa.py` in `transformers`: ```python from datasets import Dataset ds = Dataset.from_dict({"a": range(4)}) ds = ds.map(lambda x: {"b": [[None, [0]]]}, batched=True, batch_size=1, remove_columns=["a"]) print(ds.to_pandas()) # before: # b # 0 [None, [0]] # 1 [[], [0]] # 2 [[], [0]] # 3 [[], [0]] # # now: # b # 0 [None, [0]] # 1 [None, [0]] # 2 [None, [0]] # 3 [None, [0]] ``` cc @sgugger
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4282/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4282/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/976
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/976/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/976/comments
https://api.github.com/repos/huggingface/datasets/issues/976/events
https://github.com/huggingface/datasets/pull/976
754,826,146
MDExOlB1bGxSZXF1ZXN0NTMwNjU1NzM5
976
Arabic pos dialect
{ "avatar_url": "https://avatars.githubusercontent.com/u/26722925?v=4", "events_url": "https://api.github.com/users/mcmillanmajora/events{/privacy}", "followers_url": "https://api.github.com/users/mcmillanmajora/followers", "following_url": "https://api.github.com/users/mcmillanmajora/following{/other_user}", "gists_url": "https://api.github.com/users/mcmillanmajora/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mcmillanmajora", "id": 26722925, "login": "mcmillanmajora", "node_id": "MDQ6VXNlcjI2NzIyOTI1", "organizations_url": "https://api.github.com/users/mcmillanmajora/orgs", "received_events_url": "https://api.github.com/users/mcmillanmajora/received_events", "repos_url": "https://api.github.com/users/mcmillanmajora/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mcmillanmajora/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mcmillanmajora/subscriptions", "type": "User", "url": "https://api.github.com/users/mcmillanmajora" }
[]
closed
false
null
[]
null
[ "looks like this PR includes changes about many other files than the oens for Araboc POS Dialect\r\n\r\nCan you create a another branch and another PR please ?", "Sorry! I'm not sure how I managed to do that. I'll make a new branch." ]
"2020-12-02T00:21:13Z"
"2020-12-09T17:30:32Z"
"2020-12-09T17:30:32Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/976.diff", "html_url": "https://github.com/huggingface/datasets/pull/976", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/976.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/976" }
A README.md and loading script for the Arabic POS Dialect dataset. The README is missing the sections on personal information, biases, and limitations, as it would probably be better for those to be filled in by someone who can read the contents of the dataset and is familiar with Arabic NLP.
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/976/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/976/timeline
null
null
true
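Once merged, loading would presumably look like the sketch below; the dataset ID `arabic_pos_dialect` and the `egy` (Egyptian) config name are assumptions for illustration, not taken from the PR:

```python
from datasets import load_dataset

# Hypothetical dataset ID and config name for the Egyptian dialect subset.
pos = load_dataset("arabic_pos_dialect", "egy")
print(pos["train"][0])  # one annotated example
```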
https://api.github.com/repos/huggingface/datasets/issues/4657
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4657/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4657/comments
https://api.github.com/repos/huggingface/datasets/issues/4657/events
https://github.com/huggingface/datasets/issues/4657
1,296,743,133
I_kwDODunzps5NSrrd
4,657
Add SQuAD2.0 Dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/4755430?v=4", "events_url": "https://api.github.com/users/omarespejel/events{/privacy}", "followers_url": "https://api.github.com/users/omarespejel/followers", "following_url": "https://api.github.com/users/omarespejel/following{/other_user}", "gists_url": "https://api.github.com/users/omarespejel/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/omarespejel", "id": 4755430, "login": "omarespejel", "node_id": "MDQ6VXNlcjQ3NTU0MzA=", "organizations_url": "https://api.github.com/users/omarespejel/orgs", "received_events_url": "https://api.github.com/users/omarespejel/received_events", "repos_url": "https://api.github.com/users/omarespejel/repos", "site_admin": false, "starred_url": "https://api.github.com/users/omarespejel/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/omarespejel/subscriptions", "type": "User", "url": "https://api.github.com/users/omarespejel" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" } ]
closed
false
null
[]
null
[ "Hey, It's already present [here](https://huggingface.co/datasets/squad_v2) ", "Hi! This dataset is indeed already available on the Hub. Closing." ]
"2022-07-07T03:19:36Z"
"2022-07-12T16:14:52Z"
"2022-07-12T16:14:52Z"
NONE
null
null
null
## Adding a Dataset - **Name:** *SQuAD2.0* - **Description:** *Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span, from the corresponding reading passage, or the question might be unanswerable.* - **Paper:** *https://aclanthology.org/P18-2124.pdf* - **Data:** *https://rajpurkar.github.io/SQuAD-explorer/* - **Motivation:** *Dataset for training and evaluating models of conversational response*
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4657/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4657/timeline
null
completed
false
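As the comments note, the dataset is already on the Hub as `squad_v2`, so no new loader is needed; a minimal usage sketch:

```python
from datasets import load_dataset

squad_v2 = load_dataset("squad_v2")
print(squad_v2)  # DatasetDict with 'train' and 'validation' splits
print(squad_v2["train"][0]["question"])
```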
https://api.github.com/repos/huggingface/datasets/issues/1677
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1677/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1677/comments
https://api.github.com/repos/huggingface/datasets/issues/1677/events
https://github.com/huggingface/datasets/pull/1677
777,553,383
MDExOlB1bGxSZXF1ZXN0NTQ3ODE3ODI1
1,677
Switchboard Dialog Act Corpus added under `datasets/swda`
{ "avatar_url": "https://avatars.githubusercontent.com/u/22454783?v=4", "events_url": "https://api.github.com/users/gmihaila/events{/privacy}", "followers_url": "https://api.github.com/users/gmihaila/followers", "following_url": "https://api.github.com/users/gmihaila/following{/other_user}", "gists_url": "https://api.github.com/users/gmihaila/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/gmihaila", "id": 22454783, "login": "gmihaila", "node_id": "MDQ6VXNlcjIyNDU0Nzgz", "organizations_url": "https://api.github.com/users/gmihaila/orgs", "received_events_url": "https://api.github.com/users/gmihaila/received_events", "repos_url": "https://api.github.com/users/gmihaila/repos", "site_admin": false, "starred_url": "https://api.github.com/users/gmihaila/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gmihaila/subscriptions", "type": "User", "url": "https://api.github.com/users/gmihaila" }
[]
closed
false
null
[]
null
[ "Need to fix code formatting." ]
"2021-01-03T01:16:42Z"
"2021-01-03T02:55:57Z"
"2021-01-03T02:55:56Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1677.diff", "html_url": "https://github.com/huggingface/datasets/pull/1677", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1677.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1677" }
Pleased to announce that I added my first dataset, the **Switchboard Dialog Act Corpus**. I think this is an important dataset to add since it is the only one related to dialogue act classification. Hope the pull request is OK; I wasn't able to see any special formatting for the pull request form. The Switchboard Dialog Act Corpus (SwDA) extends the Switchboard-1 Telephone Speech Corpus, Release 2, with turn/utterance-level dialog-act tags. The tags summarize syntactic, semantic, and pragmatic information about the associated turn. The SwDA project was undertaken at UC Boulder in the late 1990s. [webpage](http://compprag.christopherpotts.net/swda.html) [repo](https://github.com/NathanDuran/Switchboard-Corpus/raw/master/swda_data/) Please contact me for any support! All tests passed and I followed all the steps in the contribution guide!
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1677/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1677/timeline
null
null
true
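Assuming the corpus ends up published under the dataset ID `swda` (as the PR title suggests), usage would look roughly like:

```python
from datasets import load_dataset

swda = load_dataset("swda")
print(swda["train"][0])  # one utterance with its dialog-act tag
```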
https://api.github.com/repos/huggingface/datasets/issues/2419
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2419/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2419/comments
https://api.github.com/repos/huggingface/datasets/issues/2419/events
https://github.com/huggingface/datasets/pull/2419
904,347,339
MDExOlB1bGxSZXF1ZXN0NjU1NTA1OTM1
2,419
adds license information for DailyDialog.
{ "avatar_url": "https://avatars.githubusercontent.com/u/11574558?v=4", "events_url": "https://api.github.com/users/aditya2211/events{/privacy}", "followers_url": "https://api.github.com/users/aditya2211/followers", "following_url": "https://api.github.com/users/aditya2211/following{/other_user}", "gists_url": "https://api.github.com/users/aditya2211/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/aditya2211", "id": 11574558, "login": "aditya2211", "node_id": "MDQ6VXNlcjExNTc0NTU4", "organizations_url": "https://api.github.com/users/aditya2211/orgs", "received_events_url": "https://api.github.com/users/aditya2211/received_events", "repos_url": "https://api.github.com/users/aditya2211/repos", "site_admin": false, "starred_url": "https://api.github.com/users/aditya2211/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aditya2211/subscriptions", "type": "User", "url": "https://api.github.com/users/aditya2211" }
[]
closed
false
null
[]
null
[ "Thanks! Can you also add it as metadata in the YAML block at the top of the file?\r\n\r\nShould be in the form:\r\n\r\n```\r\nlicenses:\r\n- cc-by-sa-4.0\r\n```", "seems like we need to add all the other tags ? \r\n``` \r\nif error_messages:\r\n> raise ValueError(\"\\n\".join(error_messages))\r\nE ValueError: The following issues have been found in the dataset cards:\r\nE YAML tags:\r\nE __init__() missing 8 required positional arguments: 'annotations_creators', 'language_creators', 'languages', 'multilinguality', 'size_categories', 'source_datasets', 'task_categories', and 'task_ids'\r\n```", "I'll let @lhoestq or @yjernite chime in (and maybe complete/merge). Thanks!", "Looks like CircleCI has an incident. Let's wait for it to be working again and make sure the CI is green", "The remaining error is unrelated to this PR, merging" ]
"2021-05-27T23:03:42Z"
"2021-05-31T13:16:52Z"
"2021-05-31T13:16:52Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/2419.diff", "html_url": "https://github.com/huggingface/datasets/pull/2419", "merged_at": "2021-05-31T13:16:52Z", "patch_url": "https://github.com/huggingface/datasets/pull/2419.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/2419" }
{ "+1": 1, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 1, "url": "https://api.github.com/repos/huggingface/datasets/issues/2419/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2419/timeline
null
null
true
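A sketch of how to check the license string recorded for a dataset after a PR like this one; `load_dataset_builder` reads the metadata without downloading the data files (the exact license value for DailyDialog is not asserted here):

```python
from datasets import load_dataset_builder

# Inspect dataset metadata without downloading the data files.
builder = load_dataset_builder("daily_dialog")
print(builder.info.license)  # the license string added by PRs like this one
```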
https://api.github.com/repos/huggingface/datasets/issues/3859
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3859/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3859/comments
https://api.github.com/repos/huggingface/datasets/issues/3859/events
https://github.com/huggingface/datasets/issues/3859
1,162,559,333
I_kwDODunzps5FSz9l
3,859
Unable to download big_patent (FileNotFoundError)
{ "avatar_url": "https://avatars.githubusercontent.com/u/25265140?v=4", "events_url": "https://api.github.com/users/slvcsl/events{/privacy}", "followers_url": "https://api.github.com/users/slvcsl/followers", "following_url": "https://api.github.com/users/slvcsl/following{/other_user}", "gists_url": "https://api.github.com/users/slvcsl/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/slvcsl", "id": 25265140, "login": "slvcsl", "node_id": "MDQ6VXNlcjI1MjY1MTQw", "organizations_url": "https://api.github.com/users/slvcsl/orgs", "received_events_url": "https://api.github.com/users/slvcsl/received_events", "repos_url": "https://api.github.com/users/slvcsl/repos", "site_admin": false, "starred_url": "https://api.github.com/users/slvcsl/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/slvcsl/subscriptions", "type": "User", "url": "https://api.github.com/users/slvcsl" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" }, { "color": "cfd3d7", "default": true, "description": "This issue or pull request already exists", "id": 1935892865, "name": "duplicate", "node_id": "MDU6TGFiZWwxOTM1ODkyODY1", "url": "https://api.github.com/repos/huggingface/datasets/labels/duplicate" } ]
closed
false
{ "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" }
[ { "avatar_url": "https://avatars.githubusercontent.com/u/8515462?v=4", "events_url": "https://api.github.com/users/albertvillanova/events{/privacy}", "followers_url": "https://api.github.com/users/albertvillanova/followers", "following_url": "https://api.github.com/users/albertvillanova/following{/other_user}", "gists_url": "https://api.github.com/users/albertvillanova/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/albertvillanova", "id": 8515462, "login": "albertvillanova", "node_id": "MDQ6VXNlcjg1MTU0NjI=", "organizations_url": "https://api.github.com/users/albertvillanova/orgs", "received_events_url": "https://api.github.com/users/albertvillanova/received_events", "repos_url": "https://api.github.com/users/albertvillanova/repos", "site_admin": false, "starred_url": "https://api.github.com/users/albertvillanova/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/albertvillanova/subscriptions", "type": "User", "url": "https://api.github.com/users/albertvillanova" } ]
null
[ "Hi @slvcsl, thanks for reporting.\r\n\r\nYesterday we just made a patch release of our `datasets` library that fixes this issue: version 1.18.4.\r\nhttps://pypi.org/project/datasets/#history\r\n\r\nPlease, feel free to update `datasets` library to the latest version: \r\n```shell\r\npip install -U datasets\r\n```\r\nAnd then you should force redownload of the data file to update your local cache: \r\n```python\r\nds = load_dataset(\"big_patent\", \"g\", split=\"validation\", download_mode=\"force_redownload\")\r\n```\r\n- Note that before the fix, you just downloaded and cached the Google Drive virus scan warning page, instead of the data file\r\n\r\nThis issue was already reported \r\n- #3784\r\n\r\nand its root cause is a change in the Google Drive service. See:\r\n- #3786 \r\n\r\nWe already fixed it. See:\r\n- #3787 \r\n" ]
"2022-03-08T11:47:12Z"
"2022-03-08T13:04:09Z"
"2022-03-08T13:04:04Z"
NONE
null
null
null
## Describe the bug I am trying to download some splits of the big_patent dataset, using the following code: `ds = load_dataset("big_patent", "g", split="validation", download_mode="force_redownload") ` However, this leads to a FileNotFoundError. FileNotFoundError Traceback (most recent call last) <ipython-input-3-8d8a745706a9> in <module>() 1 from datasets import load_dataset ----> 2 ds = load_dataset("big_patent", "g", split="validation", download_mode="force_redownload") 8 frames /usr/local/lib/python3.7/dist-packages/datasets/load.py in load_dataset(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, script_version, **config_kwargs) 1705 ignore_verifications=ignore_verifications, 1706 try_from_hf_gcs=try_from_hf_gcs, -> 1707 use_auth_token=use_auth_token, 1708 ) 1709 /usr/local/lib/python3.7/dist-packages/datasets/builder.py in download_and_prepare(self, download_config, download_mode, ignore_verifications, try_from_hf_gcs, dl_manager, base_path, use_auth_token, **download_and_prepare_kwargs) 593 if not downloaded_from_gcs: 594 self._download_and_prepare( --> 595 dl_manager=dl_manager, verify_infos=verify_infos, **download_and_prepare_kwargs 596 ) 597 # Sync info /usr/local/lib/python3.7/dist-packages/datasets/builder.py in _download_and_prepare(self, dl_manager, verify_infos, **prepare_split_kwargs) 659 split_dict = SplitDict(dataset_name=self.name) 660 split_generators_kwargs = self._make_split_generators_kwargs(prepare_split_kwargs) --> 661 split_generators = self._split_generators(dl_manager, **split_generators_kwargs) 662 663 # Checksums verification /root/.cache/huggingface/modules/datasets_modules/datasets/big_patent/bdefa7c0b39fba8bba1c6331b70b738e30d63c8ad4567f983ce315a5fef6131c/big_patent.py in _split_generators(self, dl_manager) 123 split_types = ["train", "val", "test"] 124 extract_paths = dl_manager.extract( --> 125 {k: os.path.join(dl_path, "bigPatentData", k + ".tar.gz") for k in split_types} 126 ) 127 extract_paths = {k: os.path.join(extract_paths[k], k) for k in split_types} /usr/local/lib/python3.7/dist-packages/datasets/utils/download_manager.py in extract(self, path_or_paths, num_proc) 282 download_config.extract_compressed_file = True 283 extracted_paths = map_nested( --> 284 partial(cached_path, download_config=download_config), path_or_paths, num_proc=num_proc, disable_tqdm=False 285 ) 286 path_or_paths = NestedDataStructure(path_or_paths) /usr/local/lib/python3.7/dist-packages/datasets/utils/py_utils.py in map_nested(function, data_struct, dict_only, map_list, map_tuple, map_numpy, num_proc, types, disable_tqdm) 260 mapped = [ 261 _single_map_nested((function, obj, types, None, True)) --> 262 for obj in utils.tqdm(iterable, disable=disable_tqdm) 263 ] 264 else: /usr/local/lib/python3.7/dist-packages/datasets/utils/py_utils.py in <listcomp>(.0) 260 mapped = [ 261 _single_map_nested((function, obj, types, None, True)) --> 262 for obj in utils.tqdm(iterable, disable=disable_tqdm) 263 ] 264 else: /usr/local/lib/python3.7/dist-packages/datasets/utils/py_utils.py in _single_map_nested(args) 194 # Singleton first to spare some computation 195 if not isinstance(data_struct, dict) and not isinstance(data_struct, types): --> 196 return function(data_struct) 197 198 # Reduce logging to keep things readable in multiprocessing with tqdm /usr/local/lib/python3.7/dist-packages/datasets/utils/file_utils.py in cached_path(url_or_filename, download_config, **download_kwargs) 314 elif is_local_path(url_or_filename): 315 # File, but it doesn't exist. --> 316 raise FileNotFoundError(f"Local file {url_or_filename} doesn't exist") 317 else: 318 # Something unknown FileNotFoundError: Local file /root/.cache/huggingface/datasets/downloads/extracted/ad068abb3e11f9f2f5440b62e37eb2b03ee515df9de1637c55cd1793b68668b2/bigPatentData/train.tar.gz doesn't exist I have tried this on a number of machines, including on Colab, so I think this is not environment dependent. How do I load the bigPatent dataset?
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3859/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3859/timeline
null
completed
false
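A quick way to check whether a cached download is the real archive or the Google Drive virus-scan page described in the comments: gzip files start with the magic bytes 0x1f 0x8b, while the warning page is HTML. The cache path below is a hypothetical placeholder:

```python
# Hypothetical cached file path -- substitute the actual file from
# ~/.cache/huggingface/datasets/downloads/ on your machine.
path = "/root/.cache/huggingface/datasets/downloads/<some-hash>"

with open(path, "rb") as f:
    magic = f.read(2)
print("real gzip archive" if magic == b"\x1f\x8b"
      else "not a gzip file (likely the HTML warning page)")
```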
https://api.github.com/repos/huggingface/datasets/issues/5072
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/5072/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/5072/comments
https://api.github.com/repos/huggingface/datasets/issues/5072/events
https://github.com/huggingface/datasets/pull/5072
1,397,765,531
PR_kwDODunzps5ANoo5
5,072
Image & Audio formatting for numpy/torch/tf/jax
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "I just added a consolidation step so that numpy arrays or tensors of images are stacked together if the shapes match, instead of having lists of tensors\r\n\r\nFeel free to review @mariosasko :)", "I added a few lines in the docs and reverted the ragged numpy array change :)\r\n\r\nready for another review @mariosasko !" ]
"2022-10-05T13:07:03Z"
"2022-10-10T13:24:10Z"
"2022-10-10T13:21:32Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/5072.diff", "html_url": "https://github.com/huggingface/datasets/pull/5072", "merged_at": "2022-10-10T13:21:32Z", "patch_url": "https://github.com/huggingface/datasets/pull/5072.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/5072" }
Added support for image and audio formatting for numpy, torch, tf and jax. For images, the dtype used is that of the image (the one returned by PIL.Image), e.g. uint8. I also added support for string, binary and None types. In particular, for torch and jax, strings are kept unchanged (previously this raised an error because you can't create a tensor of strings).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/5072/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/5072/timeline
null
null
true
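A sketch of the resulting behavior on an image dataset; the choice of the public `beans` dataset is an assumption for illustration, not from the PR:

```python
from datasets import load_dataset

# Format the dataset so images come back as torch tensors.
ds = load_dataset("beans", split="train").with_format("torch")
img = ds[0]["image"]
print(type(img), img.dtype)  # torch.Tensor with dtype torch.uint8 (the PIL image's dtype)
```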
https://api.github.com/repos/huggingface/datasets/issues/2850
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/2850/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/2850/comments
https://api.github.com/repos/huggingface/datasets/issues/2850/events
https://github.com/huggingface/datasets/issues/2850
982,654,644
MDU6SXNzdWU5ODI2NTQ2NDQ=
2,850
Wound segmentation datasets
{ "avatar_url": "https://avatars.githubusercontent.com/u/7246357?v=4", "events_url": "https://api.github.com/users/osanseviero/events{/privacy}", "followers_url": "https://api.github.com/users/osanseviero/followers", "following_url": "https://api.github.com/users/osanseviero/following{/other_user}", "gists_url": "https://api.github.com/users/osanseviero/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/osanseviero", "id": 7246357, "login": "osanseviero", "node_id": "MDQ6VXNlcjcyNDYzNTc=", "organizations_url": "https://api.github.com/users/osanseviero/orgs", "received_events_url": "https://api.github.com/users/osanseviero/received_events", "repos_url": "https://api.github.com/users/osanseviero/repos", "site_admin": false, "starred_url": "https://api.github.com/users/osanseviero/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/osanseviero/subscriptions", "type": "User", "url": "https://api.github.com/users/osanseviero" }
[ { "color": "e99695", "default": false, "description": "Requesting to add a new dataset", "id": 2067376369, "name": "dataset request", "node_id": "MDU6TGFiZWwyMDY3Mzc2MzY5", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20request" }, { "color": "bfdadc", "default": false, "description": "Vision datasets", "id": 3608941089, "name": "vision", "node_id": "LA_kwDODunzps7XHBIh", "url": "https://api.github.com/repos/huggingface/datasets/labels/vision" } ]
open
false
null
[]
null
[]
"2021-08-30T10:44:32Z"
"2021-12-08T12:02:00Z"
null
MEMBER
null
null
null
## Adding a Dataset - **Name:** Wound segmentation datasets - **Description:** annotated wound image dataset - **Paper:** https://www.nature.com/articles/s41598-020-78799-w - **Data:** https://github.com/uwm-bigdata/wound-segmentation - **Motivation:** Interesting simple image dataset, useful for segmentation, with visibility due to http://www.miccai.org/special-interest-groups/challenges/ and https://fusc.grand-challenge.org/ Instructions to add a new dataset can be found [here](https://github.com/huggingface/datasets/blob/master/ADD_NEW_DATASET.md).
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/2850/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/2850/timeline
null
null
false
https://api.github.com/repos/huggingface/datasets/issues/6448
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/6448/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/6448/comments
https://api.github.com/repos/huggingface/datasets/issues/6448/events
https://github.com/huggingface/datasets/pull/6448
2,008,614,985
PR_kwDODunzps5gQBsE
6,448
Use parquet export if possible
{ "avatar_url": "https://avatars.githubusercontent.com/u/42851186?v=4", "events_url": "https://api.github.com/users/lhoestq/events{/privacy}", "followers_url": "https://api.github.com/users/lhoestq/followers", "following_url": "https://api.github.com/users/lhoestq/following{/other_user}", "gists_url": "https://api.github.com/users/lhoestq/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/lhoestq", "id": 42851186, "login": "lhoestq", "node_id": "MDQ6VXNlcjQyODUxMTg2", "organizations_url": "https://api.github.com/users/lhoestq/orgs", "received_events_url": "https://api.github.com/users/lhoestq/received_events", "repos_url": "https://api.github.com/users/lhoestq/repos", "site_admin": false, "starred_url": "https://api.github.com/users/lhoestq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lhoestq/subscriptions", "type": "User", "url": "https://api.github.com/users/lhoestq" }
[]
closed
false
null
[]
null
[ "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005177 / 0.011353 (-0.006176) | 0.003002 / 0.011008 (-0.008006) | 0.061915 / 0.038508 (0.023407) | 0.052065 / 0.023109 (0.028956) | 0.246114 / 0.275898 (-0.029784) | 0.273974 / 0.323480 (-0.049506) | 0.002983 / 0.007986 (-0.005003) | 0.002444 / 0.004328 (-0.001885) | 0.048424 / 0.004250 (0.044174) | 0.039609 / 0.037052 (0.002557) | 0.257771 / 0.258489 (-0.000718) | 0.286228 / 0.293841 (-0.007613) | 0.023925 / 0.128546 (-0.104621) | 0.007248 / 0.075646 (-0.068398) | 0.202205 / 0.419271 (-0.217067) | 0.037124 / 0.043533 (-0.006409) | 0.254872 / 0.255139 (-0.000267) | 0.275252 / 0.283200 (-0.007947) | 0.019251 / 0.141683 (-0.122432) | 1.074921 / 1.452155 (-0.377234) | 1.146515 / 1.492716 (-0.346202) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091998 / 0.018006 (0.073992) | 0.299146 / 0.000490 (0.298656) | 0.000240 / 0.000200 (0.000040) | 0.000054 / 0.000054 (0.000000) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019266 / 0.037411 (-0.018145) | 0.062560 / 0.014526 (0.048034) | 0.075012 / 0.176557 (-0.101544) | 0.120077 / 0.737135 (-0.617058) | 0.077851 / 0.296338 (-0.218488) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290629 / 0.215209 (0.075420) | 2.823847 / 2.077655 (0.746192) | 1.516966 / 1.504120 (0.012846) | 1.393383 / 1.541195 (-0.147812) | 1.427688 / 
1.468490 (-0.040802) | 0.407456 / 4.584777 (-4.177321) | 2.378280 / 3.745712 (-1.367433) | 2.689800 / 5.269862 (-2.580061) | 1.588037 / 4.565676 (-2.977640) | 0.045837 / 0.424275 (-0.378438) | 0.004884 / 0.007607 (-0.002724) | 0.340464 / 0.226044 (0.114420) | 3.377158 / 2.268929 (1.108230) | 1.897854 / 55.444624 (-53.546771) | 1.588285 / 6.876477 (-5.288191) | 1.651708 / 2.142072 (-0.490364) | 0.482018 / 4.805227 (-4.323209) | 0.101583 / 6.500664 (-6.399081) | 0.042306 / 0.075469 (-0.033163) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.948659 / 1.841788 (-0.893128) | 11.809778 / 8.074308 (3.735470) | 10.481896 / 10.191392 (0.290504) | 0.143538 / 0.680424 (-0.536885) | 0.014105 / 0.534201 (-0.520096) | 0.272278 / 0.579283 (-0.307005) | 0.264241 / 0.434364 (-0.170123) | 0.307187 / 0.540337 (-0.233150) | 0.401270 / 1.386936 (-0.985666) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.004831 / 0.011353 (-0.006521) | 0.002896 / 0.011008 (-0.008112) | 0.047479 / 0.038508 (0.008971) | 0.050665 / 0.023109 (0.027555) | 0.275243 / 0.275898 (-0.000655) | 0.296547 / 0.323480 (-0.026933) | 0.004022 / 0.007986 (-0.003963) | 0.002425 / 0.004328 (-0.001904) | 0.047086 / 0.004250 (0.042836) | 0.039611 / 0.037052 (0.002558) | 0.275272 / 0.258489 (0.016783) | 0.302429 / 0.293841 (0.008588) | 0.024308 / 0.128546 (-0.104238) | 0.007167 / 0.075646 (-0.068479) | 0.052825 / 0.419271 (-0.366446) | 0.032319 / 0.043533 (-0.011213) | 0.273334 / 0.255139 (0.018195) | 0.291161 / 0.283200 (0.007961) | 0.017918 / 0.141683 (-0.123764) | 1.110005 / 1.452155 (-0.342150) | 1.176616 / 1.492716 (-0.316100) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092478 / 0.018006 (0.074471) | 0.311431 / 0.000490 (0.310942) | 0.000237 / 0.000200 (0.000037) | 0.000059 / 0.000054 (0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | 
train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021979 / 0.037411 (-0.015432) | 0.080617 / 0.014526 (0.066091) | 0.081534 / 0.176557 (-0.095023) | 0.121073 / 0.737135 (-0.616062) | 0.083235 / 0.296338 (-0.213104) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289527 / 0.215209 (0.074318) | 2.839668 / 2.077655 (0.762013) | 1.601737 / 1.504120 (0.097617) | 1.496028 / 1.541195 (-0.045167) | 1.511933 / 1.468490 (0.043443) | 0.399819 / 4.584777 (-4.184958) | 2.394147 / 3.745712 (-1.351565) | 2.520767 / 5.269862 (-2.749095) | 1.589496 / 4.565676 (-2.976180) | 0.046673 / 0.424275 (-0.377602) | 0.004858 / 0.007607 (-0.002749) | 0.357986 / 0.226044 (0.131941) | 3.376217 / 2.268929 (1.107289) | 1.981853 / 55.444624 (-53.462771) | 1.682240 / 6.876477 (-5.194236) | 1.830643 / 2.142072 (-0.311429) | 0.478286 / 4.805227 (-4.326941) | 0.099589 / 6.500664 (-6.401075) | 0.041173 / 0.075469 (-0.034296) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.985160 / 1.841788 (-0.856628) | 12.312963 / 8.074308 (4.238655) | 10.577225 / 10.191392 (0.385833) | 0.130167 / 0.680424 (-0.550257) | 0.016657 / 0.534201 (-0.517544) | 0.271330 / 0.579283 (-0.307953) | 0.276979 / 0.434364 (-0.157385) | 0.304904 / 0.540337 (-0.235434) | 0.412090 / 1.386936 (-0.974846) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#1adc80151e892122ecb60f4e0b4572b136b2dd47 \"CML watermark\")\n", "The docs for this PR live [here](https://moon-ci-docs.huggingface.co/docs/datasets/pr_6448). All of your documentation changes will be reflected on that endpoint.", "hooray! 
very excited about this", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005039 / 0.011353 (-0.006314) | 0.003577 / 0.011008 (-0.007431) | 0.062892 / 0.038508 (0.024384) | 0.056334 / 0.023109 (0.033225) | 0.252281 / 0.275898 (-0.023617) | 0.274945 / 0.323480 (-0.048535) | 0.003906 / 0.007986 (-0.004080) | 0.002483 / 0.004328 (-0.001845) | 0.049006 / 0.004250 (0.044756) | 0.038375 / 0.037052 (0.001323) | 0.257376 / 0.258489 (-0.001113) | 0.292512 / 0.293841 (-0.001328) | 0.027134 / 0.128546 (-0.101412) | 0.010579 / 0.075646 (-0.065068) | 0.212021 / 0.419271 (-0.207250) | 0.035851 / 0.043533 (-0.007682) | 0.258076 / 0.255139 (0.002937) | 0.271758 / 0.283200 (-0.011442) | 0.018222 / 0.141683 (-0.123461) | 1.120481 / 1.452155 (-0.331674) | 1.187007 / 1.492716 (-0.305710) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094986 / 0.018006 (0.076980) | 0.302121 / 0.000490 (0.301631) | 0.000211 / 0.000200 (0.000011) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019260 / 0.037411 (-0.018152) | 0.062909 / 0.014526 (0.048383) | 0.075644 / 0.176557 (-0.100912) | 0.120966 / 0.737135 (-0.616170) | 0.076678 / 0.296338 (-0.219661) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286754 / 0.215209 (0.071545) | 2.797467 / 2.077655 (0.719812) | 1.436798 / 1.504120 (-0.067322) | 1.315032 / 1.541195 
(-0.226163) | 1.367841 / 1.468490 (-0.100649) | 0.578917 / 4.584777 (-4.005860) | 2.439773 / 3.745712 (-1.305939) | 2.932779 / 5.269862 (-2.337082) | 1.843895 / 4.565676 (-2.721782) | 0.063351 / 0.424275 (-0.360925) | 0.004998 / 0.007607 (-0.002610) | 0.347385 / 0.226044 (0.121340) | 3.449969 / 2.268929 (1.181040) | 1.857734 / 55.444624 (-53.586890) | 1.541341 / 6.876477 (-5.335136) | 1.574915 / 2.142072 (-0.567158) | 0.660178 / 4.805227 (-4.145049) | 0.117686 / 6.500664 (-6.382978) | 0.042602 / 0.075469 (-0.032867) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.937735 / 1.841788 (-0.904052) | 11.962091 / 8.074308 (3.887783) | 10.401715 / 10.191392 (0.210323) | 0.142200 / 0.680424 (-0.538224) | 0.014137 / 0.534201 (-0.520064) | 0.289853 / 0.579283 (-0.289430) | 0.267100 / 0.434364 (-0.167264) | 0.323401 / 0.540337 (-0.216936) | 0.418665 / 1.386936 (-0.968271) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005480 / 0.011353 (-0.005873) | 0.003401 / 0.011008 (-0.007607) | 0.049304 / 0.038508 (0.010796) | 0.062043 / 0.023109 (0.038934) | 0.270571 / 0.275898 (-0.005327) | 0.295226 / 0.323480 (-0.028254) | 0.004152 / 0.007986 (-0.003834) | 0.002511 / 0.004328 (-0.001817) | 0.048480 / 0.004250 (0.044229) | 0.043964 / 0.037052 (0.006912) | 0.273545 / 0.258489 (0.015056) | 0.295152 / 0.293841 (0.001311) | 0.029224 / 0.128546 (-0.099322) | 0.010629 / 0.075646 (-0.065018) | 0.057433 / 0.419271 (-0.361839) | 0.033115 / 0.043533 (-0.010418) | 0.269893 / 0.255139 (0.014754) | 0.288658 / 0.283200 (0.005459) | 0.018216 / 0.141683 (-0.123467) | 1.123039 / 1.452155 (-0.329116) | 1.182892 / 1.492716 (-0.309825) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095948 / 0.018006 (0.077942) | 0.305811 / 0.000490 (0.305321) | 0.000221 / 0.000200 (0.000021) | 0.000053 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | 
shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022996 / 0.037411 (-0.014415) | 0.073836 / 0.014526 (0.059310) | 0.082658 / 0.176557 (-0.093899) | 0.121970 / 0.737135 (-0.615166) | 0.086096 / 0.296338 (-0.210242) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291032 / 0.215209 (0.075823) | 2.864613 / 2.077655 (0.786958) | 1.567530 / 1.504120 (0.063410) | 1.460291 / 1.541195 (-0.080903) | 1.527066 / 1.468490 (0.058576) | 0.571160 / 4.584777 (-4.013617) | 2.465261 / 3.745712 (-1.280451) | 2.915547 / 5.269862 (-2.354314) | 1.835822 / 4.565676 (-2.729855) | 0.064328 / 0.424275 (-0.359947) | 0.005061 / 0.007607 (-0.002546) | 0.357105 / 0.226044 (0.131061) | 3.491363 / 2.268929 (1.222435) | 1.943213 / 55.444624 (-53.501412) | 1.675778 / 6.876477 (-5.200699) | 1.719016 / 2.142072 (-0.423057) | 0.658993 / 4.805227 (-4.146235) | 0.122320 / 6.500664 (-6.378344) | 0.049030 / 0.075469 (-0.026439) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.964762 / 1.841788 (-0.877025) | 12.367251 / 8.074308 (4.292943) | 10.886213 / 10.191392 (0.694821) | 0.141533 / 0.680424 (-0.538891) | 0.015646 / 0.534201 (-0.518555) | 0.288583 / 0.579283 (-0.290700) | 0.280353 / 0.434364 (-0.154010) | 0.329095 / 0.540337 (-0.211242) | 0.565118 / 1.386936 (-0.821818) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#493bf695dc3ee6cc81bfd0aae6a38f70547bb752 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | 
write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.006475 / 0.011353 (-0.004878) | 0.004080 / 0.011008 (-0.006928) | 0.066479 / 0.038508 (0.027971) | 0.073270 / 0.023109 (0.050161) | 0.244412 / 0.275898 (-0.031486) | 0.273778 / 0.323480 (-0.049702) | 0.003186 / 0.007986 (-0.004800) | 0.003419 / 0.004328 (-0.000910) | 0.049743 / 0.004250 (0.045492) | 0.043581 / 0.037052 (0.006529) | 0.248215 / 0.258489 (-0.010274) | 0.280873 / 0.293841 (-0.012967) | 0.029282 / 0.128546 (-0.099264) | 0.011241 / 0.075646 (-0.064405) | 0.215031 / 0.419271 (-0.204241) | 0.038764 / 0.043533 (-0.004769) | 0.259363 / 0.255139 (0.004224) | 0.279253 / 0.283200 (-0.003946) | 0.019524 / 0.141683 (-0.122159) | 1.104735 / 1.452155 (-0.347420) | 1.159823 / 1.492716 (-0.332894) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.108383 / 0.018006 (0.090377) | 0.332904 / 0.000490 (0.332415) | 0.000222 / 0.000200 (0.000022) | 0.000065 / 0.000054 (0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.020693 / 0.037411 (-0.016719) | 0.071764 / 0.014526 (0.057238) | 0.077073 / 0.176557 (-0.099484) | 0.124604 / 0.737135 (-0.612532) | 0.078057 / 0.296338 (-0.218282) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.291014 / 0.215209 (0.075805) | 2.865885 / 2.077655 (0.788231) | 1.506141 / 1.504120 (0.002021) | 1.435924 / 1.541195 (-0.105271) | 1.461994 / 1.468490 (-0.006497) | 0.571779 / 4.584777 (-4.012998) | 2.461950 / 3.745712 (-1.283762) | 3.079771 / 5.269862 (-2.190091) | 1.933337 / 4.565676 (-2.632339) | 0.063405 / 0.424275 (-0.360870) | 0.005203 / 0.007607 (-0.002404) | 0.345077 / 0.226044 (0.119032) | 3.487189 / 2.268929 (1.218261) | 1.903733 / 55.444624 (-53.540891) | 1.705596 / 6.876477 (-5.170880) | 1.718849 / 2.142072 (-0.423223) | 0.658745 / 4.805227 (-4.146482) | 0.120847 / 6.500664 (-6.379817) | 0.045670 / 0.075469 (-0.029799) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.965969 / 1.841788 (-0.875819) | 13.520489 / 8.074308 (5.446181) | 12.322363 / 10.191392 (2.130971) | 0.146605 / 0.680424 (-0.533819) | 0.015061 / 0.534201 (-0.519140) | 0.298125 / 0.579283 (-0.281159) | 0.276864 / 0.434364 (-0.157500) | 0.326787 / 0.540337 (-0.213550) | 0.436897 / 1.386936 (-0.950039) 
|\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005862 / 0.011353 (-0.005491) | 0.003716 / 0.011008 (-0.007292) | 0.052849 / 0.038508 (0.014341) | 0.072114 / 0.023109 (0.049005) | 0.277800 / 0.275898 (0.001902) | 0.325321 / 0.323480 (0.001841) | 0.004428 / 0.007986 (-0.003557) | 0.002527 / 0.004328 (-0.001801) | 0.048847 / 0.004250 (0.044596) | 0.047355 / 0.037052 (0.010303) | 0.279331 / 0.258489 (0.020842) | 0.310477 / 0.293841 (0.016636) | 0.029661 / 0.128546 (-0.098886) | 0.010812 / 0.075646 (-0.064834) | 0.059803 / 0.419271 (-0.359469) | 0.033554 / 0.043533 (-0.009978) | 0.276890 / 0.255139 (0.021751) | 0.308911 / 0.283200 (0.025712) | 0.020752 / 0.141683 (-0.120931) | 1.120896 / 1.452155 (-0.331259) | 1.186428 / 1.492716 (-0.306288) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.106551 / 0.018006 (0.088545) | 0.354455 / 0.000490 (0.353966) | 0.000353 / 0.000200 (0.000153) | 0.000069 / 0.000054 (0.000015) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.023488 / 0.037411 (-0.013923) | 0.080548 / 0.014526 (0.066022) | 0.084431 / 0.176557 (-0.092126) | 0.140698 / 0.737135 (-0.596438) | 0.085692 / 0.296338 (-0.210647) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.314253 / 0.215209 (0.099044) | 2.993236 / 2.077655 (0.915582) | 1.639013 / 1.504120 (0.134893) | 1.543966 / 1.541195 (0.002771) | 1.567732 / 1.468490 (0.099242) | 0.565857 / 4.584777 
(-4.018920) | 2.545339 / 3.745712 (-1.200373) | 3.134546 / 5.269862 (-2.135316) | 1.940350 / 4.565676 (-2.625326) | 0.063847 / 0.424275 (-0.360429) | 0.005079 / 0.007607 (-0.002528) | 0.365762 / 0.226044 (0.139718) | 3.610921 / 2.268929 (1.341993) | 2.035151 / 55.444624 (-53.409473) | 1.773409 / 6.876477 (-5.103068) | 1.790332 / 2.142072 (-0.351741) | 0.683019 / 4.805227 (-4.122209) | 0.119566 / 6.500664 (-6.381099) | 0.043578 / 0.075469 (-0.031891) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.996568 / 1.841788 (-0.845219) | 14.094366 / 8.074308 (6.020058) | 12.433600 / 10.191392 (2.242208) | 0.139835 / 0.680424 (-0.540589) | 0.016454 / 0.534201 (-0.517747) | 0.294073 / 0.579283 (-0.285210) | 0.309032 / 0.434364 (-0.125332) | 0.330699 / 0.540337 (-0.209638) | 0.619392 / 1.386936 (-0.767544) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#026fbce1c93a30188b6d0646bb975da8f56e2a2f \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005389 / 0.011353 (-0.005964) | 0.003209 / 0.011008 (-0.007799) | 0.061610 / 0.038508 (0.023102) | 0.049781 / 0.023109 (0.026672) | 0.240208 / 0.275898 (-0.035690) | 0.263307 / 0.323480 (-0.060173) | 0.002908 / 0.007986 (-0.005078) | 0.002375 / 0.004328 (-0.001953) | 0.047462 / 0.004250 (0.043212) | 0.038643 / 0.037052 (0.001591) | 0.246287 / 0.258489 (-0.012202) | 0.278715 / 0.293841 (-0.015126) | 0.027507 / 0.128546 (-0.101039) | 0.010168 / 0.075646 (-0.065479) | 0.204131 / 0.419271 (-0.215140) | 0.035452 / 0.043533 (-0.008081) | 0.251721 / 0.255139 (-0.003418) | 0.266642 / 0.283200 (-0.016558) | 0.017741 / 0.141683 (-0.123942) | 1.094672 / 1.452155 (-0.357482) | 1.162715 / 1.492716 (-0.330002) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092154 / 0.018006 (0.074148) | 0.301376 / 0.000490 (0.300886) | 0.000217 / 0.000200 (0.000017) | 0.000051 / 
0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018534 / 0.037411 (-0.018877) | 0.061995 / 0.014526 (0.047469) | 0.072654 / 0.176557 (-0.103903) | 0.119501 / 0.737135 (-0.617635) | 0.073756 / 0.296338 (-0.222583) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.280066 / 0.215209 (0.064857) | 2.744207 / 2.077655 (0.666553) | 1.483367 / 1.504120 (-0.020753) | 1.386173 / 1.541195 (-0.155022) | 1.381833 / 1.468490 (-0.086657) | 0.552780 / 4.584777 (-4.031997) | 2.395541 / 3.745712 (-1.350171) | 2.747507 / 5.269862 (-2.522355) | 1.735074 / 4.565676 (-2.830602) | 0.062096 / 0.424275 (-0.362179) | 0.004905 / 0.007607 (-0.002702) | 0.338327 / 0.226044 (0.112283) | 3.365391 / 2.268929 (1.096462) | 1.839663 / 55.444624 (-53.604961) | 1.577535 / 6.876477 (-5.298942) | 1.558054 / 2.142072 (-0.584018) | 0.636520 / 4.805227 (-4.168708) | 0.116182 / 6.500664 (-6.384482) | 0.042078 / 0.075469 (-0.033391) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.938512 / 1.841788 (-0.903276) | 11.455749 / 8.074308 (3.381441) | 10.510985 / 10.191392 (0.319593) | 0.140865 / 0.680424 (-0.539559) | 0.014073 / 0.534201 (-0.520128) | 0.294747 / 0.579283 (-0.284536) | 0.266147 / 0.434364 (-0.168217) | 0.325354 / 0.540337 (-0.214984) | 0.422182 / 1.386936 (-0.964754) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005231 / 0.011353 (-0.006122) | 0.003032 / 0.011008 (-0.007977) | 0.049608 / 0.038508 (0.011099) | 0.051441 / 0.023109 (0.028332) | 0.273812 / 0.275898 (-0.002086) | 0.294318 / 0.323480 (-0.029162) | 0.003958 / 0.007986 (-0.004028) | 0.002384 / 0.004328 (-0.001944) | 0.047942 / 0.004250 (0.043691) | 0.039179 / 0.037052 (0.002127) | 0.277504 / 0.258489 (0.019014) | 0.299713 / 0.293841 (0.005872) | 0.028989 / 0.128546 (-0.099557) | 0.010267 / 0.075646 (-0.065379) | 0.058318 / 0.419271 (-0.360954) | 0.032214 / 0.043533 (-0.011318) | 0.277964 / 0.255139 (0.022825) | 0.293055 / 0.283200 (0.009856) | 0.018532 / 0.141683 (-0.123151) | 1.128620 / 1.452155 (-0.323535) | 1.187365 / 1.492716 (-0.305351) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092137 / 0.018006 (0.074130) | 0.299726 / 0.000490 (0.299236) | 0.000222 / 0.000200 (0.000022) | 0.000050 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021342 / 0.037411 (-0.016070) | 0.069943 / 0.014526 (0.055417) | 0.079862 / 0.176557 (-0.096694) | 0.118917 / 0.737135 (-0.618218) | 0.081861 / 0.296338 (-0.214477) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295883 / 0.215209 (0.080674) | 2.881640 / 2.077655 (0.803986) | 1.597705 / 1.504120 (0.093585) | 1.473220 / 1.541195 (-0.067975) | 1.501006 / 1.468490 (0.032516) | 0.559409 / 4.584777 (-4.025368) | 2.442709 / 3.745712 (-1.303003) | 2.742139 / 5.269862 (-2.527723) | 1.726002 / 4.565676 (-2.839674) | 0.062436 / 0.424275 (-0.361840) | 0.004896 / 0.007607 (-0.002711) | 0.349203 / 0.226044 (0.123159) | 3.435175 / 2.268929 (1.166247) | 1.954888 / 55.444624 (-53.489737) | 1.666233 / 6.876477 (-5.210243) | 1.680852 / 2.142072 (-0.461221) | 0.644271 / 4.805227 (-4.160956) | 0.115160 / 6.500664 (-6.385504) | 0.040681 / 0.075469 (-0.034788) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963810 / 1.841788 (-0.877977) | 11.860860 / 8.074308 (3.786552) | 10.541703 / 10.191392 (0.350311) | 0.131532 / 0.680424 (-0.548892) | 0.016790 / 0.534201 (-0.517411) | 0.286695 / 0.579283 (-0.292588) | 0.279628 / 0.434364 (-0.154735) | 0.324622 / 0.540337 (-0.215715) | 0.535507 / 1.386936 (-0.851429) 
|\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#11217347e4bcfe1aaf794d164a5dd9f085b2f682 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005672 / 0.011353 (-0.005681) | 0.003411 / 0.011008 (-0.007597) | 0.062528 / 0.038508 (0.024020) | 0.055209 / 0.023109 (0.032100) | 0.248366 / 0.275898 (-0.027532) | 0.279522 / 0.323480 (-0.043957) | 0.002907 / 0.007986 (-0.005079) | 0.002369 / 0.004328 (-0.001959) | 0.047982 / 0.004250 (0.043731) | 0.039009 / 0.037052 (0.001956) | 0.256422 / 0.258489 (-0.002067) | 0.288530 / 0.293841 (-0.005311) | 0.028164 / 0.128546 (-0.100382) | 0.010448 / 0.075646 (-0.065198) | 0.208863 / 0.419271 (-0.210408) | 0.036291 / 0.043533 (-0.007242) | 0.251642 / 0.255139 (-0.003497) | 0.275589 / 0.283200 (-0.007610) | 0.019839 / 0.141683 (-0.121844) | 1.092800 / 1.452155 (-0.359355) | 1.147950 / 1.492716 (-0.344766) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094920 / 0.018006 (0.076914) | 0.303049 / 0.000490 (0.302559) | 0.000199 / 0.000200 (-0.000001) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018820 / 0.037411 (-0.018591) | 0.063319 / 0.014526 (0.048793) | 0.073644 / 0.176557 (-0.102912) | 0.120045 / 0.737135 (-0.617091) | 0.076219 / 0.296338 (-0.220119) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.283897 / 
0.215209 (0.068688) | 2.822836 / 2.077655 (0.745182) | 1.490505 / 1.504120 (-0.013615) | 1.359777 / 1.541195 (-0.181418) | 1.420536 / 1.468490 (-0.047954) | 0.562308 / 4.584777 (-4.022469) | 2.419249 / 3.745712 (-1.326463) | 2.827620 / 5.269862 (-2.442241) | 1.783171 / 4.565676 (-2.782505) | 0.063206 / 0.424275 (-0.361069) | 0.004966 / 0.007607 (-0.002641) | 0.339647 / 0.226044 (0.113602) | 3.378157 / 2.268929 (1.109229) | 1.873221 / 55.444624 (-53.571403) | 1.606367 / 6.876477 (-5.270109) | 1.624976 / 2.142072 (-0.517096) | 0.652653 / 4.805227 (-4.152574) | 0.117997 / 6.500664 (-6.382667) | 0.041955 / 0.075469 (-0.033514) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.961420 / 1.841788 (-0.880368) | 11.807624 / 8.074308 (3.733316) | 10.668249 / 10.191392 (0.476857) | 0.141855 / 0.680424 (-0.538569) | 0.014451 / 0.534201 (-0.519750) | 0.289706 / 0.579283 (-0.289577) | 0.268392 / 0.434364 (-0.165972) | 0.323435 / 0.540337 (-0.216903) | 0.420667 / 1.386936 (-0.966269) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005382 / 0.011353 (-0.005971) | 0.003361 / 0.011008 (-0.007647) | 0.048420 / 0.038508 (0.009912) | 0.053702 / 0.023109 (0.030593) | 0.286976 / 0.275898 (0.011078) | 0.296708 / 0.323480 (-0.026772) | 0.004013 / 0.007986 (-0.003972) | 0.002444 / 0.004328 (-0.001884) | 0.047797 / 0.004250 (0.043547) | 0.042361 / 0.037052 (0.005309) | 0.277543 / 0.258489 (0.019054) | 0.300736 / 0.293841 (0.006896) | 0.029894 / 0.128546 (-0.098653) | 0.014119 / 0.075646 (-0.061527) | 0.057636 / 0.419271 (-0.361636) | 0.032533 / 0.043533 (-0.010999) | 0.280963 / 0.255139 (0.025824) | 0.291305 / 0.283200 (0.008106) | 0.018391 / 0.141683 (-0.123292) | 1.140042 / 1.452155 (-0.312113) | 1.179485 / 1.492716 (-0.313231) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094668 / 0.018006 (0.076661) | 0.301677 / 0.000490 (0.301187) | 0.000245 / 0.000200 (0.000045) | 
0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021376 / 0.037411 (-0.016036) | 0.070628 / 0.014526 (0.056102) | 0.082249 / 0.176557 (-0.094308) | 0.120423 / 0.737135 (-0.616712) | 0.083792 / 0.296338 (-0.212546) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.298884 / 0.215209 (0.083675) | 2.931849 / 2.077655 (0.854194) | 1.591888 / 1.504120 (0.087768) | 1.455781 / 1.541195 (-0.085414) | 1.500312 / 1.468490 (0.031822) | 0.558466 / 4.584777 (-4.026311) | 2.450449 / 3.745712 (-1.295263) | 2.842768 / 5.269862 (-2.427094) | 1.755614 / 4.565676 (-2.810062) | 0.063200 / 0.424275 (-0.361075) | 0.005022 / 0.007607 (-0.002585) | 0.358282 / 0.226044 (0.132238) | 3.575392 / 2.268929 (1.306464) | 1.960258 / 55.444624 (-53.484366) | 1.675518 / 6.876477 (-5.200959) | 1.696630 / 2.142072 (-0.445442) | 0.647185 / 4.805227 (-4.158042) | 0.117038 / 6.500664 (-6.383626) | 0.041622 / 0.075469 (-0.033848) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.962503 / 1.841788 (-0.879285) | 12.194950 / 8.074308 (4.120642) | 10.662233 / 10.191392 (0.470841) | 0.131618 / 0.680424 (-0.548806) | 0.016000 / 0.534201 (-0.518201) | 0.291546 / 0.579283 (-0.287737) | 0.279537 / 0.434364 (-0.154827) | 0.328716 / 0.540337 (-0.211622) | 0.547565 / 1.386936 (-0.839371) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4de8f5f09f60613d47b5d7eb901752321c7b6a49 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after 
write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005209 / 0.011353 (-0.006144) | 0.003017 / 0.011008 (-0.007991) | 0.062017 / 0.038508 (0.023509) | 0.048268 / 0.023109 (0.025158) | 0.246384 / 0.275898 (-0.029514) | 0.270441 / 0.323480 (-0.053039) | 0.002763 / 0.007986 (-0.005222) | 0.003140 / 0.004328 (-0.001188) | 0.048720 / 0.004250 (0.044470) | 0.038175 / 0.037052 (0.001123) | 0.254184 / 0.258489 (-0.004306) | 0.275515 / 0.293841 (-0.018326) | 0.027309 / 0.128546 (-0.101238) | 0.010507 / 0.075646 (-0.065140) | 0.210315 / 0.419271 (-0.208956) | 0.035203 / 0.043533 (-0.008329) | 0.253015 / 0.255139 (-0.002124) | 0.271465 / 0.283200 (-0.011734) | 0.019543 / 0.141683 (-0.122140) | 1.119242 / 1.452155 (-0.332913) | 1.149359 / 1.492716 (-0.343357) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.088935 / 0.018006 (0.070928) | 0.293922 / 0.000490 (0.293432) | 0.000202 / 0.000200 (0.000002) | 0.000051 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018174 / 0.037411 (-0.019237) | 0.060215 / 0.014526 (0.045689) | 0.072868 / 0.176557 (-0.103689) | 0.117998 / 0.737135 (-0.619137) | 0.074159 / 0.296338 (-0.222179) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289229 / 0.215209 (0.074020) | 2.840414 / 2.077655 (0.762759) | 1.468357 / 1.504120 (-0.035763) | 1.347714 / 1.541195 (-0.193481) | 1.363704 / 1.468490 (-0.104786) | 0.572059 / 4.584777 (-4.012718) | 2.400631 / 3.745712 (-1.345081) | 2.755779 / 5.269862 (-2.514083) | 1.740937 / 4.565676 (-2.824739) | 0.063473 / 0.424275 (-0.360802) | 0.005012 / 0.007607 (-0.002595) | 0.336057 / 0.226044 (0.110012) | 3.382126 / 2.268929 (1.113197) | 1.807838 / 55.444624 (-53.636786) | 1.534594 / 6.876477 (-5.341883) | 1.529951 / 2.142072 (-0.612121) | 0.636661 / 4.805227 (-4.168566) | 0.117090 / 6.500664 (-6.383574) | 0.042310 / 0.075469 (-0.033160) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.924440 / 1.841788 (-0.917347) | 11.120517 / 8.074308 (3.046209) | 10.177210 / 10.191392 (-0.014182) | 0.139060 / 0.680424 (-0.541364) | 0.013818 / 0.534201 (-0.520383) | 0.285634 / 0.579283 
(-0.293649) | 0.268657 / 0.434364 (-0.165706) | 0.325842 / 0.540337 (-0.214496) | 0.439902 / 1.386936 (-0.947034) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005202 / 0.011353 (-0.006150) | 0.003002 / 0.011008 (-0.008006) | 0.048729 / 0.038508 (0.010221) | 0.048178 / 0.023109 (0.025069) | 0.288573 / 0.275898 (0.012675) | 0.311122 / 0.323480 (-0.012358) | 0.003953 / 0.007986 (-0.004033) | 0.002544 / 0.004328 (-0.001785) | 0.047762 / 0.004250 (0.043511) | 0.039711 / 0.037052 (0.002658) | 0.308389 / 0.258489 (0.049900) | 0.321913 / 0.293841 (0.028072) | 0.029166 / 0.128546 (-0.099380) | 0.010697 / 0.075646 (-0.064950) | 0.057758 / 0.419271 (-0.361514) | 0.032743 / 0.043533 (-0.010789) | 0.290933 / 0.255139 (0.035794) | 0.309404 / 0.283200 (0.026205) | 0.017691 / 0.141683 (-0.123992) | 1.157713 / 1.452155 (-0.294442) | 1.210485 / 1.492716 (-0.282231) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.088959 / 0.018006 (0.070953) | 0.298531 / 0.000490 (0.298041) | 0.000221 / 0.000200 (0.000021) | 0.000053 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021129 / 0.037411 (-0.016283) | 0.068419 / 0.014526 (0.053893) | 0.079328 / 0.176557 (-0.097228) | 0.118603 / 0.737135 (-0.618532) | 0.080489 / 0.296338 (-0.215850) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.292464 / 0.215209 (0.077254) | 2.898221 / 2.077655 (0.820566) | 
1.600868 / 1.504120 (0.096748) | 1.485128 / 1.541195 (-0.056067) | 1.493091 / 1.468490 (0.024600) | 0.576117 / 4.584777 (-4.008660) | 2.450440 / 3.745712 (-1.295273) | 2.746026 / 5.269862 (-2.523836) | 1.722555 / 4.565676 (-2.843122) | 0.062869 / 0.424275 (-0.361406) | 0.004918 / 0.007607 (-0.002689) | 0.348470 / 0.226044 (0.122425) | 3.420267 / 2.268929 (1.151339) | 1.942973 / 55.444624 (-53.501651) | 1.667684 / 6.876477 (-5.208793) | 1.669618 / 2.142072 (-0.472454) | 0.630275 / 4.805227 (-4.174952) | 0.115072 / 6.500664 (-6.385592) | 0.040430 / 0.075469 (-0.035039) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.989827 / 1.841788 (-0.851961) | 11.578068 / 8.074308 (3.503760) | 10.636060 / 10.191392 (0.444668) | 0.131943 / 0.680424 (-0.548481) | 0.015915 / 0.534201 (-0.518286) | 0.287277 / 0.579283 (-0.292006) | 0.279451 / 0.434364 (-0.154913) | 0.325485 / 0.540337 (-0.214852) | 0.544635 / 1.386936 (-0.842301) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#f22579be6c73867ac1a3c03e925abaf4872f8437 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005144 / 0.011353 (-0.006209) | 0.003686 / 0.011008 (-0.007322) | 0.064003 / 0.038508 (0.025495) | 0.058962 / 0.023109 (0.035853) | 0.233753 / 0.275898 (-0.042145) | 0.255802 / 0.323480 (-0.067677) | 0.003871 / 0.007986 (-0.004115) | 0.002609 / 0.004328 (-0.001719) | 0.048675 / 0.004250 (0.044425) | 0.037550 / 0.037052 (0.000498) | 0.240658 / 0.258489 (-0.017831) | 0.272303 / 0.293841 (-0.021538) | 0.027455 / 0.128546 (-0.101091) | 0.010706 / 0.075646 (-0.064941) | 0.210878 / 0.419271 (-0.208393) | 0.035763 / 0.043533 (-0.007770) | 0.239937 / 0.255139 (-0.015202) | 0.262520 / 0.283200 (-0.020680) | 0.017676 / 0.141683 (-0.124006) | 1.095036 / 1.452155 (-0.357118) | 1.178318 / 1.492716 (-0.314399) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old 
(diff) | 0.095310 / 0.018006 (0.077304) | 0.307485 / 0.000490 (0.306995) | 0.000212 / 0.000200 (0.000013) | 0.000047 / 0.000054 (-0.000007) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018630 / 0.037411 (-0.018781) | 0.060461 / 0.014526 (0.045936) | 0.073117 / 0.176557 (-0.103440) | 0.119737 / 0.737135 (-0.617399) | 0.073909 / 0.296338 (-0.222430) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.280938 / 0.215209 (0.065729) | 2.755333 / 2.077655 (0.677679) | 1.468153 / 1.504120 (-0.035967) | 1.350247 / 1.541195 (-0.190948) | 1.379834 / 1.468490 (-0.088656) | 0.564027 / 4.584777 (-4.020750) | 2.387794 / 3.745712 (-1.357918) | 2.768529 / 5.269862 (-2.501333) | 1.761994 / 4.565676 (-2.803682) | 0.062079 / 0.424275 (-0.362196) | 0.005018 / 0.007607 (-0.002589) | 0.337576 / 0.226044 (0.111532) | 3.345347 / 2.268929 (1.076418) | 1.821950 / 55.444624 (-53.622674) | 1.545471 / 6.876477 (-5.331006) | 1.534941 / 2.142072 (-0.607131) | 0.626560 / 4.805227 (-4.178668) | 0.116227 / 6.500664 (-6.384437) | 0.041722 / 0.075469 (-0.033747) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.950480 / 1.841788 (-0.891307) | 11.616355 / 8.074308 (3.542047) | 10.426687 / 10.191392 (0.235295) | 0.129967 / 0.680424 (-0.550457) | 0.013977 / 0.534201 (-0.520224) | 0.287150 / 0.579283 (-0.292133) | 0.264028 / 0.434364 (-0.170336) | 0.325061 / 0.540337 (-0.215277) | 0.441281 / 1.386936 (-0.945655) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after 
write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005436 / 0.011353 (-0.005917) | 0.003567 / 0.011008 (-0.007441) | 0.055275 / 0.038508 (0.016767) | 0.053216 / 0.023109 (0.030107) | 0.272826 / 0.275898 (-0.003072) | 0.298399 / 0.323480 (-0.025081) | 0.004803 / 0.007986 (-0.003183) | 0.002681 / 0.004328 (-0.001648) | 0.048704 / 0.004250 (0.044453) | 0.040048 / 0.037052 (0.002996) | 0.278200 / 0.258489 (0.019711) | 0.331167 / 0.293841 (0.037326) | 0.029282 / 0.128546 (-0.099265) | 0.010766 / 0.075646 (-0.064881) | 0.057370 / 0.419271 (-0.361902) | 0.032674 / 0.043533 (-0.010859) | 0.269430 / 0.255139 (0.014291) | 0.288256 / 0.283200 (0.005056) | 0.019340 / 0.141683 (-0.122343) | 1.118058 / 1.452155 (-0.334097) | 1.157811 / 1.492716 (-0.334906) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094091 / 0.018006 (0.076085) | 0.301833 / 0.000490 (0.301343) | 0.000216 / 0.000200 (0.000016) | 0.000053 / 0.000054 (-0.000002) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021327 / 0.037411 (-0.016085) | 0.068636 / 0.014526 (0.054110) | 0.080246 / 0.176557 (-0.096311) | 0.120524 / 0.737135 (-0.616611) | 0.082226 / 0.296338 (-0.214113) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.293579 / 0.215209 (0.078370) | 2.880281 / 2.077655 (0.802626) | 1.594647 / 1.504120 (0.090528) | 1.477152 / 1.541195 (-0.064043) | 1.498122 / 1.468490 (0.029632) | 0.555073 / 4.584777 (-4.029704) | 2.446743 / 3.745712 (-1.298970) | 2.794971 / 5.269862 (-2.474890) | 1.749730 / 4.565676 (-2.815947) | 0.062537 / 0.424275 (-0.361738) | 0.004908 / 0.007607 (-0.002699) | 0.350772 / 0.226044 (0.124727) | 3.486535 / 2.268929 (1.217607) | 1.957414 / 55.444624 (-53.487210) | 1.669169 / 6.876477 (-5.207308) | 1.682396 / 2.142072 (-0.459676) | 0.627379 / 4.805227 (-4.177848) | 0.117218 / 6.500664 (-6.383446) | 0.041000 / 0.075469 (-0.034469) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.958248 / 1.841788 (-0.883539) | 12.022677 / 8.074308 (3.948369) | 10.331661 / 10.191392 (0.140269) | 0.129765 / 0.680424 (-0.550659) | 0.015073 / 0.534201 (-0.519128) | 0.287212 / 0.579283 (-0.292071) | 0.278310 / 0.434364 (-0.156054) | 0.328155 / 
0.540337 (-0.212183) | 0.564990 / 1.386936 (-0.821946) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#0c16e56371e50adae771288945e3389cb81a31fd \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005576 / 0.011353 (-0.005777) | 0.003430 / 0.011008 (-0.007578) | 0.062714 / 0.038508 (0.024206) | 0.051240 / 0.023109 (0.028131) | 0.236637 / 0.275898 (-0.039261) | 0.262660 / 0.323480 (-0.060820) | 0.002924 / 0.007986 (-0.005061) | 0.002712 / 0.004328 (-0.001616) | 0.048680 / 0.004250 (0.044430) | 0.038997 / 0.037052 (0.001945) | 0.241426 / 0.258489 (-0.017063) | 0.270652 / 0.293841 (-0.023189) | 0.027355 / 0.128546 (-0.101192) | 0.010640 / 0.075646 (-0.065006) | 0.207754 / 0.419271 (-0.211517) | 0.035921 / 0.043533 (-0.007612) | 0.247645 / 0.255139 (-0.007494) | 0.262933 / 0.283200 (-0.020266) | 0.019658 / 0.141683 (-0.122025) | 1.112576 / 1.452155 (-0.339578) | 1.177362 / 1.492716 (-0.315354) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.098100 / 0.018006 (0.080093) | 0.310170 / 0.000490 (0.309680) | 0.000220 / 0.000200 (0.000020) | 0.000051 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019626 / 0.037411 (-0.017785) | 0.065468 / 0.014526 (0.050942) | 0.074767 / 0.176557 (-0.101789) | 0.123619 / 0.737135 (-0.613516) | 0.077159 / 0.296338 (-0.219179) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.288585 / 0.215209 (0.073376) | 2.771254 / 2.077655 (0.693599) | 1.457091 / 1.504120 (-0.047029) | 1.324341 / 1.541195 (-0.216854) | 1.361960 / 1.468490 (-0.106530) | 0.574197 / 4.584777 (-4.010580) | 2.391440 / 3.745712 (-1.354273) | 2.935060 / 5.269862 (-2.334802) | 1.802792 / 4.565676 (-2.762884) | 0.063530 / 0.424275 (-0.360745) | 0.005129 / 0.007607 (-0.002478) | 0.345977 / 0.226044 (0.119933) | 3.368042 / 2.268929 (1.099113) | 1.789575 / 55.444624 (-53.655050) | 1.509165 / 6.876477 (-5.367312) | 1.579792 / 2.142072 (-0.562280) | 0.652136 / 4.805227 (-4.153091) | 0.117014 / 6.500664 (-6.383650) | 0.042385 / 0.075469 (-0.033084) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963967 / 1.841788 (-0.877821) | 11.847856 / 8.074308 (3.773548) | 10.584088 / 10.191392 (0.392696) | 0.143953 / 0.680424 (-0.536471) | 0.014355 / 0.534201 (-0.519846) | 0.286936 / 0.579283 (-0.292347) | 0.269039 / 0.434364 (-0.165325) | 0.324531 / 0.540337 (-0.215807) | 0.443187 / 1.386936 (-0.943749) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005448 / 0.011353 (-0.005905) | 0.003742 / 0.011008 (-0.007266) | 0.048808 / 0.038508 (0.010300) | 0.055409 / 0.023109 (0.032300) | 0.271574 / 0.275898 (-0.004324) | 0.295599 / 0.323480 (-0.027881) | 0.004208 / 0.007986 (-0.003778) | 0.002683 / 0.004328 (-0.001645) | 0.048813 / 0.004250 (0.044562) | 0.043672 / 0.037052 (0.006620) | 0.282173 / 0.258489 (0.023684) | 0.295447 / 0.293841 (0.001606) | 0.030461 / 0.128546 (-0.098086) | 0.010988 / 0.075646 (-0.064658) | 0.057050 / 0.419271 (-0.362221) | 0.033329 / 0.043533 (-0.010203) | 0.269700 / 0.255139 (0.014561) | 0.287099 / 0.283200 (0.003899) | 0.018203 / 0.141683 (-0.123480) | 1.142584 / 1.452155 (-0.309571) | 1.181848 / 1.492716 (-0.310869) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| 
new / old (diff) | 0.096958 / 0.018006 (0.078952) | 0.310563 / 0.000490 (0.310074) | 0.000224 / 0.000200 (0.000024) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022213 / 0.037411 (-0.015199) | 0.072054 / 0.014526 (0.057528) | 0.086393 / 0.176557 (-0.090163) | 0.122431 / 0.737135 (-0.614704) | 0.085298 / 0.296338 (-0.211041) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290823 / 0.215209 (0.075614) | 2.838026 / 2.077655 (0.760371) | 1.541425 / 1.504120 (0.037305) | 1.431903 / 1.541195 (-0.109292) | 1.476567 / 1.468490 (0.008077) | 0.557856 / 4.584777 (-4.026920) | 2.449101 / 3.745712 (-1.296611) | 2.924633 / 5.269862 (-2.345229) | 1.824420 / 4.565676 (-2.741256) | 0.063735 / 0.424275 (-0.360540) | 0.005025 / 0.007607 (-0.002582) | 0.349458 / 0.226044 (0.123413) | 3.468627 / 2.268929 (1.199699) | 1.925173 / 55.444624 (-53.519451) | 1.655038 / 6.876477 (-5.221439) | 1.698612 / 2.142072 (-0.443460) | 0.643623 / 4.805227 (-4.161604) | 0.116128 / 6.500664 (-6.384536) | 0.042283 / 0.075469 (-0.033186) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963029 / 1.841788 (-0.878758) | 13.273985 / 8.074308 (5.199677) | 11.400884 / 10.191392 (1.209492) | 0.152635 / 0.680424 (-0.527788) | 0.016442 / 0.534201 (-0.517759) | 0.289272 / 0.579283 (-0.290012) | 0.285286 / 0.434364 (-0.149078) | 0.330028 / 0.540337 (-0.210310) | 0.596500 / 1.386936 (-0.790436) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#9c427c4b1dcf84c898ae62dc521bf446bb35e0e7 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | 
read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005124 / 0.011353 (-0.006229) | 0.003832 / 0.011008 (-0.007176) | 0.062806 / 0.038508 (0.024298) | 0.053137 / 0.023109 (0.030028) | 0.241155 / 0.275898 (-0.034743) | 0.260521 / 0.323480 (-0.062959) | 0.004005 / 0.007986 (-0.003981) | 0.002754 / 0.004328 (-0.001575) | 0.048934 / 0.004250 (0.044684) | 0.039438 / 0.037052 (0.002385) | 0.242534 / 0.258489 (-0.015955) | 0.275498 / 0.293841 (-0.018343) | 0.027338 / 0.128546 (-0.101208) | 0.010809 / 0.075646 (-0.064837) | 0.206986 / 0.419271 (-0.212285) | 0.035614 / 0.043533 (-0.007919) | 0.245780 / 0.255139 (-0.009359) | 0.259793 / 0.283200 (-0.023407) | 0.018108 / 0.141683 (-0.123575) | 1.103412 / 1.452155 (-0.348742) | 1.162940 / 1.492716 (-0.329776) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092463 / 0.018006 (0.074457) | 0.299516 / 0.000490 (0.299026) | 0.000210 / 0.000200 (0.000010) | 0.000047 / 0.000054 (-0.000007) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018261 / 0.037411 (-0.019150) | 0.060178 / 0.014526 (0.045652) | 0.073043 / 0.176557 (-0.103513) | 0.120541 / 0.737135 (-0.616594) | 0.074972 / 0.296338 (-0.221367) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.287288 / 0.215209 (0.072078) | 2.814915 / 2.077655 (0.737260) | 1.520221 / 1.504120 (0.016101) | 1.396045 / 1.541195 (-0.145149) | 1.419662 / 1.468490 (-0.048828) | 0.589247 / 4.584777 (-3.995530) | 2.411101 / 3.745712 (-1.334611) | 2.777709 / 5.269862 (-2.492153) | 1.750386 / 4.565676 (-2.815291) | 0.063734 / 0.424275 (-0.360541) | 0.005021 / 0.007607 (-0.002586) | 0.338817 / 0.226044 (0.112773) | 3.371218 / 2.268929 (1.102289) | 1.892691 / 55.444624 (-53.551934) | 1.599039 / 6.876477 (-5.277438) | 1.574726 / 2.142072 (-0.567346) | 0.665623 / 4.805227 (-4.139604) | 0.118628 / 6.500664 (-6.382036) | 0.041803 / 0.075469 (-0.033666) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.948696 / 1.841788 (-0.893092) | 11.502916 / 8.074308 (3.428608) | 10.301174 / 
10.191392 (0.109782) | 0.141752 / 0.680424 (-0.538672) | 0.014064 / 0.534201 (-0.520137) | 0.286701 / 0.579283 (-0.292583) | 0.265805 / 0.434364 (-0.168559) | 0.328420 / 0.540337 (-0.211917) | 0.433619 / 1.386936 (-0.953317) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005262 / 0.011353 (-0.006091) | 0.003361 / 0.011008 (-0.007648) | 0.049525 / 0.038508 (0.011016) | 0.048950 / 0.023109 (0.025841) | 0.273617 / 0.275898 (-0.002281) | 0.296614 / 0.323480 (-0.026866) | 0.004014 / 0.007986 (-0.003971) | 0.002630 / 0.004328 (-0.001698) | 0.048203 / 0.004250 (0.043952) | 0.040912 / 0.037052 (0.003860) | 0.279736 / 0.258489 (0.021247) | 0.301671 / 0.293841 (0.007830) | 0.028546 / 0.128546 (-0.100000) | 0.010440 / 0.075646 (-0.065206) | 0.057869 / 0.419271 (-0.361402) | 0.032876 / 0.043533 (-0.010657) | 0.277649 / 0.255139 (0.022510) | 0.296565 / 0.283200 (0.013365) | 0.017558 / 0.141683 (-0.124125) | 1.155005 / 1.452155 (-0.297149) | 1.204827 / 1.492716 (-0.287889) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093248 / 0.018006 (0.075242) | 0.302721 / 0.000490 (0.302231) | 0.000218 / 0.000200 (0.000018) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021882 / 0.037411 (-0.015530) | 0.068259 / 0.014526 (0.053733) | 0.080982 / 0.176557 (-0.095574) | 0.119386 / 0.737135 (-0.617750) | 0.081745 / 0.296338 (-0.214593) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 
|\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.297812 / 0.215209 (0.082603) | 2.909938 / 2.077655 (0.832283) | 1.603736 / 1.504120 (0.099616) | 1.482989 / 1.541195 (-0.058206) | 1.495107 / 1.468490 (0.026617) | 0.562275 / 4.584777 (-4.022502) | 2.424812 / 3.745712 (-1.320901) | 2.759127 / 5.269862 (-2.510735) | 1.733283 / 4.565676 (-2.832394) | 0.063144 / 0.424275 (-0.361131) | 0.004949 / 0.007607 (-0.002658) | 0.352756 / 0.226044 (0.126711) | 3.496028 / 2.268929 (1.227100) | 1.982804 / 55.444624 (-53.461820) | 1.689787 / 6.876477 (-5.186690) | 1.672699 / 2.142072 (-0.469373) | 0.660169 / 4.805227 (-4.145059) | 0.116535 / 6.500664 (-6.384129) | 0.040616 / 0.075469 (-0.034853) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.975055 / 1.841788 (-0.866733) | 11.919295 / 8.074308 (3.844986) | 10.779188 / 10.191392 (0.587796) | 0.143106 / 0.680424 (-0.537318) | 0.015159 / 0.534201 (-0.519041) | 0.289734 / 0.579283 (-0.289549) | 0.278637 / 0.434364 (-0.155727) | 0.328159 / 0.540337 (-0.212178) | 0.570560 / 1.386936 (-0.816376) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#241500208da5fef64ad6ddc1cc5ab2be18f2f76d \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005155 / 0.011353 (-0.006198) | 0.003589 / 0.011008 (-0.007419) | 0.064440 / 0.038508 (0.025932) | 0.051020 / 0.023109 (0.027911) | 0.246099 / 0.275898 (-0.029799) | 0.273383 / 0.323480 (-0.050097) | 0.003984 / 0.007986 (-0.004002) | 0.002791 / 0.004328 (-0.001537) | 0.049076 / 0.004250 (0.044826) | 0.037975 / 0.037052 (0.000922) | 0.253709 / 0.258489 (-0.004780) | 0.281730 / 0.293841 (-0.012111) | 0.028060 / 0.128546 (-0.100486) | 0.010808 / 0.075646 (-0.064838) | 0.206663 / 0.419271 (-0.212609) | 0.035989 / 0.043533 (-0.007544) | 0.252635 / 0.255139 (-0.002504) | 0.280042 / 0.283200 (-0.003158) | 0.016982 / 0.141683 (-0.124700) | 1.098679 / 1.452155 (-0.353475) | 1.157051 / 1.492716 (-0.335666) |\n\n### Benchmark: 
benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.098238 / 0.018006 (0.080232) | 0.311990 / 0.000490 (0.311501) | 0.000229 / 0.000200 (0.000029) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018270 / 0.037411 (-0.019141) | 0.062711 / 0.014526 (0.048186) | 0.074381 / 0.176557 (-0.102175) | 0.119946 / 0.737135 (-0.617189) | 0.075013 / 0.296338 (-0.221325) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.282106 / 0.215209 (0.066897) | 2.752653 / 2.077655 (0.674999) | 1.488771 / 1.504120 (-0.015349) | 1.372552 / 1.541195 (-0.168643) | 1.390270 / 1.468490 (-0.078220) | 0.558928 / 4.584777 (-4.025849) | 2.411821 / 3.745712 (-1.333891) | 2.771441 / 5.269862 (-2.498421) | 1.747507 / 4.565676 (-2.818169) | 0.061360 / 0.424275 (-0.362915) | 0.004956 / 0.007607 (-0.002652) | 0.332330 / 0.226044 (0.106286) | 3.301405 / 2.268929 (1.032476) | 1.786726 / 55.444624 (-53.657899) | 1.529974 / 6.876477 (-5.346502) | 1.538412 / 2.142072 (-0.603660) | 0.637590 / 4.805227 (-4.167637) | 0.117215 / 6.500664 (-6.383449) | 0.042186 / 0.075469 (-0.033283) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.945574 / 1.841788 (-0.896213) | 11.616152 / 8.074308 (3.541844) | 10.365114 / 10.191392 (0.173722) | 0.130358 / 0.680424 (-0.550066) | 0.013587 / 0.534201 (-0.520614) | 0.306024 / 0.579283 (-0.273259) | 0.270577 / 0.434364 (-0.163787) | 0.340768 / 0.540337 (-0.199569) | 0.460841 / 1.386936 (-0.926095) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after 
write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005254 / 0.011353 (-0.006099) | 0.003137 / 0.011008 (-0.007871) | 0.048302 / 0.038508 (0.009794) | 0.051952 / 0.023109 (0.028843) | 0.269078 / 0.275898 (-0.006820) | 0.292044 / 0.323480 (-0.031436) | 0.003985 / 0.007986 (-0.004000) | 0.002597 / 0.004328 (-0.001732) | 0.049998 / 0.004250 (0.045747) | 0.040227 / 0.037052 (0.003174) | 0.274714 / 0.258489 (0.016225) | 0.298160 / 0.293841 (0.004319) | 0.028857 / 0.128546 (-0.099690) | 0.010545 / 0.075646 (-0.065101) | 0.057234 / 0.419271 (-0.362038) | 0.032515 / 0.043533 (-0.011018) | 0.271526 / 0.255139 (0.016387) | 0.288556 / 0.283200 (0.005356) | 0.018155 / 0.141683 (-0.123527) | 1.201906 / 1.452155 (-0.250248) | 1.220068 / 1.492716 (-0.272648) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.100098 / 0.018006 (0.082092) | 0.311081 / 0.000490 (0.310591) | 0.000231 / 0.000200 (0.000032) | 0.000051 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022349 / 0.037411 (-0.015062) | 0.069698 / 0.014526 (0.055172) | 0.081334 / 0.176557 (-0.095222) | 0.120847 / 0.737135 (-0.616289) | 0.082091 / 0.296338 (-0.214248) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.293810 / 0.215209 (0.078601) | 2.844191 / 2.077655 (0.766536) | 1.594494 / 1.504120 (0.090374) | 1.486531 / 1.541195 (-0.054664) | 1.506307 / 1.468490 (0.037817) | 0.560247 / 4.584777 (-4.024530) | 2.478309 / 3.745712 (-1.267403) | 2.759024 / 5.269862 (-2.510837) | 1.733063 / 4.565676 (-2.832613) | 0.061838 / 0.424275 (-0.362438) | 0.004869 / 0.007607 (-0.002738) | 0.347267 / 0.226044 (0.121222) | 3.407737 / 2.268929 (1.138808) | 1.944420 / 55.444624 (-53.500204) | 1.660060 / 6.876477 (-5.216417) | 1.704219 / 2.142072 (-0.437854) | 0.646969 / 4.805227 (-4.158258) | 0.115750 / 6.500664 (-6.384914) | 0.041614 / 0.075469 (-0.033855) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.972537 / 1.841788 (-0.869251) | 12.013530 / 8.074308 (3.939222) 
| 10.650215 / 10.191392 (0.458823) | 0.132877 / 0.680424 (-0.547547) | 0.016828 / 0.534201 (-0.517372) | 0.288321 / 0.579283 (-0.290962) | 0.284203 / 0.434364 (-0.150161) | 0.324016 / 0.540337 (-0.216321) | 0.575403 / 1.386936 (-0.811533) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#17ec1a7a610adba3db44f316a930b979872d4ef7 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005925 / 0.011353 (-0.005427) | 0.005138 / 0.011008 (-0.005870) | 0.069865 / 0.038508 (0.031356) | 0.067181 / 0.023109 (0.044072) | 0.309642 / 0.275898 (0.033743) | 0.302919 / 0.323480 (-0.020561) | 0.003365 / 0.007986 (-0.004620) | 0.003148 / 0.004328 (-0.001180) | 0.054102 / 0.004250 (0.049852) | 0.044196 / 0.037052 (0.007143) | 0.306882 / 0.258489 (0.048393) | 0.315153 / 0.293841 (0.021313) | 0.030458 / 0.128546 (-0.098089) | 0.011773 / 0.075646 (-0.063874) | 0.235075 / 0.419271 (-0.184196) | 0.040840 / 0.043533 (-0.002693) | 0.279897 / 0.255139 (0.024758) | 0.316334 / 0.283200 (0.033135) | 0.020128 / 0.141683 (-0.121555) | 1.237327 / 1.452155 (-0.214828) | 1.290386 / 1.492716 (-0.202331) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.118540 / 0.018006 (0.100534) | 0.363282 / 0.000490 (0.362792) | 0.000266 / 0.000200 (0.000066) | 0.000058 / 0.000054 (0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021435 / 0.037411 (-0.015977) | 0.068124 / 0.014526 (0.053598) | 0.082747 / 0.176557 (-0.093809) | 0.137179 / 0.737135 (-0.599956) | 0.084815 / 0.296338 (-0.211523) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted 
numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.307836 / 0.215209 (0.092626) | 2.983444 / 2.077655 (0.905790) | 1.616430 / 1.504120 (0.112310) | 1.466843 / 1.541195 (-0.074351) | 1.512440 / 1.468490 (0.043950) | 0.652311 / 4.584777 (-3.932466) | 2.676420 / 3.745712 (-1.069292) | 3.265747 / 5.269862 (-2.004115) | 2.028586 / 4.565676 (-2.537090) | 0.071997 / 0.424275 (-0.352278) | 0.007068 / 0.007607 (-0.000539) | 0.367199 / 0.226044 (0.141155) | 3.617970 / 2.268929 (1.349042) | 1.991345 / 55.444624 (-53.453280) | 1.670015 / 6.876477 (-5.206462) | 1.720515 / 2.142072 (-0.421557) | 0.724649 / 4.805227 (-4.080579) | 0.134888 / 6.500664 (-6.365776) | 0.048325 / 0.075469 (-0.027144) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.051058 / 1.841788 (-0.790730) | 13.772809 / 8.074308 (5.698501) | 11.813879 / 10.191392 (1.622487) | 0.160065 / 0.680424 (-0.520359) | 0.016256 / 0.534201 (-0.517945) | 0.320393 / 0.579283 (-0.258890) | 0.314462 / 0.434364 (-0.119901) | 0.371911 / 0.540337 (-0.168427) | 0.506864 / 1.386936 (-0.880072) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005857 / 0.011353 (-0.005496) | 0.004077 / 0.011008 (-0.006931) | 0.056033 / 0.038508 (0.017525) | 0.067622 / 0.023109 (0.044513) | 0.298956 / 0.275898 (0.023058) | 0.323484 / 0.323480 (0.000004) | 0.004825 / 0.007986 (-0.003160) | 0.003120 / 0.004328 (-0.001208) | 0.055227 / 0.004250 (0.050976) | 0.048439 / 0.037052 (0.011387) | 0.303207 / 0.258489 (0.044718) | 0.329478 / 0.293841 (0.035637) | 0.032516 / 0.128546 (-0.096031) | 0.012260 / 0.075646 (-0.063386) | 0.065037 / 0.419271 (-0.354234) | 0.038799 / 0.043533 (-0.004734) | 0.299102 / 0.255139 (0.043963) | 0.318248 / 0.283200 (0.035048) | 0.020190 / 0.141683 (-0.121493) | 1.263479 / 1.452155 (-0.188676) | 1.329788 / 1.492716 (-0.162928) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | 
get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.119801 / 0.018006 (0.101794) | 0.359618 / 0.000490 (0.359129) | 0.000260 / 0.000200 (0.000060) | 0.000058 / 0.000054 (0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.026876 / 0.037411 (-0.010535) | 0.080637 / 0.014526 (0.066111) | 0.092260 / 0.176557 (-0.084297) | 0.137260 / 0.737135 (-0.599875) | 0.093309 / 0.296338 (-0.203029) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.329327 / 0.215209 (0.114118) | 3.193014 / 2.077655 (1.115359) | 1.755838 / 1.504120 (0.251718) | 1.612279 / 1.541195 (0.071084) | 1.631958 / 1.468490 (0.163468) | 0.630886 / 4.584777 (-3.953891) | 2.739731 / 3.745712 (-1.005981) | 3.186745 / 5.269862 (-2.083117) | 1.987125 / 4.565676 (-2.578552) | 0.070694 / 0.424275 (-0.353581) | 0.006461 / 0.007607 (-0.001146) | 0.386367 / 0.226044 (0.160323) | 3.815837 / 2.268929 (1.546908) | 2.155904 / 55.444624 (-53.288720) | 1.832575 / 6.876477 (-5.043902) | 1.842097 / 2.142072 (-0.299975) | 0.716394 / 4.805227 (-4.088833) | 0.130796 / 6.500664 (-6.369869) | 0.045674 / 0.075469 (-0.029795) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 1.109117 / 1.841788 (-0.732671) | 14.116582 / 8.074308 (6.042274) | 11.926356 / 10.191392 (1.734964) | 0.150543 / 0.680424 (-0.529881) | 0.017426 / 0.534201 (-0.516775) | 0.323058 / 0.579283 (-0.256225) | 0.330228 / 0.434364 (-0.104136) | 0.372533 / 0.540337 (-0.167804) | 0.661348 / 1.386936 (-0.725588) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#04ffd22a30ecc7545234559edd9d23c85c6d84d9 \"CML watermark\")\n", "Thanks for the review, I took your comments into account !", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated 
after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005477 / 0.011353 (-0.005876) | 0.003509 / 0.011008 (-0.007499) | 0.062884 / 0.038508 (0.024376) | 0.051042 / 0.023109 (0.027933) | 0.285180 / 0.275898 (0.009282) | 0.315353 / 0.323480 (-0.008127) | 0.002943 / 0.007986 (-0.005043) | 0.003286 / 0.004328 (-0.001042) | 0.048885 / 0.004250 (0.044635) | 0.038591 / 0.037052 (0.001539) | 0.288527 / 0.258489 (0.030038) | 0.316102 / 0.293841 (0.022261) | 0.028252 / 0.128546 (-0.100295) | 0.010622 / 0.075646 (-0.065024) | 0.205573 / 0.419271 (-0.213699) | 0.035764 / 0.043533 (-0.007769) | 0.285729 / 0.255139 (0.030590) | 0.304578 / 0.283200 (0.021378) | 0.019862 / 0.141683 (-0.121821) | 1.102866 / 1.452155 (-0.349288) | 1.175161 / 1.492716 (-0.317555) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095253 / 0.018006 (0.077246) | 0.302290 / 0.000490 (0.301800) | 0.000243 / 0.000200 (0.000043) | 0.000061 / 0.000054 (0.000007) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018680 / 0.037411 (-0.018731) | 0.060375 / 0.014526 (0.045849) | 0.074033 / 0.176557 (-0.102524) | 0.120290 / 0.737135 (-0.616845) | 0.075350 / 0.296338 (-0.220989) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.277617 / 0.215209 (0.062408) | 2.718201 / 2.077655 (0.640546) | 1.462952 / 1.504120 (-0.041168) | 1.339199 / 1.541195 (-0.201996) | 1.375805 / 1.468490 (-0.092685) | 0.559956 / 4.584777 (-4.024821) | 2.373865 / 3.745712 (-1.371847) | 2.795732 / 5.269862 (-2.474129) | 1.755490 / 4.565676 (-2.810186) | 0.062002 / 0.424275 (-0.362273) | 0.004935 / 0.007607 (-0.002672) | 0.334786 / 0.226044 (0.108741) | 3.237499 / 2.268929 (0.968571) | 1.787561 / 55.444624 (-53.657064) | 1.513300 / 6.876477 (-5.363176) | 1.549797 / 2.142072 (-0.592275) | 0.643587 / 4.805227 (-4.161640) | 0.117275 / 6.500664 (-6.383389) | 0.042184 / 0.075469 (-0.033285) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map 
no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.933366 / 1.841788 (-0.908421) | 11.792282 / 8.074308 (3.717973) | 10.466608 / 10.191392 (0.275216) | 0.142148 / 0.680424 (-0.538275) | 0.014084 / 0.534201 (-0.520117) | 0.287233 / 0.579283 (-0.292050) | 0.266022 / 0.434364 (-0.168342) | 0.326854 / 0.540337 (-0.213483) | 0.451348 / 1.386936 (-0.935588) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005384 / 0.011353 (-0.005969) | 0.003562 / 0.011008 (-0.007446) | 0.049014 / 0.038508 (0.010506) | 0.057480 / 0.023109 (0.034371) | 0.274456 / 0.275898 (-0.001442) | 0.298387 / 0.323480 (-0.025093) | 0.003909 / 0.007986 (-0.004076) | 0.002646 / 0.004328 (-0.001683) | 0.048374 / 0.004250 (0.044124) | 0.040907 / 0.037052 (0.003854) | 0.278267 / 0.258489 (0.019778) | 0.299862 / 0.293841 (0.006021) | 0.029108 / 0.128546 (-0.099439) | 0.010752 / 0.075646 (-0.064894) | 0.057523 / 0.419271 (-0.361749) | 0.032692 / 0.043533 (-0.010841) | 0.276288 / 0.255139 (0.021149) | 0.291572 / 0.283200 (0.008372) | 0.017818 / 0.141683 (-0.123865) | 1.129517 / 1.452155 (-0.322638) | 1.186630 / 1.492716 (-0.306086) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093405 / 0.018006 (0.075399) | 0.301254 / 0.000490 (0.300764) | 0.000225 / 0.000200 (0.000025) | 0.000054 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021793 / 0.037411 (-0.015618) | 0.069033 / 0.014526 (0.054508) | 0.083502 / 0.176557 (-0.093055) | 0.122149 / 0.737135 (-0.614986) | 0.083801 / 0.296338 (-0.212537) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled 
read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.299149 / 0.215209 (0.083940) | 2.936550 / 2.077655 (0.858895) | 1.595766 / 1.504120 (0.091647) | 1.487117 / 1.541195 (-0.054078) | 1.494606 / 1.468490 (0.026116) | 0.569346 / 4.584777 (-4.015431) | 2.445642 / 3.745712 (-1.300070) | 2.805696 / 5.269862 (-2.464165) | 1.743796 / 4.565676 (-2.821881) | 0.062695 / 0.424275 (-0.361580) | 0.004885 / 0.007607 (-0.002723) | 0.354186 / 0.226044 (0.128142) | 3.487926 / 2.268929 (1.218997) | 1.965703 / 55.444624 (-53.478922) | 1.682284 / 6.876477 (-5.194193) | 1.705586 / 2.142072 (-0.436487) | 0.655099 / 4.805227 (-4.150128) | 0.116441 / 6.500664 (-6.384223) | 0.040851 / 0.075469 (-0.034618) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.967361 / 1.841788 (-0.874427) | 12.037718 / 8.074308 (3.963409) | 10.599761 / 10.191392 (0.408369) | 0.143127 / 0.680424 (-0.537297) | 0.015063 / 0.534201 (-0.519138) | 0.286894 / 0.579283 (-0.292389) | 0.301505 / 0.434364 (-0.132859) | 0.324339 / 0.540337 (-0.215999) | 0.591782 / 1.386936 (-0.795154) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b96ff08d4aa6dbafc8a10a9d03dfabe236378bcd \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005337 / 0.011353 (-0.006015) | 0.004074 / 0.011008 (-0.006934) | 0.062653 / 0.038508 (0.024145) | 0.054295 / 0.023109 (0.031186) | 0.248284 / 0.275898 (-0.027614) | 0.271604 / 0.323480 (-0.051876) | 0.003931 / 0.007986 (-0.004055) | 0.002907 / 0.004328 (-0.001422) | 0.047991 / 0.004250 (0.043740) | 0.042842 / 0.037052 (0.005790) | 0.253648 / 0.258489 (-0.004841) | 0.282546 / 0.293841 (-0.011295) | 0.028005 / 0.128546 (-0.100541) | 0.010734 / 0.075646 (-0.064912) | 0.210023 / 0.419271 (-0.209248) | 0.035940 / 0.043533 (-0.007592) | 0.250766 / 0.255139 (-0.004373) | 0.267644 / 0.283200 (-0.015556) | 0.020451 
/ 0.141683 (-0.121232) | 1.114972 / 1.452155 (-0.337183) | 1.159823 / 1.492716 (-0.332893) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.095527 / 0.018006 (0.077521) | 0.303321 / 0.000490 (0.302831) | 0.000216 / 0.000200 (0.000016) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018725 / 0.037411 (-0.018686) | 0.062537 / 0.014526 (0.048011) | 0.073091 / 0.176557 (-0.103466) | 0.119570 / 0.737135 (-0.617565) | 0.074863 / 0.296338 (-0.221476) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.284936 / 0.215209 (0.069727) | 2.802498 / 2.077655 (0.724843) | 1.493316 / 1.504120 (-0.010804) | 1.372319 / 1.541195 (-0.168875) | 1.403657 / 1.468490 (-0.064833) | 0.569303 / 4.584777 (-4.015474) | 2.402498 / 3.745712 (-1.343214) | 2.834778 / 5.269862 (-2.435084) | 1.791312 / 4.565676 (-2.774365) | 0.062526 / 0.424275 (-0.361749) | 0.004947 / 0.007607 (-0.002660) | 0.345141 / 0.226044 (0.119097) | 3.371863 / 2.268929 (1.102934) | 1.846023 / 55.444624 (-53.598602) | 1.596368 / 6.876477 (-5.280109) | 1.615902 / 2.142072 (-0.526170) | 0.644333 / 4.805227 (-4.160894) | 0.119460 / 6.500664 (-6.381204) | 0.049122 / 0.075469 (-0.026347) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.951839 / 1.841788 (-0.889948) | 11.677074 / 8.074308 (3.602766) | 10.562586 / 10.191392 (0.371194) | 0.143633 / 0.680424 (-0.536791) | 0.014157 / 0.534201 (-0.520044) | 0.289141 / 0.579283 (-0.290142) | 0.264719 / 0.434364 (-0.169645) | 0.327862 / 0.540337 (-0.212476) | 0.451215 / 1.386936 (-0.935721) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after 
write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005343 / 0.011353 (-0.006010) | 0.003522 / 0.011008 (-0.007486) | 0.049354 / 0.038508 (0.010846) | 0.051441 / 0.023109 (0.028332) | 0.259350 / 0.275898 (-0.016548) | 0.288946 / 0.323480 (-0.034534) | 0.004052 / 0.007986 (-0.003934) | 0.002690 / 0.004328 (-0.001639) | 0.049996 / 0.004250 (0.045746) | 0.040224 / 0.037052 (0.003171) | 0.264588 / 0.258489 (0.006099) | 0.296474 / 0.293841 (0.002633) | 0.028868 / 0.128546 (-0.099679) | 0.010917 / 0.075646 (-0.064730) | 0.057866 / 0.419271 (-0.361405) | 0.032610 / 0.043533 (-0.010923) | 0.260657 / 0.255139 (0.005518) | 0.276947 / 0.283200 (-0.006253) | 0.018877 / 0.141683 (-0.122806) | 1.126205 / 1.452155 (-0.325949) | 1.206173 / 1.492716 (-0.286543) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094464 / 0.018006 (0.076458) | 0.304473 / 0.000490 (0.303984) | 0.000231 / 0.000200 (0.000031) | 0.000053 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021472 / 0.037411 (-0.015939) | 0.070864 / 0.014526 (0.056338) | 0.086607 / 0.176557 (-0.089950) | 0.120679 / 0.737135 (-0.616456) | 0.084271 / 0.296338 (-0.212068) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.296448 / 0.215209 (0.081239) | 2.893996 / 2.077655 (0.816341) | 1.573409 / 1.504120 (0.069289) | 1.438799 / 1.541195 (-0.102396) | 1.461241 / 1.468490 (-0.007249) | 0.566737 / 4.584777 (-4.018040) | 2.425709 / 3.745712 (-1.320003) | 2.826764 / 5.269862 (-2.443098) | 1.785330 / 4.565676 (-2.780347) | 0.063721 / 0.424275 (-0.360554) | 0.005158 / 0.007607 (-0.002449) | 0.354961 / 0.226044 (0.128916) | 3.457499 / 2.268929 (1.188570) | 1.931374 / 55.444624 (-53.513251) | 1.646515 / 6.876477 (-5.229962) | 1.629891 / 2.142072 (-0.512182) | 0.648922 / 4.805227 (-4.156305) | 0.114953 / 6.500664 (-6.385711) | 0.040997 / 0.075469 (-0.034472) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow 
|\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.951049 / 1.841788 (-0.890739) | 12.258298 / 8.074308 (4.183990) | 10.663309 / 10.191392 (0.471917) | 0.142933 / 0.680424 (-0.537491) | 0.015927 / 0.534201 (-0.518273) | 0.286914 / 0.579283 (-0.292369) | 0.286600 / 0.434364 (-0.147764) | 0.324464 / 0.540337 (-0.215874) | 0.575075 / 1.386936 (-0.811861) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#ed47b9d5e9c6aa03a0aa07d8abfd3fa8241da353 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005298 / 0.011353 (-0.006055) | 0.003645 / 0.011008 (-0.007363) | 0.061629 / 0.038508 (0.023121) | 0.052322 / 0.023109 (0.029212) | 0.242579 / 0.275898 (-0.033319) | 0.263525 / 0.323480 (-0.059955) | 0.002794 / 0.007986 (-0.005192) | 0.002152 / 0.004328 (-0.002177) | 0.048301 / 0.004250 (0.044050) | 0.038177 / 0.037052 (0.001125) | 0.247724 / 0.258489 (-0.010765) | 0.274455 / 0.293841 (-0.019386) | 0.026992 / 0.128546 (-0.101555) | 0.010110 / 0.075646 (-0.065536) | 0.205662 / 0.419271 (-0.213609) | 0.034901 / 0.043533 (-0.008632) | 0.241920 / 0.255139 (-0.013219) | 0.262048 / 0.283200 (-0.021152) | 0.019111 / 0.141683 (-0.122572) | 1.127600 / 1.452155 (-0.324555) | 1.193931 / 1.492716 (-0.298786) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090321 / 0.018006 (0.072315) | 0.299046 / 0.000490 (0.298556) | 0.000197 / 0.000200 (-0.000003) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018278 / 0.037411 (-0.019133) | 0.060114 / 0.014526 (0.045588) | 0.073602 / 0.176557 (-0.102954) | 0.119676 / 0.737135 (-0.617459) | 0.074786 / 0.296338 (-0.221552) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 
| shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.280385 / 0.215209 (0.065176) | 2.764259 / 2.077655 (0.686604) | 1.501027 / 1.504120 (-0.003093) | 1.376900 / 1.541195 (-0.164295) | 1.390587 / 1.468490 (-0.077903) | 0.555180 / 4.584777 (-4.029597) | 2.354307 / 3.745712 (-1.391405) | 2.755862 / 5.269862 (-2.514000) | 1.714771 / 4.565676 (-2.850906) | 0.062507 / 0.424275 (-0.361768) | 0.004974 / 0.007607 (-0.002633) | 0.333900 / 0.226044 (0.107856) | 3.266922 / 2.268929 (0.997994) | 1.805401 / 55.444624 (-53.639223) | 1.526970 / 6.876477 (-5.349507) | 1.539425 / 2.142072 (-0.602647) | 0.629364 / 4.805227 (-4.175863) | 0.114929 / 6.500664 (-6.385735) | 0.041258 / 0.075469 (-0.034211) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.968601 / 1.841788 (-0.873187) | 11.260937 / 8.074308 (3.186629) | 10.393839 / 10.191392 (0.202447) | 0.127988 / 0.680424 (-0.552436) | 0.014564 / 0.534201 (-0.519637) | 0.286560 / 0.579283 (-0.292723) | 0.260493 / 0.434364 (-0.173871) | 0.330949 / 0.540337 (-0.209388) | 0.435798 / 1.386936 (-0.951138) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005232 / 0.011353 (-0.006121) | 0.003030 / 0.011008 (-0.007978) | 0.048513 / 0.038508 (0.010005) | 0.049501 / 0.023109 (0.026392) | 0.270545 / 0.275898 (-0.005353) | 0.289128 / 0.323480 (-0.034352) | 0.003925 / 0.007986 (-0.004061) | 0.002568 / 0.004328 (-0.001761) | 0.047692 / 0.004250 (0.043442) | 0.039854 / 0.037052 (0.002802) | 0.272654 / 0.258489 (0.014165) | 0.296275 / 0.293841 (0.002434) | 0.029027 / 0.128546 (-0.099519) | 0.010335 / 0.075646 (-0.065311) | 0.056726 / 0.419271 (-0.362546) | 0.033257 / 0.043533 (-0.010275) | 0.272672 / 0.255139 (0.017533) | 0.286298 / 0.283200 (0.003098) | 0.017877 / 0.141683 (-0.123806) | 1.150322 
/ 1.452155 (-0.301833) | 1.221031 / 1.492716 (-0.271685) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.102838 / 0.018006 (0.084832) | 0.298810 / 0.000490 (0.298320) | 0.000207 / 0.000200 (0.000007) | 0.000043 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021232 / 0.037411 (-0.016180) | 0.067949 / 0.014526 (0.053423) | 0.116487 / 0.176557 (-0.060070) | 0.124035 / 0.737135 (-0.613100) | 0.081075 / 0.296338 (-0.215263) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289098 / 0.215209 (0.073889) | 2.844476 / 2.077655 (0.766821) | 1.609576 / 1.504120 (0.105456) | 1.480453 / 1.541195 (-0.060742) | 1.489672 / 1.468490 (0.021182) | 0.589661 / 4.584777 (-3.995116) | 2.453804 / 3.745712 (-1.291908) | 2.722381 / 5.269862 (-2.547480) | 1.720251 / 4.565676 (-2.845425) | 0.066085 / 0.424275 (-0.358190) | 0.004943 / 0.007607 (-0.002664) | 0.355149 / 0.226044 (0.129104) | 3.444323 / 2.268929 (1.175395) | 1.971157 / 55.444624 (-53.473467) | 1.683029 / 6.876477 (-5.193448) | 1.672798 / 2.142072 (-0.469274) | 0.644812 / 4.805227 (-4.160416) | 0.115098 / 6.500664 (-6.385566) | 0.039883 / 0.075469 (-0.035586) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.960454 / 1.841788 (-0.881334) | 11.604732 / 8.074308 (3.530424) | 10.405481 / 10.191392 (0.214089) | 0.129146 / 0.680424 (-0.551278) | 0.014945 / 0.534201 (-0.519256) | 0.286239 / 0.579283 (-0.293044) | 0.281041 / 0.434364 (-0.153323) | 0.320448 / 0.540337 (-0.219890) | 0.554304 / 1.386936 (-0.832632) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b2cfb7859b029654829c4dfee230812ddab1f104 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | 
read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005510 / 0.011353 (-0.005843) | 0.003575 / 0.011008 (-0.007433) | 0.062232 / 0.038508 (0.023724) | 0.051115 / 0.023109 (0.028006) | 0.250709 / 0.275898 (-0.025189) | 0.274837 / 0.323480 (-0.048642) | 0.002972 / 0.007986 (-0.005014) | 0.002708 / 0.004328 (-0.001621) | 0.048088 / 0.004250 (0.043838) | 0.038588 / 0.037052 (0.001535) | 0.252550 / 0.258489 (-0.005939) | 0.285238 / 0.293841 (-0.008603) | 0.027867 / 0.128546 (-0.100679) | 0.011000 / 0.075646 (-0.064646) | 0.206918 / 0.419271 (-0.212354) | 0.035711 / 0.043533 (-0.007822) | 0.255306 / 0.255139 (0.000167) | 0.298636 / 0.283200 (0.015436) | 0.018222 / 0.141683 (-0.123461) | 1.122276 / 1.452155 (-0.329879) | 1.196471 / 1.492716 (-0.296245) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.092072 / 0.018006 (0.074066) | 0.301469 / 0.000490 (0.300979) | 0.000225 / 0.000200 (0.000025) | 0.000050 / 0.000054 (-0.000004) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018672 / 0.037411 (-0.018739) | 0.060235 / 0.014526 (0.045709) | 0.074036 / 0.176557 (-0.102521) | 0.119578 / 0.737135 (-0.617557) | 0.073605 / 0.296338 (-0.222734) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.286474 / 0.215209 (0.071264) | 2.779427 / 2.077655 (0.701772) | 1.478746 / 1.504120 (-0.025373) | 1.362692 / 1.541195 (-0.178503) | 1.388194 / 1.468490 (-0.080296) | 0.560707 / 4.584777 (-4.024070) | 2.352846 / 3.745712 (-1.392866) | 2.784400 / 5.269862 (-2.485461) | 1.775642 / 4.565676 (-2.790035) | 0.062324 / 0.424275 (-0.361951) | 0.004938 / 0.007607 (-0.002669) | 0.334149 / 0.226044 (0.108105) | 3.319446 / 2.268929 (1.050517) | 1.810369 / 55.444624 (-53.634255) | 1.559462 / 6.876477 (-5.317014) | 1.611199 / 2.142072 (-0.530873) | 0.655984 / 4.805227 (-4.149244) | 0.118508 / 6.500664 (-6.382156) | 0.043661 / 0.075469 (-0.031808) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy 
| map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.935046 / 1.841788 (-0.906742) | 11.413501 / 8.074308 (3.339192) | 10.392314 / 10.191392 (0.200922) | 0.131507 / 0.680424 (-0.548917) | 0.014827 / 0.534201 (-0.519374) | 0.289069 / 0.579283 (-0.290214) | 0.268288 / 0.434364 (-0.166076) | 0.326843 / 0.540337 (-0.213495) | 0.441283 / 1.386936 (-0.945653) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005375 / 0.011353 (-0.005978) | 0.003549 / 0.011008 (-0.007459) | 0.048996 / 0.038508 (0.010488) | 0.051408 / 0.023109 (0.028298) | 0.272265 / 0.275898 (-0.003633) | 0.293228 / 0.323480 (-0.030252) | 0.004147 / 0.007986 (-0.003839) | 0.002673 / 0.004328 (-0.001655) | 0.048116 / 0.004250 (0.043865) | 0.039926 / 0.037052 (0.002874) | 0.276987 / 0.258489 (0.018498) | 0.302955 / 0.293841 (0.009115) | 0.029488 / 0.128546 (-0.099058) | 0.010797 / 0.075646 (-0.064849) | 0.057552 / 0.419271 (-0.361720) | 0.032827 / 0.043533 (-0.010706) | 0.270888 / 0.255139 (0.015749) | 0.289136 / 0.283200 (0.005937) | 0.018815 / 0.141683 (-0.122868) | 1.148624 / 1.452155 (-0.303530) | 1.191184 / 1.492716 (-0.301532) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.091712 / 0.018006 (0.073706) | 0.311198 / 0.000490 (0.310708) | 0.000226 / 0.000200 (0.000026) | 0.000049 / 0.000054 (-0.000005) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022097 / 0.037411 (-0.015314) | 0.070641 / 0.014526 (0.056116) | 0.080084 / 0.176557 (-0.096472) | 0.118998 / 0.737135 (-0.618137) | 0.081827 / 0.296338 (-0.214512) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 
50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.298599 / 0.215209 (0.083390) | 2.884759 / 2.077655 (0.807105) | 1.630794 / 1.504120 (0.126674) | 1.454309 / 1.541195 (-0.086886) | 1.466795 / 1.468490 (-0.001695) | 0.565405 / 4.584777 (-4.019372) | 2.460883 / 3.745712 (-1.284829) | 2.764193 / 5.269862 (-2.505668) | 1.734270 / 4.565676 (-2.831407) | 0.063408 / 0.424275 (-0.360867) | 0.004887 / 0.007607 (-0.002720) | 0.347762 / 0.226044 (0.121717) | 3.458385 / 2.268929 (1.189457) | 1.965434 / 55.444624 (-53.479190) | 1.671047 / 6.876477 (-5.205430) | 1.665642 / 2.142072 (-0.476430) | 0.640665 / 4.805227 (-4.164562) | 0.116025 / 6.500664 (-6.384639) | 0.040147 / 0.075469 (-0.035322) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.982194 / 1.841788 (-0.859593) | 11.983487 / 8.074308 (3.909179) | 10.660605 / 10.191392 (0.469213) | 0.140647 / 0.680424 (-0.539777) | 0.015870 / 0.534201 (-0.518331) | 0.287032 / 0.579283 (-0.292251) | 0.276629 / 0.434364 (-0.157735) | 0.331171 / 0.540337 (-0.209166) | 0.575346 / 1.386936 (-0.811590) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#56433c2f6a42d5fcc5acb46c6275911c29afc371 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005014 / 0.011353 (-0.006339) | 0.003434 / 0.011008 (-0.007574) | 0.063283 / 0.038508 (0.024775) | 0.048068 / 0.023109 (0.024959) | 0.239521 / 0.275898 (-0.036377) | 0.265294 / 0.323480 (-0.058186) | 0.003790 / 0.007986 (-0.004196) | 0.002577 / 0.004328 (-0.001751) | 0.048618 / 0.004250 (0.044368) | 0.037427 / 0.037052 (0.000375) | 0.245263 / 0.258489 (-0.013226) | 0.276618 / 0.293841 (-0.017223) | 0.026615 / 0.128546 (-0.101931) | 0.010378 / 0.075646 (-0.065268) | 0.205670 / 0.419271 (-0.213601) | 0.035076 / 0.043533 (-0.008457) | 0.245062 / 0.255139 
(-0.010077) | 0.264584 / 0.283200 (-0.018616) | 0.017760 / 0.141683 (-0.123922) | 1.148061 / 1.452155 (-0.304094) | 1.192762 / 1.492716 (-0.299955) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090870 / 0.018006 (0.072864) | 0.305458 / 0.000490 (0.304968) | 0.000207 / 0.000200 (0.000007) | 0.000052 / 0.000054 (-0.000003) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018597 / 0.037411 (-0.018814) | 0.060349 / 0.014526 (0.045823) | 0.074854 / 0.176557 (-0.101702) | 0.123243 / 0.737135 (-0.613892) | 0.075843 / 0.296338 (-0.220496) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.275855 / 0.215209 (0.060645) | 2.723965 / 2.077655 (0.646311) | 1.436010 / 1.504120 (-0.068110) | 1.323495 / 1.541195 (-0.217700) | 1.356234 / 1.468490 (-0.112256) | 0.564388 / 4.584777 (-4.020389) | 2.390180 / 3.745712 (-1.355532) | 2.782863 / 5.269862 (-2.486998) | 1.765048 / 4.565676 (-2.800628) | 0.062680 / 0.424275 (-0.361595) | 0.004929 / 0.007607 (-0.002678) | 0.337578 / 0.226044 (0.111533) | 3.316780 / 2.268929 (1.047851) | 1.803829 / 55.444624 (-53.640795) | 1.524585 / 6.876477 (-5.351891) | 1.549695 / 2.142072 (-0.592377) | 0.638053 / 4.805227 (-4.167174) | 0.116983 / 6.500664 (-6.383681) | 0.042251 / 0.075469 (-0.033218) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.946978 / 1.841788 (-0.894810) | 11.809483 / 8.074308 (3.735175) | 10.459974 / 10.191392 (0.268582) | 0.130015 / 0.680424 (-0.550409) | 0.013843 / 0.534201 (-0.520358) | 0.286972 / 0.579283 (-0.292311) | 0.268904 / 0.434364 (-0.165460) | 0.325591 / 0.540337 (-0.214746) | 0.439233 / 1.386936 (-0.947703) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after 
write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005804 / 0.011353 (-0.005549) | 0.003431 / 0.011008 (-0.007577) | 0.049041 / 0.038508 (0.010533) | 0.054758 / 0.023109 (0.031649) | 0.262330 / 0.275898 (-0.013568) | 0.288872 / 0.323480 (-0.034608) | 0.004016 / 0.007986 (-0.003970) | 0.002606 / 0.004328 (-0.001722) | 0.047878 / 0.004250 (0.043628) | 0.045066 / 0.037052 (0.008013) | 0.266310 / 0.258489 (0.007820) | 0.290072 / 0.293841 (-0.003768) | 0.028738 / 0.128546 (-0.099809) | 0.010667 / 0.075646 (-0.064979) | 0.057300 / 0.419271 (-0.361972) | 0.032715 / 0.043533 (-0.010818) | 0.264043 / 0.255139 (0.008904) | 0.278652 / 0.283200 (-0.004547) | 0.017873 / 0.141683 (-0.123810) | 1.125981 / 1.452155 (-0.326174) | 1.168548 / 1.492716 (-0.324168) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090997 / 0.018006 (0.072991) | 0.300807 / 0.000490 (0.300317) | 0.000223 / 0.000200 (0.000023) | 0.000043 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.021510 / 0.037411 (-0.015901) | 0.068251 / 0.014526 (0.053725) | 0.082073 / 0.176557 (-0.094484) | 0.120071 / 0.737135 (-0.617064) | 0.082245 / 0.296338 (-0.214093) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.290601 / 0.215209 (0.075392) | 2.871855 / 2.077655 (0.794200) | 1.558239 / 1.504120 (0.054119) | 1.447767 / 1.541195 (-0.093427) | 1.446851 / 1.468490 (-0.021639) | 0.573990 / 4.584777 (-4.010787) | 2.439859 / 3.745712 (-1.305853) | 2.795899 / 5.269862 (-2.473963) | 1.746751 / 4.565676 (-2.818926) | 0.062100 / 0.424275 (-0.362175) | 0.004948 / 0.007607 (-0.002659) | 0.344281 / 0.226044 (0.118236) | 3.427499 / 2.268929 (1.158570) | 1.940348 / 55.444624 (-53.504276) | 1.660926 / 6.876477 (-5.215551) | 1.669485 / 2.142072 (-0.472588) | 0.634034 / 4.805227 (-4.171193) | 0.114748 / 6.500664 (-6.385916) | 0.041617 / 0.075469 (-0.033852) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op 
batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.966411 / 1.841788 (-0.875376) | 12.040753 / 8.074308 (3.966445) | 10.506542 / 10.191392 (0.315150) | 0.129659 / 0.680424 (-0.550764) | 0.015691 / 0.534201 (-0.518510) | 0.286911 / 0.579283 (-0.292372) | 0.273588 / 0.434364 (-0.160776) | 0.333642 / 0.540337 (-0.206695) | 0.568550 / 1.386936 (-0.818386) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#b38ed4705263df92ae06d89baab0932ae10065e0 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005023 / 0.011353 (-0.006330) | 0.003492 / 0.011008 (-0.007516) | 0.062808 / 0.038508 (0.024300) | 0.051649 / 0.023109 (0.028540) | 0.246871 / 0.275898 (-0.029027) | 0.273430 / 0.323480 (-0.050050) | 0.003851 / 0.007986 (-0.004135) | 0.002643 / 0.004328 (-0.001686) | 0.048499 / 0.004250 (0.044248) | 0.037713 / 0.037052 (0.000661) | 0.256431 / 0.258489 (-0.002058) | 0.306956 / 0.293841 (0.013116) | 0.027116 / 0.128546 (-0.101430) | 0.010769 / 0.075646 (-0.064877) | 0.206218 / 0.419271 (-0.213053) | 0.035592 / 0.043533 (-0.007941) | 0.249629 / 0.255139 (-0.005510) | 0.268438 / 0.283200 (-0.014761) | 0.018557 / 0.141683 (-0.123125) | 1.123988 / 1.452155 (-0.328167) | 1.158196 / 1.492716 (-0.334520) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090221 / 0.018006 (0.072215) | 0.300892 / 0.000490 (0.300402) | 0.000209 / 0.000200 (0.000009) | 0.000046 / 0.000054 (-0.000008) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018305 / 0.037411 (-0.019106) | 0.060294 / 0.014526 (0.045769) | 0.073330 / 0.176557 (-0.103227) | 0.119620 / 0.737135 (-0.617515) | 0.074611 / 0.296338 (-0.221727) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | 
shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285347 / 0.215209 (0.070138) | 2.795144 / 2.077655 (0.717490) | 1.468321 / 1.504120 (-0.035799) | 1.343848 / 1.541195 (-0.197347) | 1.388998 / 1.468490 (-0.079492) | 0.559609 / 4.584777 (-4.025168) | 2.355056 / 3.745712 (-1.390656) | 2.798763 / 5.269862 (-2.471099) | 1.764371 / 4.565676 (-2.801305) | 0.062563 / 0.424275 (-0.361712) | 0.005101 / 0.007607 (-0.002506) | 0.339205 / 0.226044 (0.113161) | 3.336729 / 2.268929 (1.067800) | 1.801987 / 55.444624 (-53.642637) | 1.526720 / 6.876477 (-5.349757) | 1.539324 / 2.142072 (-0.602749) | 0.635805 / 4.805227 (-4.169422) | 0.138762 / 6.500664 (-6.361902) | 0.042092 / 0.075469 (-0.033377) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.928755 / 1.841788 (-0.913032) | 11.468224 / 8.074308 (3.393916) | 10.784568 / 10.191392 (0.593176) | 0.130332 / 0.680424 (-0.550092) | 0.014203 / 0.534201 (-0.519998) | 0.287125 / 0.579283 (-0.292158) | 0.263921 / 0.434364 (-0.170443) | 0.327824 / 0.540337 (-0.212513) | 0.434679 / 1.386936 (-0.952257) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005194 / 0.011353 (-0.006159) | 0.003411 / 0.011008 (-0.007598) | 0.050122 / 0.038508 (0.011614) | 0.049378 / 0.023109 (0.026269) | 0.272980 / 0.275898 (-0.002918) | 0.298047 / 0.323480 (-0.025433) | 0.003945 / 0.007986 (-0.004041) | 0.002633 / 0.004328 (-0.001696) | 0.048935 / 0.004250 (0.044685) | 0.040157 / 0.037052 (0.003104) | 0.277056 / 0.258489 (0.018567) | 0.299824 / 0.293841 (0.005983) | 0.028997 / 0.128546 (-0.099550) | 0.010868 / 0.075646 (-0.064779) | 0.057895 / 0.419271 (-0.361377) | 0.033522 / 0.043533 (-0.010010) | 0.274912 / 0.255139 (0.019773) | 0.288902 / 0.283200 (0.005702) | 0.018016 / 0.141683 
(-0.123667) | 1.116669 / 1.452155 (-0.335485) | 1.175007 / 1.492716 (-0.317710) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.090169 / 0.018006 (0.072163) | 0.310577 / 0.000490 (0.310087) | 0.000215 / 0.000200 (0.000015) | 0.000048 / 0.000054 (-0.000006) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.020448 / 0.037411 (-0.016963) | 0.068216 / 0.014526 (0.053690) | 0.081798 / 0.176557 (-0.094759) | 0.119151 / 0.737135 (-0.617985) | 0.085197 / 0.296338 (-0.211142) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.294957 / 0.215209 (0.079748) | 2.874065 / 2.077655 (0.796410) | 1.590963 / 1.504120 (0.086843) | 1.459596 / 1.541195 (-0.081599) | 1.467931 / 1.468490 (-0.000559) | 0.562832 / 4.584777 (-4.021944) | 2.426384 / 3.745712 (-1.319328) | 2.767749 / 5.269862 (-2.502112) | 1.746702 / 4.565676 (-2.818975) | 0.063353 / 0.424275 (-0.360922) | 0.005073 / 0.007607 (-0.002534) | 0.348258 / 0.226044 (0.122213) | 3.390351 / 2.268929 (1.121423) | 1.950092 / 55.444624 (-53.494532) | 1.671227 / 6.876477 (-5.205250) | 1.683349 / 2.142072 (-0.458723) | 0.637613 / 4.805227 (-4.167614) | 0.115172 / 6.500664 (-6.385492) | 0.040202 / 0.075469 (-0.035267) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963085 / 1.841788 (-0.878702) | 11.895384 / 8.074308 (3.821076) | 10.609906 / 10.191392 (0.418513) | 0.130865 / 0.680424 (-0.549559) | 0.016020 / 0.534201 (-0.518181) | 0.287540 / 0.579283 (-0.291743) | 0.278204 / 0.434364 (-0.156160) | 0.326007 / 0.540337 (-0.214330) | 0.590881 / 1.386936 (-0.796055) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#c291e330a7d460ff09d867377de1d4c53fd5394c \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after 
write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005266 / 0.011353 (-0.006087) | 0.003751 / 0.011008 (-0.007257) | 0.063835 / 0.038508 (0.025327) | 0.052688 / 0.023109 (0.029579) | 0.261957 / 0.275898 (-0.013941) | 0.284264 / 0.323480 (-0.039216) | 0.003958 / 0.007986 (-0.004027) | 0.002696 / 0.004328 (-0.001633) | 0.052791 / 0.004250 (0.048540) | 0.038294 / 0.037052 (0.001242) | 0.259488 / 0.258489 (0.000999) | 0.298368 / 0.293841 (0.004528) | 0.028309 / 0.128546 (-0.100237) | 0.010819 / 0.075646 (-0.064827) | 0.208221 / 0.419271 (-0.211050) | 0.036373 / 0.043533 (-0.007160) | 0.257000 / 0.255139 (0.001861) | 0.273108 / 0.283200 (-0.010092) | 0.019674 / 0.141683 (-0.122009) | 1.119196 / 1.452155 (-0.332958) | 1.161613 / 1.492716 (-0.331104) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093408 / 0.018006 (0.075401) | 0.302278 / 0.000490 (0.301788) | 0.000212 / 0.000200 (0.000012) | 0.000074 / 0.000054 (0.000020) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.019417 / 0.037411 (-0.017995) | 0.060847 / 0.014526 (0.046321) | 0.075399 / 0.176557 (-0.101158) | 0.121233 / 0.737135 (-0.615902) | 0.076916 / 0.296338 (-0.219422) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.281265 / 0.215209 (0.066056) | 2.651726 / 2.077655 (0.574072) | 1.457726 / 1.504120 (-0.046394) | 1.339250 / 1.541195 (-0.201945) | 1.398529 / 1.468490 (-0.069961) | 0.566574 / 4.584777 (-4.018203) | 2.431576 / 3.745712 (-1.314136) | 2.845884 / 5.269862 (-2.423977) | 1.798051 / 4.565676 (-2.767626) | 0.063619 / 0.424275 (-0.360656) | 0.005286 / 0.007607 (-0.002321) | 0.332834 / 0.226044 (0.106789) | 3.293222 / 2.268929 (1.024293) | 1.837810 / 55.444624 (-53.606815) | 1.568511 / 6.876477 (-5.307966) | 1.627518 / 2.142072 (-0.514555) | 0.643520 / 4.805227 (-4.161708) | 0.118482 / 6.500664 (-6.382182) | 0.049563 / 0.075469 (-0.025906) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | 
map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.947767 / 1.841788 (-0.894021) | 11.994999 / 8.074308 (3.920691) | 10.662651 / 10.191392 (0.471259) | 0.142070 / 0.680424 (-0.538354) | 0.014276 / 0.534201 (-0.519925) | 0.288455 / 0.579283 (-0.290828) | 0.266335 / 0.434364 (-0.168029) | 0.328455 / 0.540337 (-0.211883) | 0.440740 / 1.386936 (-0.946196) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005636 / 0.011353 (-0.005717) | 0.003664 / 0.011008 (-0.007344) | 0.050340 / 0.038508 (0.011832) | 0.062795 / 0.023109 (0.039685) | 0.280874 / 0.275898 (0.004976) | 0.314056 / 0.323480 (-0.009424) | 0.004089 / 0.007986 (-0.003897) | 0.002780 / 0.004328 (-0.001548) | 0.048468 / 0.004250 (0.044218) | 0.042924 / 0.037052 (0.005871) | 0.281381 / 0.258489 (0.022892) | 0.308232 / 0.293841 (0.014391) | 0.030294 / 0.128546 (-0.098252) | 0.011098 / 0.075646 (-0.064548) | 0.057535 / 0.419271 (-0.361736) | 0.034217 / 0.043533 (-0.009316) | 0.283022 / 0.255139 (0.027883) | 0.298425 / 0.283200 (0.015225) | 0.019285 / 0.141683 (-0.122398) | 1.117722 / 1.452155 (-0.334433) | 1.185878 / 1.492716 (-0.306839) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094915 / 0.018006 (0.076909) | 0.311782 / 0.000490 (0.311293) | 0.000217 / 0.000200 (0.000017) | 0.000054 / 0.000054 (-0.000001) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022652 / 0.037411 (-0.014759) | 0.069766 / 0.014526 (0.055240) | 0.084495 / 0.176557 (-0.092061) | 0.121295 / 0.737135 (-0.615841) | 0.082447 / 0.296338 (-0.213891) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | 
shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.294286 / 0.215209 (0.079077) | 2.863694 / 2.077655 (0.786039) | 1.578338 / 1.504120 (0.074219) | 1.478737 / 1.541195 (-0.062458) | 1.528569 / 1.468490 (0.060079) | 0.576944 / 4.584777 (-4.007833) | 2.438730 / 3.745712 (-1.306982) | 2.956138 / 5.269862 (-2.313723) | 1.844484 / 4.565676 (-2.721192) | 0.065980 / 0.424275 (-0.358295) | 0.004998 / 0.007607 (-0.002609) | 0.352063 / 0.226044 (0.126019) | 3.456355 / 2.268929 (1.187426) | 1.971582 / 55.444624 (-53.473042) | 1.684536 / 6.876477 (-5.191940) | 1.726823 / 2.142072 (-0.415250) | 0.660235 / 4.805227 (-4.144992) | 0.119029 / 6.500664 (-6.381635) | 0.042497 / 0.075469 (-0.032972) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.971817 / 1.841788 (-0.869970) | 12.900324 / 8.074308 (4.826015) | 10.957495 / 10.191392 (0.766103) | 0.133705 / 0.680424 (-0.546718) | 0.015669 / 0.534201 (-0.518532) | 0.287340 / 0.579283 (-0.291943) | 0.280380 / 0.434364 (-0.153984) | 0.330369 / 0.540337 (-0.209969) | 0.581793 / 1.386936 (-0.805143) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#c2af5efae1985499d6a0a1b6ab4120337eebf776 \"CML watermark\")\n", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005038 / 0.011353 (-0.006315) | 0.003737 / 0.011008 (-0.007272) | 0.063118 / 0.038508 (0.024610) | 0.050120 / 0.023109 (0.027011) | 0.240722 / 0.275898 (-0.035176) | 0.263128 / 0.323480 (-0.060352) | 0.003839 / 0.007986 (-0.004147) | 0.002718 / 0.004328 (-0.001610) | 0.047869 / 0.004250 (0.043618) | 0.038092 / 0.037052 (0.001040) | 0.245759 / 0.258489 (-0.012730) | 0.277728 / 0.293841 (-0.016113) | 0.027466 / 0.128546 (-0.101081) | 0.011767 / 0.075646 (-0.063879) | 0.205505 / 0.419271 (-0.213766) | 0.035429 / 0.043533 (-0.008104) | 
0.241665 / 0.255139 (-0.013474) | 0.260908 / 0.283200 (-0.022292) | 0.017133 / 0.141683 (-0.124550) | 1.107725 / 1.452155 (-0.344429) | 1.169707 / 1.492716 (-0.323009) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094112 / 0.018006 (0.076106) | 0.302596 / 0.000490 (0.302106) | 0.000237 / 0.000200 (0.000037) | 0.000041 / 0.000054 (-0.000013) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.017923 / 0.037411 (-0.019488) | 0.060356 / 0.014526 (0.045830) | 0.073708 / 0.176557 (-0.102849) | 0.119952 / 0.737135 (-0.617183) | 0.075350 / 0.296338 (-0.220989) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.289253 / 0.215209 (0.074044) | 2.800772 / 2.077655 (0.723117) | 1.538368 / 1.504120 (0.034248) | 1.401037 / 1.541195 (-0.140158) | 1.427170 / 1.468490 (-0.041320) | 0.560497 / 4.584777 (-4.024280) | 2.417844 / 3.745712 (-1.327868) | 2.798377 / 5.269862 (-2.471484) | 1.756517 / 4.565676 (-2.809160) | 0.063897 / 0.424275 (-0.360378) | 0.005323 / 0.007607 (-0.002284) | 0.339881 / 0.226044 (0.113836) | 3.354858 / 2.268929 (1.085929) | 1.877233 / 55.444624 (-53.567391) | 1.578713 / 6.876477 (-5.297764) | 1.631898 / 2.142072 (-0.510175) | 0.640303 / 4.805227 (-4.164924) | 0.116731 / 6.500664 (-6.383933) | 0.041978 / 0.075469 (-0.033491) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.963259 / 1.841788 (-0.878529) | 11.983646 / 8.074308 (3.909338) | 10.561596 / 10.191392 (0.370204) | 0.135863 / 0.680424 (-0.544561) | 0.015607 / 0.534201 (-0.518594) | 0.295164 / 0.579283 (-0.284119) | 0.283366 / 0.434364 (-0.150998) | 0.341848 / 0.540337 (-0.198489) | 0.448359 / 1.386936 (-0.938577) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | 
read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005366 / 0.011353 (-0.005987) | 0.003621 / 0.011008 (-0.007387) | 0.048615 / 0.038508 (0.010107) | 0.053950 / 0.023109 (0.030841) | 0.273112 / 0.275898 (-0.002786) | 0.295655 / 0.323480 (-0.027825) | 0.004066 / 0.007986 (-0.003920) | 0.002700 / 0.004328 (-0.001628) | 0.047899 / 0.004250 (0.043648) | 0.041633 / 0.037052 (0.004581) | 0.277760 / 0.258489 (0.019271) | 0.302068 / 0.293841 (0.008227) | 0.028879 / 0.128546 (-0.099668) | 0.010756 / 0.075646 (-0.064891) | 0.057190 / 0.419271 (-0.362082) | 0.032555 / 0.043533 (-0.010978) | 0.272045 / 0.255139 (0.016906) | 0.289330 / 0.283200 (0.006130) | 0.018466 / 0.141683 (-0.123216) | 1.180435 / 1.452155 (-0.271720) | 1.192228 / 1.492716 (-0.300488) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.094871 / 0.018006 (0.076864) | 0.302552 / 0.000490 (0.302062) | 0.000224 / 0.000200 (0.000024) | 0.000044 / 0.000054 (-0.000011) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.022008 / 0.037411 (-0.015403) | 0.068528 / 0.014526 (0.054002) | 0.081735 / 0.176557 (-0.094821) | 0.120990 / 0.737135 (-0.616145) | 0.083155 / 0.296338 (-0.213184) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.305030 / 0.215209 (0.089821) | 3.009812 / 2.077655 (0.932158) | 1.677773 / 1.504120 (0.173654) | 1.552280 / 1.541195 (0.011085) | 1.606248 / 1.468490 (0.137758) | 0.557093 / 4.584777 (-4.027684) | 2.418292 / 3.745712 (-1.327420) | 2.813049 / 5.269862 (-2.456813) | 1.764507 / 4.565676 (-2.801169) | 0.065089 / 0.424275 (-0.359186) | 0.004944 / 0.007607 (-0.002663) | 0.360672 / 0.226044 (0.134628) | 3.525850 / 2.268929 (1.256921) | 2.030091 / 55.444624 (-53.414533) | 1.754669 / 6.876477 (-5.121807) | 1.772673 / 2.142072 (-0.369399) | 0.642904 / 4.805227 (-4.162324) | 0.116018 / 6.500664 (-6.384646) | 0.041308 / 0.075469 (-0.034161) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched 
pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.986386 / 1.841788 (-0.855401) | 12.291623 / 8.074308 (4.217315) | 10.655932 / 10.191392 (0.464540) | 0.141736 / 0.680424 (-0.538688) | 0.016669 / 0.534201 (-0.517532) | 0.286875 / 0.579283 (-0.292408) | 0.281898 / 0.434364 (-0.152466) | 0.325206 / 0.540337 (-0.215132) | 0.577607 / 1.386936 (-0.809329) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#1cf33502493fb9760ea8cc8e51622bf94d0c9e31 \"CML watermark\")\n", "Alright tests are passing (except one on temp dir cleanup windows but I don't think it's related to this PR ?)\r\n\r\n```\r\nFAILED tests/test_load.py::test_loading_from_the_datasets_hub - NotADirectoryError: [WinError 267] The directory name is invalid: 'C:\\\\Users\\\\RUNNER~1\\\\AppData\\\\Local\\\\Temp\\\\tmpqy3f2ft_\\\\hf-internal-testing___dataset_with_script\\\\default\\\\0.0.0\\\\c240e2be3370bdbd\\\\dataset_with_script-train.arrow'\r\n```", "<details>\n<summary>Show benchmarks</summary>\n\nPyArrow==8.0.0\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005072 / 0.011353 (-0.006281) | 0.003449 / 0.011008 (-0.007559) | 0.062630 / 0.038508 (0.024122) | 0.054276 / 0.023109 (0.031167) | 0.253345 / 0.275898 (-0.022553) | 0.273460 / 0.323480 (-0.050020) | 0.003859 / 0.007986 (-0.004127) | 0.002646 / 0.004328 (-0.001683) | 0.048289 / 0.004250 (0.044038) | 0.037943 / 0.037052 (0.000891) | 0.256569 / 0.258489 (-0.001920) | 0.287809 / 0.293841 (-0.006032) | 0.027675 / 0.128546 (-0.100872) | 0.010554 / 0.075646 (-0.065092) | 0.205157 / 0.419271 (-0.214115) | 0.035464 / 0.043533 (-0.008069) | 0.254300 / 0.255139 (-0.000839) | 0.272907 / 0.283200 (-0.010292) | 0.018146 / 0.141683 (-0.123537) | 1.110528 / 1.452155 (-0.341626) | 1.170156 / 1.492716 (-0.322560) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.093151 / 0.018006 (0.075144) | 0.302087 / 0.000490 (0.301598) | 0.000216 / 0.000200 (0.000016) | 0.000042 / 0.000054 (-0.000012) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.018744 / 0.037411 (-0.018667) | 
0.059843 / 0.014526 (0.045317) | 0.073165 / 0.176557 (-0.103391) | 0.120464 / 0.737135 (-0.616671) | 0.074992 / 0.296338 (-0.221347) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.285103 / 0.215209 (0.069894) | 2.820254 / 2.077655 (0.742600) | 1.505336 / 1.504120 (0.001216) | 1.368631 / 1.541195 (-0.172564) | 1.404140 / 1.468490 (-0.064350) | 0.563906 / 4.584777 (-4.020871) | 2.411871 / 3.745712 (-1.333841) | 2.788390 / 5.269862 (-2.481471) | 1.749788 / 4.565676 (-2.815888) | 0.062171 / 0.424275 (-0.362104) | 0.004918 / 0.007607 (-0.002689) | 0.339615 / 0.226044 (0.113571) | 3.337789 / 2.268929 (1.068861) | 1.808445 / 55.444624 (-53.636180) | 1.541015 / 6.876477 (-5.335462) | 1.572389 / 2.142072 (-0.569683) | 0.641739 / 4.805227 (-4.163488) | 0.115844 / 6.500664 (-6.384820) | 0.042504 / 0.075469 (-0.032965) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.942463 / 1.841788 (-0.899325) | 11.602364 / 8.074308 (3.528056) | 10.628921 / 10.191392 (0.437529) | 0.136154 / 0.680424 (-0.544270) | 0.013842 / 0.534201 (-0.520359) | 0.287085 / 0.579283 (-0.292198) | 0.269860 / 0.434364 (-0.164503) | 0.329525 / 0.540337 (-0.210812) | 0.441287 / 1.386936 (-0.945649) |\n\n</details>\nPyArrow==latest\n\n<details>\n<summary>Show updated benchmarks!</summary>\n\n### Benchmark: benchmark_array_xd.json\n\n| metric | read_batch_formatted_as_numpy after write_array2d | read_batch_formatted_as_numpy after write_flattened_sequence | read_batch_formatted_as_numpy after write_nested_sequence | read_batch_unformated after write_array2d | read_batch_unformated after write_flattened_sequence | read_batch_unformated after write_nested_sequence | read_col_formatted_as_numpy after write_array2d | read_col_formatted_as_numpy after write_flattened_sequence | read_col_formatted_as_numpy after write_nested_sequence | read_col_unformated after write_array2d | read_col_unformated after write_flattened_sequence | read_col_unformated after write_nested_sequence | read_formatted_as_numpy after write_array2d | read_formatted_as_numpy after write_flattened_sequence | read_formatted_as_numpy after write_nested_sequence | read_unformated after write_array2d | read_unformated after write_flattened_sequence | read_unformated after write_nested_sequence | write_array2d | write_flattened_sequence | write_nested_sequence |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.005215 / 0.011353 (-0.006138) | 0.003549 / 0.011008 (-0.007460) | 0.049199 / 0.038508 (0.010691) | 0.051655 / 0.023109 (0.028545) | 0.272150 / 
0.275898 (-0.003748) | 0.291978 / 0.323480 (-0.031502) | 0.003985 / 0.007986 (-0.004001) | 0.002668 / 0.004328 (-0.001661) | 0.048524 / 0.004250 (0.044274) | 0.039824 / 0.037052 (0.002772) | 0.275566 / 0.258489 (0.017077) | 0.298076 / 0.293841 (0.004235) | 0.029508 / 0.128546 (-0.099038) | 0.010673 / 0.075646 (-0.064973) | 0.057327 / 0.419271 (-0.361944) | 0.032590 / 0.043533 (-0.010943) | 0.273295 / 0.255139 (0.018156) | 0.289127 / 0.283200 (0.005928) | 0.017694 / 0.141683 (-0.123989) | 1.134502 / 1.452155 (-0.317653) | 1.185603 / 1.492716 (-0.307114) |\n\n### Benchmark: benchmark_getitem\\_100B.json\n\n| metric | get_batch_of\\_1024\\_random_rows | get_batch_of\\_1024\\_rows | get_first_row | get_last_row |\n|--------|---|---|---|---|\n| new / old (diff) | 0.098403 / 0.018006 (0.080396) | 0.302735 / 0.000490 (0.302245) | 0.000228 / 0.000200 (0.000028) | 0.000044 / 0.000054 (-0.000010) |\n\n### Benchmark: benchmark_indices_mapping.json\n\n| metric | select | shard | shuffle | sort | train_test_split |\n|--------|---|---|---|---|---|\n| new / old (diff) | 0.025192 / 0.037411 (-0.012219) | 0.068149 / 0.014526 (0.053623) | 0.082220 / 0.176557 (-0.094336) | 0.119491 / 0.737135 (-0.617645) | 0.082484 / 0.296338 (-0.213855) |\n\n### Benchmark: benchmark_iterating.json\n\n| metric | read 5000 | read 50000 | read_batch 50000 10 | read_batch 50000 100 | read_batch 50000 1000 | read_formatted numpy 5000 | read_formatted pandas 5000 | read_formatted tensorflow 5000 | read_formatted torch 5000 | read_formatted_batch numpy 5000 10 | read_formatted_batch numpy 5000 1000 | shuffled read 5000 | shuffled read 50000 | shuffled read_batch 50000 10 | shuffled read_batch 50000 100 | shuffled read_batch 50000 1000 | shuffled read_formatted numpy 5000 | shuffled read_formatted_batch numpy 5000 10 | shuffled read_formatted_batch numpy 5000 1000 |\n|--------|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.295339 / 0.215209 (0.080130) | 2.868411 / 2.077655 (0.790757) | 1.590665 / 1.504120 (0.086545) | 1.465995 / 1.541195 (-0.075200) | 1.489205 / 1.468490 (0.020715) | 0.562503 / 4.584777 (-4.022274) | 2.480100 / 3.745712 (-1.265613) | 2.774216 / 5.269862 (-2.495646) | 1.733129 / 4.565676 (-2.832548) | 0.062698 / 0.424275 (-0.361577) | 0.004910 / 0.007607 (-0.002697) | 0.354766 / 0.226044 (0.128722) | 3.435541 / 2.268929 (1.166613) | 1.953357 / 55.444624 (-53.491267) | 1.673584 / 6.876477 (-5.202893) | 1.677749 / 2.142072 (-0.464323) | 0.632601 / 4.805227 (-4.172626) | 0.114875 / 6.500664 (-6.385789) | 0.040577 / 0.075469 (-0.034892) |\n\n### Benchmark: benchmark_map_filter.json\n\n| metric | filter | map fast-tokenizer batched | map identity | map identity batched | map no-op batched | map no-op batched numpy | map no-op batched pandas | map no-op batched pytorch | map no-op batched tensorflow |\n|--------|---|---|---|---|---|---|---|---|---|\n| new / old (diff) | 0.967003 / 1.841788 (-0.874785) | 11.964490 / 8.074308 (3.890181) | 10.493812 / 10.191392 (0.302420) | 0.132177 / 0.680424 (-0.548247) | 0.015149 / 0.534201 (-0.519052) | 0.289011 / 0.579283 (-0.290272) | 0.285479 / 0.434364 (-0.148885) | 0.327090 / 0.540337 (-0.213248) | 0.571747 / 1.386936 (-0.815189) |\n\n</details>\n</details>\n\n![](https://cml.dev/watermark.png#4c9b4cb7ee4720415261216d72051e2a3320fe41 \"CML watermark\")\n" ]
"2023-11-23T17:31:57Z"
"2023-12-01T17:57:17Z"
"2023-12-01T17:50:59Z"
MEMBER
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/6448.diff", "html_url": "https://github.com/huggingface/datasets/pull/6448", "merged_at": "2023-12-01T17:50:59Z", "patch_url": "https://github.com/huggingface/datasets/pull/6448.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/6448" }
The idea is to make this code work for datasets with scripts if they have a Parquet export ```python ds = load_dataset("squad", trust_remote_code=False) ``` And more generally, it means we use the Parquet export whenever possible (it's safer and faster than dataset scripts). I also added a `config.USE_PARQUET_EXPORT` variable to use in the datasets-server parquet conversion job. - [x] Needs https://github.com/huggingface/datasets/pull/6429 to be merged first. cc @severo: I use the /parquet and /info endpoints from datasets-server
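As a companion to the PR description above, here is a minimal sketch of how a client might probe the datasets-server `/parquet` endpoint before loading without remote code. The helper name and the exact response shape (a `parquet_files` list) are assumptions for illustration, not the PR's actual implementation:

```python
# Hedged sketch: check for a Parquet export via datasets-server, then load
# the dataset without executing its loading script.
import requests
from datasets import load_dataset

def has_parquet_export(dataset_name: str) -> bool:
    # /parquet lists the files of a dataset's Parquet export; a successful
    # response with a non-empty "parquet_files" list means one exists.
    resp = requests.get(
        "https://datasets-server.huggingface.co/parquet",
        params={"dataset": dataset_name},
        timeout=10,
    )
    return resp.ok and bool(resp.json().get("parquet_files"))

if has_parquet_export("squad"):
    # With trust_remote_code=False the script is not executed, so the
    # library can fall back on the Parquet export instead.
    ds = load_dataset("squad", trust_remote_code=False)
```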
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 2, "laugh": 0, "rocket": 0, "total_count": 2, "url": "https://api.github.com/repos/huggingface/datasets/issues/6448/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/6448/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1042
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1042/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1042/comments
https://api.github.com/repos/huggingface/datasets/issues/1042/events
https://github.com/huggingface/datasets/pull/1042
756,097,583
MDExOlB1bGxSZXF1ZXN0NTMxNjk3NDU4
1,042
Add Big Patent dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/46804938?v=4", "events_url": "https://api.github.com/users/mattbui/events{/privacy}", "followers_url": "https://api.github.com/users/mattbui/followers", "following_url": "https://api.github.com/users/mattbui/following{/other_user}", "gists_url": "https://api.github.com/users/mattbui/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mattbui", "id": 46804938, "login": "mattbui", "node_id": "MDQ6VXNlcjQ2ODA0OTM4", "organizations_url": "https://api.github.com/users/mattbui/orgs", "received_events_url": "https://api.github.com/users/mattbui/received_events", "repos_url": "https://api.github.com/users/mattbui/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mattbui/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mattbui/subscriptions", "type": "User", "url": "https://api.github.com/users/mattbui" }
[]
closed
false
null
[]
null
[ "Looks like this PR include changes about many other files than the ones related to big patent.\r\nCould you create another branch and another PR ?", "@lhoestq Just created a new PR here: https://github.com/huggingface/datasets/pull/1087" ]
"2020-12-03T11:07:59Z"
"2020-12-04T04:38:26Z"
"2020-12-04T04:38:26Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1042.diff", "html_url": "https://github.com/huggingface/datasets/pull/1042", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1042.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1042" }
- More info on the dataset: https://evasharma.github.io/bigpatent/ - There's another raw version of the dataset available from tfds. However, they're quite large, so I don't have the resources to fully test all the configs for that version yet. We'll try to add it later. - ~Currently, there are no dummy data for this dataset yet as I'm facing some problems with generating them. I'm trying to add them later.~
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1042/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1042/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/4622
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4622/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4622/comments
https://api.github.com/repos/huggingface/datasets/issues/4622/events
https://github.com/huggingface/datasets/pull/4622
1,293,031,939
PR_kwDODunzps46ynmT
4,622
Fix ImageFolder with parameters drop_metadata=True and drop_labels=False (when metadata.jsonl is present)
{ "avatar_url": "https://avatars.githubusercontent.com/u/16348744?v=4", "events_url": "https://api.github.com/users/polinaeterna/events{/privacy}", "followers_url": "https://api.github.com/users/polinaeterna/followers", "following_url": "https://api.github.com/users/polinaeterna/following{/other_user}", "gists_url": "https://api.github.com/users/polinaeterna/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/polinaeterna", "id": 16348744, "login": "polinaeterna", "node_id": "MDQ6VXNlcjE2MzQ4NzQ0", "organizations_url": "https://api.github.com/users/polinaeterna/orgs", "received_events_url": "https://api.github.com/users/polinaeterna/received_events", "repos_url": "https://api.github.com/users/polinaeterna/repos", "site_admin": false, "starred_url": "https://api.github.com/users/polinaeterna/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/polinaeterna/subscriptions", "type": "User", "url": "https://api.github.com/users/polinaeterna" }
[]
closed
false
null
[]
null
[ "_The documentation is not available anymore as the PR was closed or merged._", "@lhoestq @mariosasko pls take a look at https://github.com/huggingface/datasets/pull/4622/commits/769e4c046a5bd5e3a4dbd09cfad1f4cf60677869. I modified `_generate_examples()` according to the same logic too: removed checking if `metadata_files` are not empty for the case when `self.config.drop_metadata=True` because I think we should be aligned with the config and preserve labels if `self.config.drop_labels=False` (the default value) and `self.config.drop_metadata=True` but `metadata_files` are passed. This is an extremely unlikely use case (when `self.config.drop_metadata=True`, but `metadata_files` are passed to `_generate_examples()`) since users usually do not use `_generate_examples()` alone but I believe it would be consistent to have the same behavior as in `_splits_generators()`. This change requires change in tests too if we suppose that we want to preserve labels (default value of `self.config.drop_labels` is False) when `self.config.drop_metadata=True`, even if `metadata_files` are for some reason provided (as it is done in tests). \r\n\r\nwdyt about this change?\r\n", "@lhoestq it wouldn't raise an error if we check `example.keys() == {\"image\", \"label\"}` as test checks only `_generate_examples`, not `encode_example`. and in the implementation of this PR `_generate_examples` would return both `image` and `label` key in the case when `drop_metadata=True` and `drop_labels=False` (default) as it seems that we agreed on that :)", "and on the other hand it would raise an error if `label` column is missing in _generate_examples when `drop_metadata=True` and `drop_labels=False`\r\n\r\nby \"it\" i mean tests :D (`test_generate_examples_with_metadata_that_misses_one_image`, `test_generate_examples_with_metadata_in_wrong_location` and `test_generate_examples_drop_metadata`)", "Perhaps we could make `self.config.drop_metadata = None` and `self.config.drop_labels = None` the defaults to see explicitly what the user wants. This would then turn into `self.config.drop_metadata = False` and `self.config.drop_labels = True` if metadata files are present and `self.config.drop_metadata = True` and `self.config.drop_labels = False` if not. And if the user wants to have the `label` column alongside metadata columns, it can do so by passing `drop_labels = False` explicitely (in that scenario we have to check that the `label` column is not already present in metadata files). And maybe we can also improve the logging messages.\r\n\r\nI find it problematic that the current implementation drops labels in some scenarios even if `self.config.drop_labels = False`, and the user doesn't have control over this behavior.\r\n\r\nLet me know what you think." ]
"2022-07-04T11:23:20Z"
"2022-07-15T14:37:23Z"
"2022-07-15T14:24:24Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/4622.diff", "html_url": "https://github.com/huggingface/datasets/pull/4622", "merged_at": "2022-07-15T14:24:24Z", "patch_url": "https://github.com/huggingface/datasets/pull/4622.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/4622" }
Will fix #4621: ImageFolder raises `KeyError: 'label'` with params `drop_metadata=True` and `drop_labels=False` (if there is at least one metadata.jsonl file in a data directory). This happens because metadata files are collected inside the `analyze()` function regardless of the `drop_metadata` value, and then the following condition doesn't pass: https://github.com/huggingface/datasets/blob/master/src/datasets/packaged_modules/imagefolder/imagefolder.py#L167 So I suggest double-checking it inside `analyze()` so that metadata files are not collected when they are not needed (and labels too, to be consistent); see the sketch below. --- Also, I added a test to check that labels are inferred correctly from directory names in general (because we didn't have one) :)
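A hedged sketch of that double check, simplified and not the actual `imagefolder.py` source (the names of the collections are assumptions):

```python
# Illustrative version of analyze(): metadata files and labels are only
# collected when the config says they will actually be used.
import os

def analyze(files, config, labels, metadata_files):
    for file in files:
        name = os.path.basename(file)
        if name == "metadata.jsonl":
            if not config.drop_metadata:
                # Remember metadata files only if metadata is kept.
                metadata_files.add(file)
        elif not config.drop_labels:
            # Infer a label from the parent directory name only if
            # labels are kept.
            labels.add(os.path.basename(os.path.dirname(file)))
```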
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4622/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4622/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1510
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1510/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1510/comments
https://api.github.com/repos/huggingface/datasets/issues/1510/events
https://github.com/huggingface/datasets/pull/1510
763,980,369
MDExOlB1bGxSZXF1ZXN0NTM4MjU4NDg3
1,510
Add Dataset for (qa_srl)Question-Answer Driven Semantic Role Labeling
{ "avatar_url": "https://avatars.githubusercontent.com/u/12439573?v=4", "events_url": "https://api.github.com/users/bpatidar/events{/privacy}", "followers_url": "https://api.github.com/users/bpatidar/followers", "following_url": "https://api.github.com/users/bpatidar/following{/other_user}", "gists_url": "https://api.github.com/users/bpatidar/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/bpatidar", "id": 12439573, "login": "bpatidar", "node_id": "MDQ6VXNlcjEyNDM5NTcz", "organizations_url": "https://api.github.com/users/bpatidar/orgs", "received_events_url": "https://api.github.com/users/bpatidar/received_events", "repos_url": "https://api.github.com/users/bpatidar/repos", "site_admin": false, "starred_url": "https://api.github.com/users/bpatidar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/bpatidar/subscriptions", "type": "User", "url": "https://api.github.com/users/bpatidar" }
[]
closed
false
null
[]
null
[ "Hii please follow me", "merging since the CI is fixed on master" ]
"2020-12-12T15:48:11Z"
"2020-12-17T16:06:22Z"
"2020-12-17T16:06:22Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1510.diff", "html_url": "https://github.com/huggingface/datasets/pull/1510", "merged_at": "2020-12-17T16:06:22Z", "patch_url": "https://github.com/huggingface/datasets/pull/1510.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1510" }
- Added tags, Readme file - Added code changes
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1510/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1510/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/1263
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1263/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1263/comments
https://api.github.com/repos/huggingface/datasets/issues/1263/events
https://github.com/huggingface/datasets/pull/1263
758,663,787
MDExOlB1bGxSZXF1ZXN0NTMzNzk5NzU5
1,263
Added kannada news headlines classification dataset.
{ "avatar_url": "https://avatars.githubusercontent.com/u/16264631?v=4", "events_url": "https://api.github.com/users/vrindaprabhu/events{/privacy}", "followers_url": "https://api.github.com/users/vrindaprabhu/followers", "following_url": "https://api.github.com/users/vrindaprabhu/following{/other_user}", "gists_url": "https://api.github.com/users/vrindaprabhu/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/vrindaprabhu", "id": 16264631, "login": "vrindaprabhu", "node_id": "MDQ6VXNlcjE2MjY0NjMx", "organizations_url": "https://api.github.com/users/vrindaprabhu/orgs", "received_events_url": "https://api.github.com/users/vrindaprabhu/received_events", "repos_url": "https://api.github.com/users/vrindaprabhu/repos", "site_admin": false, "starred_url": "https://api.github.com/users/vrindaprabhu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/vrindaprabhu/subscriptions", "type": "User", "url": "https://api.github.com/users/vrindaprabhu" }
[]
closed
false
null
[]
null
[ "Hi! Let me know if any more comments! Will fix it! :-)" ]
"2020-12-07T16:35:37Z"
"2020-12-10T14:30:55Z"
"2020-12-09T18:01:31Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1263.diff", "html_url": "https://github.com/huggingface/datasets/pull/1263", "merged_at": "2020-12-09T18:01:31Z", "patch_url": "https://github.com/huggingface/datasets/pull/1263.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1263" }
Manual download of a Kaggle dataset. Mostly followed the same process as ms_terms; a sketch of the pattern is below.
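Since the dataset needs a manual download, here is a hedged sketch of the usual manual-download pattern in a loading script. The class, file, and column names are assumptions for illustration, not the actual script:

```python
# Illustrative manual-download builder: the user downloads the Kaggle
# archive themselves and passes its folder via data_dir.
import csv
import os
import datasets

class KannadaNewsHeadlines(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    @property
    def manual_download_instructions(self):
        return "Download the archive from Kaggle and pass its folder via data_dir."

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {"headline": datasets.Value("string"), "label": datasets.Value("string")}
            )
        )

    def _split_generators(self, dl_manager):
        # dl_manager.manual_dir points at the folder the user passed.
        data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        csv_path = os.path.join(data_dir, "train.csv")  # assumed file name
        if not os.path.exists(csv_path):
            raise FileNotFoundError(f"{csv_path} not found; see manual_download_instructions")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"filepath": csv_path}
            )
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            for idx, row in enumerate(csv.DictReader(f)):
                # Column names are assumed for this sketch.
                yield idx, {"headline": row.get("headline", ""), "label": row.get("label", "")}
```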
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1263/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1263/timeline
null
null
true
https://api.github.com/repos/huggingface/datasets/issues/3320
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/3320/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/3320/comments
https://api.github.com/repos/huggingface/datasets/issues/3320/events
https://github.com/huggingface/datasets/issues/3320
1,063,531,992
I_kwDODunzps4_ZDXY
3,320
Can't get tatoeba.rus dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/65535131?v=4", "events_url": "https://api.github.com/users/mmg10/events{/privacy}", "followers_url": "https://api.github.com/users/mmg10/followers", "following_url": "https://api.github.com/users/mmg10/following{/other_user}", "gists_url": "https://api.github.com/users/mmg10/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/mmg10", "id": 65535131, "login": "mmg10", "node_id": "MDQ6VXNlcjY1NTM1MTMx", "organizations_url": "https://api.github.com/users/mmg10/orgs", "received_events_url": "https://api.github.com/users/mmg10/received_events", "repos_url": "https://api.github.com/users/mmg10/repos", "site_admin": false, "starred_url": "https://api.github.com/users/mmg10/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mmg10/subscriptions", "type": "User", "url": "https://api.github.com/users/mmg10" }
[ { "color": "d73a4a", "default": true, "description": "Something isn't working", "id": 1935892857, "name": "bug", "node_id": "MDU6TGFiZWwxOTM1ODkyODU3", "url": "https://api.github.com/repos/huggingface/datasets/labels/bug" } ]
closed
false
null
[]
null
[]
"2021-11-25T12:31:11Z"
"2021-11-26T10:30:29Z"
"2021-11-26T10:30:29Z"
NONE
null
null
null
## Describe the bug Loading the dataset fails with a `FileNotFoundError`: > FileNotFoundError: Couldn't find file at https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.rus-eng.rus ## Steps to reproduce the bug ```python data=load_dataset("xtreme","tatoeba.rus", split="validation") ``` ## Solution The library tries to access the **master** branch, but in the facebookresearch GitHub repo the file is on the **main** branch.
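The fix suggested in the issue amounts to a one-line URL change in the loading script; a minimal sketch (illustrative, not the actual `xtreme` source):

```python
# Rewrite the raw GitHub URL from the removed "master" branch to "main".
def fix_branch(url: str) -> str:
    return url.replace("/raw/master/", "/raw/main/")

old = "https://github.com/facebookresearch/LASER/raw/master/data/tatoeba/v1/tatoeba.rus-eng.rus"
assert fix_branch(old) == (
    "https://github.com/facebookresearch/LASER/raw/main/data/tatoeba/v1/tatoeba.rus-eng.rus"
)
```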
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/3320/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/3320/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/4181
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/4181/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/4181/comments
https://api.github.com/repos/huggingface/datasets/issues/4181/events
https://github.com/huggingface/datasets/issues/4181
1,208,194,805
I_kwDODunzps5IA5b1
4,181
Support streaming FLEURS dataset
{ "avatar_url": "https://avatars.githubusercontent.com/u/23423619?v=4", "events_url": "https://api.github.com/users/patrickvonplaten/events{/privacy}", "followers_url": "https://api.github.com/users/patrickvonplaten/followers", "following_url": "https://api.github.com/users/patrickvonplaten/following{/other_user}", "gists_url": "https://api.github.com/users/patrickvonplaten/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/patrickvonplaten", "id": 23423619, "login": "patrickvonplaten", "node_id": "MDQ6VXNlcjIzNDIzNjE5", "organizations_url": "https://api.github.com/users/patrickvonplaten/orgs", "received_events_url": "https://api.github.com/users/patrickvonplaten/received_events", "repos_url": "https://api.github.com/users/patrickvonplaten/repos", "site_admin": false, "starred_url": "https://api.github.com/users/patrickvonplaten/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/patrickvonplaten/subscriptions", "type": "User", "url": "https://api.github.com/users/patrickvonplaten" }
[ { "color": "2edb81", "default": false, "description": "A bug in a dataset script provided in the library", "id": 2067388877, "name": "dataset bug", "node_id": "MDU6TGFiZWwyMDY3Mzg4ODc3", "url": "https://api.github.com/repos/huggingface/datasets/labels/dataset%20bug" } ]
closed
false
null
[]
null
[ "Yes, you just have to use `dl_manager.iter_archive` instead of `dl_manager.download_and_extract`.\r\n\r\nThat's because `download_and_extract` doesn't support TAR archives in streaming mode.", "Tried to make it streamable, but I don't think it's really possible. @lhoestq @polinaeterna maybe you guys can check: \r\nhttps://huggingface.co/datasets/google/fleurs/commit/dcf80160cd77977490a8d32b370c027107f2407b \r\n\r\nreal quick. \r\n\r\nI think the problem is that we cannot ensure that the metadata file is found before the audio. Or is this possible somehow @lhoestq ? ", "@patrickvonplaten I think the metadata file should be found first because the audio files are contained in a folder next to the metadata files (just as in common voice), so the metadata files should be \"on top of the list\" as they are closer to the root in the directories hierarchy ", "@patrickvonplaten but apparently it doesn't... I don't really know why.", "Yeah! Any ideas what could be the reason here? cc @lhoestq ?", "The order of the files is determined when the TAR archive is created, depending on the commands the creator ran.\r\nIf the metadata file is not at the beginning of the file, that makes streaming completely inefficient. In this case the TAR archive needs to be recreated in an appropriate order.", "Actually we could maybe just host the metadata file ourselves and then stream the audio data only. Don't think that this would be a problem for the FLEURS authors (I can ask them :-)) ", "I made a PR to their repo to support streaming (by uploading the metadata file to the Hub). See:\r\n- https://huggingface.co/datasets/google/fleurs/discussions/4", "I'm closing this issue as the PR above has been merged." ]
"2022-04-19T11:09:56Z"
"2022-07-25T11:44:02Z"
"2022-07-25T11:44:02Z"
MEMBER
null
null
null
## Dataset viewer issue for 'google/fleurs' https://huggingface.co/datasets/google/fleurs ``` Status code: 400 Exception: NotImplementedError Message: Extraction protocol for TAR archives like 'https://storage.googleapis.com/xtreme_translations/FLEURS/af_za.tar.gz' is not implemented in streaming mode. Please use `dl_manager.iter_archive` instead. ``` Am I the one who added this dataset? Yes. Can I fix this somehow in the script? @lhoestq @severo
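For reference, once streaming support landed (per the closing comment and the linked Hub PR), the dataset should be loadable in streaming mode roughly as below; the config name `af_za` is inferred from the archive URL in the error message above, so treat it as an assumption:

```python
from datasets import load_dataset

# Stream FLEURS without downloading the full TAR archives first.
fleurs = load_dataset("google/fleurs", "af_za", split="train", streaming=True)
print(next(iter(fleurs)))  # first example, fetched lazily
```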
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/4181/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/4181/timeline
null
completed
false
https://api.github.com/repos/huggingface/datasets/issues/1421
https://api.github.com/repos/huggingface/datasets
https://api.github.com/repos/huggingface/datasets/issues/1421/labels{/name}
https://api.github.com/repos/huggingface/datasets/issues/1421/comments
https://api.github.com/repos/huggingface/datasets/issues/1421/events
https://github.com/huggingface/datasets/pull/1421
760,706,851
MDExOlB1bGxSZXF1ZXN0NTM1NDkzMzU4
1,421
adding fake-news-english-2
{ "avatar_url": "https://avatars.githubusercontent.com/u/15351802?v=4", "events_url": "https://api.github.com/users/MisbahKhan789/events{/privacy}", "followers_url": "https://api.github.com/users/MisbahKhan789/followers", "following_url": "https://api.github.com/users/MisbahKhan789/following{/other_user}", "gists_url": "https://api.github.com/users/MisbahKhan789/gists{/gist_id}", "gravatar_id": "", "html_url": "https://github.com/MisbahKhan789", "id": 15351802, "login": "MisbahKhan789", "node_id": "MDQ6VXNlcjE1MzUxODAy", "organizations_url": "https://api.github.com/users/MisbahKhan789/orgs", "received_events_url": "https://api.github.com/users/MisbahKhan789/received_events", "repos_url": "https://api.github.com/users/MisbahKhan789/repos", "site_admin": false, "starred_url": "https://api.github.com/users/MisbahKhan789/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MisbahKhan789/subscriptions", "type": "User", "url": "https://api.github.com/users/MisbahKhan789" }
[]
closed
false
null
[]
null
[]
"2020-12-09T22:05:13Z"
"2020-12-13T00:48:49Z"
"2020-12-13T00:48:49Z"
CONTRIBUTOR
null
0
{ "diff_url": "https://github.com/huggingface/datasets/pull/1421.diff", "html_url": "https://github.com/huggingface/datasets/pull/1421", "merged_at": null, "patch_url": "https://github.com/huggingface/datasets/pull/1421.patch", "url": "https://api.github.com/repos/huggingface/datasets/pulls/1421" }
{ "+1": 0, "-1": 0, "confused": 0, "eyes": 0, "heart": 0, "hooray": 0, "laugh": 0, "rocket": 0, "total_count": 0, "url": "https://api.github.com/repos/huggingface/datasets/issues/1421/reactions" }
https://api.github.com/repos/huggingface/datasets/issues/1421/timeline
null
null
true