parquet-converter committed on
Commit
f349b68
1 parent: 7b0b1a6

Update parquet files

.gitattributes DELETED
@@ -1,52 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- unpredictable_unique.jsonl filter=lfs diff=lfs merge=lfs -text
README.md DELETED
@@ -1,164 +0,0 @@
- ---
- annotations_creators:
- - no-annotation
- language_creators:
- - found
- language:
- - en
- license:
- - apache-2.0
- multilinguality:
- - monolingual
- pretty_name: UnpredicTable-unique
- size_categories:
- - 100K<n<1M
- source_datasets: []
- task_categories:
- - multiple-choice
- - question-answering
- - zero-shot-classification
- - text2text-generation
- - table-question-answering
- - text-generation
- - text-classification
- - tabular-classification
- task_ids:
- - multiple-choice-qa
- - extractive-qa
- - open-domain-qa
- - closed-domain-qa
- - closed-book-qa
- - open-book-qa
- - language-modeling
- - multi-class-classification
- - natural-language-inference
- - topic-classification
- - multi-label-classification
- - tabular-multi-class-classification
- - tabular-multi-label-classification
- ---
-
-
- # Dataset Card for "UnpredicTable-unique" - Dataset of Few-shot Tasks from Tables
-
- ## Table of Contents
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-
- ## Dataset Description
-
- - **Repository:** https://github.com/AnonCodeShare/few-shot-adaptation
- - **Paper:** Few-shot Adaptation Works with UnpredicTable Data
-
- ### Dataset Summary
-
- The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance.
-
- There are several dataset versions available:
-
- * [UnpredicTable-full](https://huggingface.co/datasets/unpredictable/unpredictable_full): Starting from the initial WTC corpus of 50M tables, we apply our tables-to-tasks procedure to produce our resulting dataset, [UnpredicTable-full](https://huggingface.co/datasets/unpredictable/unpredictable_full), which comprises 413,299 tasks from 23,744 unique websites.
-
- * [UnpredicTable-unique](https://huggingface.co/datasets/unpredictable/unpredictable_unique): This is the same as [UnpredicTable-full](https://huggingface.co/datasets/unpredictable/unpredictable_full) but filtered to have a maximum of one task per website. [UnpredicTable-unique](https://huggingface.co/datasets/unpredictable/unpredictable_unique) contains exactly 23,744 tasks from 23,744 websites.
-
- * [UnpredicTable-5k](https://huggingface.co/datasets/unpredictable/unpredictable_5k): This dataset contains 5k random tables from the full dataset.
-
- * UnpredicTable data subsets based on the website of origin:
-   * [UnpredicTable-support-google-com](https://huggingface.co/datasets/unpredictable/unpredictable_support-google-com)
-
- ### Supported Tasks and Leaderboards
-
- Since the tables come from the web, the distribution of tasks and topics is very broad. Our dataset is wide rather than deep: it contains thousands of tasks with only a few examples each, whereas most current NLP datasets contain tens of tasks with many examples each. It therefore covers a broad range of potential task types, e.g., multiple-choice, question-answering, table-question-answering, text-classification, etc.
-
- The intended use of this dataset is to improve few-shot performance by fine-tuning/pre-training on our dataset.
-
- ### Languages
-
- English
-
- ## Dataset Structure
-
- ### Data Instances
-
- Each task is represented as a JSON Lines file and consists of several few-shot examples. Each example is a dictionary with a 'task' field identifying the task, followed by 'input', 'options', and 'output' fields. The 'input' field contains several column elements of the same row in the table, while the 'output' field is a target representing an individual column of that row. Each task contains several such examples, which can be concatenated into a few-shot prompt. For multiple-choice classification, the 'options' field lists the classes a model must choose from.
-
- There are also additional metadata fields such as 'pageTitle', 'title', 'outputColName', 'url', and 'wdcFile'.
-
- ### Data Fields
-
- 'task': task identifier.
-
- 'input': column elements of a specific row in the table.
-
- 'options': for multiple-choice classification, the options to choose from.
-
- 'output': target column element of the same row as the input.
-
- 'pageTitle': title of the page containing the table.
-
- 'outputColName': name of the output column.
-
- 'url': URL of the website containing the table.
-
- 'wdcFile': source file in the WDC Web Table Corpus.
-
- ### Data Splits
-
- The UnpredicTable datasets do not come with additional data splits.
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- Few-shot training on multi-task datasets has been demonstrated to improve language models' few-shot learning (FSL) performance on new tasks, but it is unclear which training tasks lead to effective downstream task adaptation. Few-shot learning datasets are typically produced with expensive human curation, limiting the scale and diversity of the training tasks available to study. As an alternative source of few-shot data, we automatically extract 413,299 tasks from diverse internet tables. We provide this as a research resource to investigate the relationship between training data and few-shot learning.
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- We use internet tables from the English-language Relational Subset of the WDC Web Table Corpus 2015 (WTC). The WTC tables were extracted from the July 2015 Common Crawl web corpus (http://webdatacommons.org/webtables/2015/EnglishStatistics.html). The corpus contains 50,820,165 tables from 323,160 web domains. We then convert the tables into few-shot learning tasks. Please see our publication for more details on the data collection and conversion pipeline.
-
- #### Who are the source language producers?
-
- The dataset is extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/).
-
- ### Personal and Sensitive Information
-
- The data was extracted from [WDC Web Table Corpora](http://webdatacommons.org/webtables/), which in turn extracted tables from the [Common Crawl](https://commoncrawl.org/). We did not filter the data in any way. Thus any user identities or otherwise sensitive information (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history; etc.) might be contained in our dataset.
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- This dataset is intended for use as a research resource to investigate the relationship between training data and few-shot learning. As such, it contains high- and low-quality data, as well as diverse content that may be untruthful or inappropriate. Without careful investigation, it should not be used for training models that will be deployed in decision-critical or user-facing situations.
-
- ### Discussion of Biases
-
- Since our dataset contains tables scraped from the web, it will also contain many toxic, racist, sexist, and otherwise harmful biases and texts. We have not run any analysis on the biases prevalent in our dataset, nor have we explicitly filtered the content. This implies that a model trained on our dataset may potentially reflect harmful biases and toxic text that exist in our dataset.
-
- ### Other Known Limitations
-
- No additional known limitations.
-
- ## Additional Information
-
- ### Licensing Information
- Apache 2.0
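
For quick reference, here is a hypothetical record matching the field list in the deleted card; all values below are invented for illustration, only the field names come from the card:

```python
# Hypothetical example record, following the field list in the deleted
# dataset card above. All values are invented for illustration; only
# the field names come from the card.
example = {
    "task": "example_task_0",             # task identifier
    "input": "[Team] Red Sox [Wins] 78",  # column elements of one table row
    "options": ["East", "West"],          # classes for multiple-choice tasks
    "output": "East",                     # target column element of the same row
    "pageTitle": "2015 Standings",        # title of the page containing the table
    "outputColName": "Division",          # name of the output column
    "url": "https://example.com/standings",
    "wdcFile": "path/in/wdc/corpus.json.gz",
}
```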
unpredictable_unique.jsonl → default/unpredictable_unique-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:685ec65a795f2b57f5572b17e94b2adbcb320f959efb1143acea353f97476051
- size 226273291
+ oid sha256:b2762696311d1b652d45cecda188e98a1bb4e6a07a6be83db9766d34ac03f2bb
+ size 46109365
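
The new path follows the Hub's `config/split` layout (`default` config, `train` split), so the data now loads without a loading script. A minimal sketch, assuming the `datasets` library is installed:

```python
# Minimal sketch: loading the converted Parquet data (assumes the
# `datasets` library; the repo id is taken from the dataset card above).
from datasets import load_dataset

ds = load_dataset("unpredictable/unpredictable_unique", split="train")
print(ds[0])  # one few-shot example with task/input/options/output fields
```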
unpredictable_unique.py DELETED
@@ -1,85 +0,0 @@
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """This loads the UnpredicTable-unique dataset."""
-
- import json
- import os
- import pandas as pd
-
- import datasets
-
-
- _DESCRIPTION = """\
- The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
- """
-
- _LICENSE = "Apache 2.0"
-
- _URL = "https://huggingface.co/datasets/unpredictable/unpredictable_unique/resolve/main/unpredictable_unique.jsonl"
-
- logger = datasets.logging.get_logger(__name__)
-
-
- class UnpredicTable(datasets.GeneratorBasedBuilder):
-     """
-     The UnpredicTable dataset consists of web tables formatted as few-shot tasks for fine-tuning language models to improve their few-shot performance. For more details please see the accompanying dataset card.
-     """
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         features = datasets.Features(
-             {
-                 "task": datasets.Value("string"),
-                 "input": datasets.Value("string"),
-                 "output": datasets.Value("string"),
-                 "options": datasets.Sequence([datasets.Value("string")]),
-                 "pageTitle": datasets.Value("string"),
-                 "outputColName": datasets.Value("string"),
-                 "url": datasets.Value("string"),
-                 "wdcFile": datasets.Value("string")
-             }
-         )
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=features,
-             license=_LICENSE,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         data_dir = dl_manager.download_and_extract(_URL)
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"filepath": data_dir},
-             ),
-         ]
-
-     def _generate_examples(self, filepath):
-         """Yields examples."""
-         with open(filepath, encoding="utf-8") as f:
-             for i, row in enumerate(f):
-                 data = json.loads(row)
-                 key = f"{data['task']}_{i}"
-                 yield key, {
-                     "task": data["task"],
-                     "input": data["input"],
-                     "output": data["output"],
-                     "options": data["options"],
-                     "pageTitle": data["pageTitle"],
-                     "outputColName": data["outputColName"],
-                     "url": data["url"],
-                     "wdcFile": data["wdcFile"],
-                 }
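
With the script deleted, its download-and-parse logic is no longer needed; the Parquet file can also be read directly. A sketch, assuming pandas plus `huggingface_hub` (which registers the `hf://` filesystem for fsspec-aware readers):

```python
# Sketch: reading the renamed Parquet file directly (assumes pandas and
# huggingface_hub are installed; the hf:// path mirrors the rename above).
import pandas as pd

df = pd.read_parquet(
    "hf://datasets/unpredictable/unpredictable_unique/default/unpredictable_unique-train.parquet"
)
print(df.columns.tolist())  # task, input, output, options, pageTitle, ...
```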