Datasets: e2e_nlg
Tasks: Table to Text
Modalities: Text
Languages: English
Size: 10K - 100K
Tags: data-to-text
License: cc-by-sa-4.0

Sebastian Gehrmann committed · Commit 8c2865c · Parent(s): 63abb80

migrate data card.
Browse files
e2e_nlg.json +179 -0

e2e_nlg.json ADDED
@@ -0,0 +1,179 @@
{
    "overview": {
        "where": {
            "has-leaderboard": "no",
            "leaderboard-url": "N/A",
            "leaderboard-description": "N/A",
            "website": "http://www.macs.hw.ac.uk/InteractionLab/E2E/",
            "data-url": "https://github.com/tuetschek/e2e-cleaning",
            "paper-url": "[First data release](https://www.aclweb.org/anthology/W17-5525/), [Detailed E2E Challenge writeup](https://doi.org/10.1016/j.csl.2019.06.009), [Cleaned E2E version](https://www.aclweb.org/anthology/W19-8652/)",
            "paper-bibtext": "@inproceedings{e2e_cleaned,\n\taddress = {Tokyo, Japan},\n\ttitle = {Semantic {Noise} {Matters} for {Neural} {Natural} {Language} {Generation}},\n\turl = {https://www.aclweb.org/anthology/W19-8652/},\n\tbooktitle = {Proceedings of the 12th {International} {Conference} on {Natural} {Language} {Generation} ({INLG} 2019)},\n\tauthor = {Du\u0161ek, Ond\u0159ej and Howcroft, David M and Rieser, Verena},\n\tyear = {2019},\n\tpages = {421--426},\n}",
            "contact-name": "Ondrej Dusek",
            "contact-email": "odusek@ufal.mff.cuni.cz"
        },
        "languages": {
            "is-multilingual": "no",
            "license": "cc-by-sa-4.0: Creative Commons Attribution Share Alike 4.0 International",
            "task-other": "N/A",
            "language-names": [
                "English"
            ],
            "language-speakers": "The original dataset was collected on the CrowdFlower (now Appen) platform from native English speakers (self-reported). No demographic information was provided, but the collection was geographically limited to English-speaking countries.",
            "language-dialects": "Dialect-specific data was not collected; the language is general British English.",
            "intended-use": "The dataset was collected to test neural models on a very well-specified realization task.",
            "license-other": "N/A",
            "task": "Data-to-Text",
            "communicative": "Producing a text informing/recommending a restaurant, given all and only the attributes specified on the input."
        },
        "credit": {
            "organization-type": [
                "academic"
            ],
            "organization-names": "Heriot-Watt University",
            "creators": "Jekaterina Novikova, Ondrej Dusek and Verena Rieser",
            "funding": "This research received funding from the EPSRC projects DILiGENt (EP/M005429/1) and MaDrIgAL (EP/N017536/1).",
            "gem-added-by": "Simon Mille wrote the initial data card and Yacine Jernite the data loader. Sebastian Gehrmann migrated the data card to the v2 format and moved the data loader to the hub."
        },
        "structure": {
            "data-fields": "The data is in a CSV format, with the following fields:\n\n* `mr` -- the meaning representation (MR, input)\n* `ref` -- reference, i.e. the corresponding natural-language description (output)\n\nThere are additional fields (`fixed`, `orig_mr`) indicating whether the data was modified in the\ncleaning process and what the original MR was before cleaning, but these aren't used for NLG.\n\nThe MR has a flat structure -- attribute-value pairs are comma-separated, with values\nenclosed in brackets (see example above). There are 8 attributes:\n* `name` -- restaurant name\n* `near` -- a landmark close to the restaurant\n* `area` -- location (riverside, city centre)\n* `food` -- food type / cuisine (e.g. Japanese, Indian, English etc.)\n* `eatType` -- restaurant type (restaurant, coffee shop, pub)\n* `priceRange` -- price range (low, medium, high, <\u00a320, \u00a320-30, >\u00a330)\n* `rating` -- customer rating (low, medium, high, 1/5, 3/5, 5/5)\n* `familyFriendly` -- is the restaurant family-friendly (yes/no)\n\nThe same MR is often repeated multiple times with different synonymous references.\n",
            "structure-splits": "\n| | MRs | Distinct MRs | References |\n|-------------|------|--------------|------------|\n| Training |12,568| 8,362 | 33,525 |\n| Development | 1,484| 1,132 | 4,299 |\n| Test | 1,847| 1,358 | 4,693 |\n| Total |15,899| 10,852 | 42,517 |\n\n\u201cDistinct MRs\u201d are MRs that remain distinct even if restaurant/place names (attributes `name`, `near`)\nare delexicalized, i.e., replaced with a placeholder.",
            "structure-splits-criteria": "The data are divided so that MRs in different splits do not overlap.\n",
            "structure-description": "n/a",
            "structure-labels": "The source MRs were generated automatically at random from a set of valid attribute values. The labels were crowdsourced and are natural language.",
            "structure-example": "{\n \"input\": \"name[Alimentum], area[riverside], familyFriendly[yes], near[Burger King]\",\n \"target\": \"Alimentum is a kids friendly place in the riverside area near Burger King.\"\n}",
            "structure-outlier": "n/a"
        }
    },
    "gem": {
        "rationale": {
            "sole-task-dataset": "yes",
            "sole-language-task-dataset": "no",
            "distinction-description": "The dataset is much cleaner than comparable datasets, and it is also a relatively easy task, making for a straightforward evaluation.",
            "contribution": "The E2E dataset is one of the largest limited-domain NLG datasets and is frequently used as a data-to-text generation benchmark. The E2E Challenge included 20 systems of very different architectures, with system outputs available for download.\n",
            "model-ability": "surface realization."
        },
        "curation": {
            "has-additional-curation": "yes",
            "modification-types": [],
            "modification-description": "N/A",
            "has-additional-splits": "yes",
            "additional-splits-description": "4 special test sets for E2E were added to the GEM evaluation suite.\n\n1. We created subsets of the training and development sets of ~500 randomly selected inputs each.\n2. We applied input scrambling on a subset of 500 randomly selected test instances; the order of the input properties was randomly reassigned.\n3. For the input size, we created subpopulations based on the number of restaurant properties in the input.\n\n| Input length | Frequency English |\n|---------------|-------------------|\n| 2 | 5 |\n| 3 | 120 |\n| 4 | 389 |\n| 5 | 737 |\n| 6 | 1187 |\n| 7 | 1406 |\n| 8 | 774 |\n| 9 | 73 |\n| 10 | 2 |",
            "additional-splits-capacicites": "Generalization and robustness"
        },
        "starting": {
            "research-pointers": "n/a",
            "technical-terms": "n/a"
        }
    },
    "curation": {
        "original": {
            "is-aggregated": "no",
            "aggregated-sources": "N/A",
            "rationale": "The dataset was collected to showcase/test neural NLG models. It is larger and contains more lexical richness and syntactic variation than previous closed-domain NLG datasets.\n",
            "communicative": "Producing a text informing/recommending a restaurant, given all and only the attributes specified on the input."
        },
        "language": {
            "found": [],
            "crowdsourced": [
                "Other crowdworker platform"
            ],
            "created": "N/A",
            "machine-generated": "N/A",
            "validated": "validated by data curator",
            "is-filtered": "algorithmically",
            "filtered-criteria": "The cleaned version of the dataset which we are using in GEM was algorithmically filtered. The curators used regular expressions to match all human-generated references with a more accurate input when attributes were hallucinated or dropped. Additionally, train-test overlap stemming from the transformation was removed. As a result, this data is much cleaner than the original dataset, but not perfect (about 20% of instances may have misaligned slots, compared to 40% of the original data).",
            "obtained": [
                "Crowdsourced"
            ],
            "producers-description": "Human references describing the MRs were collected by crowdsourcing on the CrowdFlower (now Appen) platform,\nwith either textual or pictorial MRs as a baseline.\nThe pictorial MRs were used in 20% of cases -- these yield higher lexical variation but introduce noise.",
            "topics": "The dataset is focused on descriptions of restaurants.",
            "pre-processed": "There were basic checks (length, valid characters, repetition).\n"
        },
        "annotations": {
            "origin": "none",
            "rater-number": "N/A",
            "rater-qualifications": "N/A",
            "rater-training-num": "N/A",
            "rater-test-num": "N/A",
            "rater-annotation-service-bool": "no",
            "rater-annotation-service": [],
            "values": "N/A",
            "quality-control": [],
            "quality-control-details": "N/A"
        },
        "consent": {
            "has-consent": "yes",
            "consent-policy": "Since a crowdsourcing platform was used, the involved raters waived their rights to the data and are aware that the produced annotations can be publicly released.",
            "consent-other": "N/A",
            "no-consent-justification": "N/A"
        },
        "pii": {
            "has-pii": "no PII",
            "no-pii-justification": "The dataset is artificial and does not contain any description of people.",
            "is-pii-identified": "N/A",
            "pii-identified-method": "N/A",
            "is-pii-replaced": "N/A",
            "pii-replaced-method": "N/A",
            "pii-categories": []
        },
        "maintenance": {
            "has-maintenance": "no",
            "description": "N/A",
            "contact": "N/A",
            "contestation-mechanism": "N/A",
            "contestation-link": "N/A",
            "contestation-description": "N/A"
        }
    },
    "results": {
        "results": {
            "other-metrics-definitions": "N/A",
            "has-previous-results": "yes",
            "current-evaluation": "Most previous results, including the shared task results, used the library provided by the dataset creators. The shared task also conducted a human evaluation using the following two criteria:\n\n- `Quality`: When collecting quality ratings, system outputs were presented to crowd workers together with the corresponding meaning representation, which implies that correctness of the NL utterance relative to the MR should also influence this ranking. The crowd workers were asked: \u201cHow do you judge the overall quality of the utterance in terms of its grammatical correctness, fluency, adequacy and other important factors?\u201d\n- `Naturalness`: When collecting naturalness ratings, system outputs were presented to crowd workers without the corresponding meaning representation. The crowd workers were asked: \u201cCould the utterance have been produced by a native speaker?\u201d",
            "previous-results": "The shared task writeup has in-depth evaluations of systems (https://www.sciencedirect.com/science/article/pii/S0885230819300919).",
            "model-abilities": "Surface realization.",
            "metrics": [
                "BLEU",
                "METEOR",
                "ROUGE"
            ],
            "original-evaluation": "The official evaluation script combines the MT-Eval and COCO Captioning libraries with the following metrics.\n\n- BLEU\n- CIDEr\n- NIST\n- METEOR\n- ROUGE-L"
        }
    },
    "considerations": {
        "pii": {
            "risks-description": "n/a"
        },
        "licenses": {
            "dataset-restrictions-other": "N/A",
            "data-copyright-other": "N/A",
            "dataset-restrictions": [
                "open license - commercial use allowed"
            ],
            "data-copyright": [
                "open license - commercial use allowed"
            ]
        },
        "limitations": {
            "data-technical-limitations": "The cleaned version still has data points with hallucinated or omitted attributes.",
            "data-unsuited-applications": "The data only pertains to the restaurant domain and the included attributes. A model cannot be expected to handle other domains or attributes.",
            "data-discouraged-use": "n/a"
        }
    },
    "context": {
        "previous": {
            "is-deployed": "no",
            "described-risks": "N/A",
            "changes-from-observation": "N/A"
        },
        "underserved": {
            "helps-underserved": "no",
            "underserved-description": "N/A"
        },
        "biases": {
            "has-biases": "no",
            "bias-analyses": "N/A",
            "speaker-distibution": "The source data is generated randomly, so it should not contain biases. The human references may be biased by the workers' demographics, but that was not investigated upon data collection."
        }
    }
}
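As a quick illustration of the flat MR format described in `data-fields` above (and of the input scrambling used for GEM's special test set 2), here is a minimal Python sketch. It is not part of the dataset or the official tooling; the `parse_mr` helper and its regex are illustrative assumptions.

```python
# Minimal sketch (not official tooling): parse the flat E2E MR format,
# e.g. "name[Alimentum], area[riverside], familyFriendly[yes], near[Burger King]",
# into a dict of attribute -> value, then scramble the attribute order
# as done for GEM's input-scrambling test set.
import random
import re

MR_PAIR = re.compile(r"([A-Za-z]+)\[([^\]]*)\]")  # one `attribute[value]` pair

def parse_mr(mr: str) -> dict:
    """Illustrative helper: split an MR string into its attribute-value pairs."""
    return dict(MR_PAIR.findall(mr))

mr = "name[Alimentum], area[riverside], familyFriendly[yes], near[Burger King]"
attrs = parse_mr(mr)
print(attrs)
# {'name': 'Alimentum', 'area': 'riverside', 'familyFriendly': 'yes', 'near': 'Burger King'}

pairs = list(attrs.items())
random.shuffle(pairs)  # randomly reassign the order of the input properties
print(", ".join(f"{a}[{v}]" for a, v in pairs))
```

For the automatic `metrics` listed in the card, one common choice today is a library such as sacrebleu; this is an assumption for illustration only, since the official E2E script instead combines the MT-Eval and COCO Captioning libraries.

```python
# Hedged example: corpus-level BLEU with sacrebleu (pip install sacrebleu).
# METEOR and ROUGE-L follow the same pattern with their respective libraries.
import sacrebleu

hypotheses = ["Alimentum is a kids friendly place near Burger King."]
references = [["Alimentum is a kids friendly place in the riverside area near Burger King."]]
print(sacrebleu.corpus_bleu(hypotheses, references).score)
```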