Commit · b5b8a87
1 Parent(s): 99fba31

Add missing columns to `blended_skill_talk` (#4437)

Commit from https://github.com/huggingface/datasets/commit/5b1087e8b54f9c7ac3b08d4e482b178c84d7e569
- README.md +29 -12
- blended_skill_talk.py +12 -1
- dataset_infos.json +1 -1
README.md
CHANGED
@@ -1,7 +1,23 @@
 ---
-
+annotations_creators:
+- crowdsourced
+language_creators:
+- crowdsourced
 languages:
 - en
+licenses:
+- unknown
+multilinguality:
+- monolingual
+pretty_name: BlendedSkillTalk
+size_categories:
+- 1K<n<10K
+source_datasets:
+- original
+task_categories:
+- conversational
+task_ids:
+- dialogue-generation
 paperswithcode_id: blended-skill-talk
 ---
 
@@ -35,7 +51,7 @@ paperswithcode_id: blended-skill-talk
 
 - **Homepage:** [https://parl.ai/projects/bst/](https://parl.ai/projects/bst/)
 - **Repository:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-- **Paper:** [
+- **Paper:** [Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills](https://arxiv.org/abs/2004.08449v1)
 - **Point of Contact:** [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
 - **Size of downloaded dataset files:** 36.34 MB
 - **Size of the generated dataset:** 14.38 MB
@@ -65,17 +81,16 @@ A dataset of 7k conversations explicitly designed to exhibit multiple conversati
 
 An example of 'train' looks as follows.
 ```
-This example was too long and was cropped:
-
 {
-
-
-
-
-
-
-
-
+'personas': ['my parents don t really speak english , but i speak italian and english.', 'i have three children.'],
+'additional_context': 'Backstreet Boys',
+'previous_utterance': ['Oh, I am a BIG fan of the Backstreet Boys! Have you ever seen them performing live?', "No,I listen to their music a lot, mainly the unbreakable which is the Backstreet Boys' sixth studio album. "],
+'context': 'wizard_of_wikipedia',
+'free_messages': ['you are very knowledgeable, do you prefer nsync or bsb?', "haha kids of this days don't know them, i'm 46 and i still enjoying them, my kids only listen k-pop", "italian?haha that's strange, i only talk english and a little spanish "],
+'guided_messages': ["i don't have a preference, they are both great. All 3 of my kids get annoyed when I listen to them though.", 'Sometimes I sing their songs in Italian, that really annoys them lol.', 'My parents barely speak English, so I was taught both. By the way, what is k-pop?'],
+'suggestions': {'convai2': ["i don't have a preference , both are pretty . do you have any hobbies ?", "do they the backstreet boys ? that's my favorite group .", 'are your kids interested in music ?'], 'empathetic_dialogues': ['I actually just discovered Imagine Dragons. I love them!', "Hahaha that just goes to show ya, age is just a umber!'", 'That would be hard! Do you now Spanish well?'], 'wizard_of_wikipedia': ['NSYNC Also had Lance Bass and Joey Fatone, sometimes called the Fat One.', 'Yes, there are a few K-Pop songs that I have heard good big in the USA. It is the most popular in South Korea and has Western elements of pop.', 'English, beleive it or not.']},
+'guided_chosen_suggestions': ['convai2', '', ''],
+'label_candidates': []}
 ```
 
 ### Data Fields
@@ -93,6 +108,8 @@ The data fields are the same among all splits.
 - `convai2`: a `string` feature.
 - `empathetic_dialogues`: a `string` feature.
 - `wizard_of_wikipedia`: a `string` feature.
+- `guided_chosen_suggestions`: a `list` of `string` features.
+- `label_candidates`: a `list` of `lists` of `string` features.
 
 ### Data Splits
 
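For reference, a minimal sketch of how the two new columns surface to users of the `datasets` library, assuming an installed release that includes this commit (the dataset name and field names are taken from the card above):

```python
from datasets import load_dataset

# Download and prepare the BlendedSkillTalk train split (fetches the ParlAI archive on first use).
dataset = load_dataset("blended_skill_talk", split="train")

# The two columns added by this commit, shown for the first example.
example = dataset[0]
print(example["guided_chosen_suggestions"])  # e.g. ['convai2', '', ''] as in the card's sample
print(example["label_candidates"])           # list of lists of strings; empty in the sample above
```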
blended_skill_talk.py
CHANGED
@@ -47,7 +47,11 @@ class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
                     "context": datasets.Value("string"),
                     "free_messages": datasets.features.Sequence(datasets.Value("string")),
                     "guided_messages": datasets.features.Sequence(datasets.Value("string")),
-                    "suggestions": datasets.features.Sequence({task: datasets.Value("string") for task in _TASK})
+                    "suggestions": datasets.features.Sequence({task: datasets.Value("string") for task in _TASK}),
+                    "guided_chosen_suggestions": datasets.features.Sequence(datasets.Value("string")),
+                    "label_candidates": datasets.features.Sequence(
+                        datasets.features.Sequence(datasets.Value("string"))
+                    ),
                     # These are the features of your dataset like images, labels ...
                 }
             ),
@@ -119,6 +123,11 @@ class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
                    convai_suggestions.append(suggestions[2 * i + 1]["convai2"])
                    empathetic_suggestions.append(suggestions[2 * i + 1]["empathetic_dialogues"])
                    wow_suggestions.append(suggestions[2 * i + 1]["wizard_of_wikipedia"])
+                chosen_suggestions = row["chosen_suggestions"]
+                guided_chosen_suggestions = []
+                for i in range(len(chosen_suggestions) // 2):
+                    guided_chosen_suggestions.append(chosen_suggestions[2 * i + 1])
+                label_candidates = row["label_candidates"] if "label_candidates" in row else []
                 yield id_, {
                     "personas": personas,
                     "additional_context": add_context,
@@ -131,5 +140,7 @@ class BlendedSkillTalk(datasets.GeneratorBasedBuilder):
                         "empathetic_dialogues": empathetic_suggestions,
                         "wizard_of_wikipedia": wow_suggestions,
                     },
+                    "guided_chosen_suggestions": guided_chosen_suggestions,
+                    "label_candidates": label_candidates,
                 }
                 break
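The added generator logic mirrors the existing handling of `suggestions`: the raw `chosen_suggestions` list interleaves entries for the two speakers, and only every second entry (the guided speaker's, under the convention this loader assumes) is kept. A small standalone sketch with made-up data illustrates the `2 * i + 1` indexing:

```python
# Hypothetical interleaved list: even indices for the free speaker,
# odd indices for the guided speaker (assumed convention, matching the loader).
chosen_suggestions = ["", "convai2", "", "", "", "wizard_of_wikipedia"]

guided_chosen_suggestions = []
for i in range(len(chosen_suggestions) // 2):
    # Keep every second entry, i.e. the guided speaker's chosen suggestion source.
    guided_chosen_suggestions.append(chosen_suggestions[2 * i + 1])

print(guided_chosen_suggestions)  # ['convai2', '', 'wizard_of_wikipedia']
```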
dataset_infos.json
CHANGED
@@ -1 +1 @@
-{"default": {"description": "A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.\n", "citation": "@misc{smith2020evaluating,\n title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},\n author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},\n year={2020},\n eprint={2004.08449},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://parl.ai/projects/bst/", "license": "", "features": {"personas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "additional_context": {"dtype": "string", "id": null, "_type": "Value"}, "previous_utterance": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "free_messages": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "
+{"default": {"description": "A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.\n", "citation": "@misc{smith2020evaluating,\n title={Can You Put it All Together: Evaluating Conversational Agents' Ability to Blend Skills},\n author={Eric Michael Smith and Mary Williamson and Kurt Shuster and Jason Weston and Y-Lan Boureau},\n year={2020},\n eprint={2004.08449},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://parl.ai/projects/bst/", "license": "", "features": {"personas": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "additional_context": {"dtype": "string", "id": null, "_type": "Value"}, "previous_utterance": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "free_messages": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "guided_messages": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "suggestions": {"feature": {"convai2": {"dtype": "string", "id": null, "_type": "Value"}, "empathetic_dialogues": {"dtype": "string", "id": null, "_type": "Value"}, "wizard_of_wikipedia": {"dtype": "string", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}, "guided_chosen_suggestions": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "label_candidates": {"feature": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "length": -1, "id": null, "_type": "Sequence"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "blended_skill_talk", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 10831361, "num_examples": 4819, "dataset_name": "blended_skill_talk"}, "validation": {"name": "validation", "num_bytes": 43961658, "num_examples": 1009, "dataset_name": "blended_skill_talk"}, "test": {"name": "test", "num_bytes": 44450102, "num_examples": 980, "dataset_name": "blended_skill_talk"}}, "download_checksums": {"http://parl.ai/downloads/blended_skill_talk/blended_skill_talk.tar.gz": {"num_bytes": 38101408, "checksum": "5fbed0068ee89e2d43b93c3ecb341e784617033efa5e8e911a219d4eda6134a6"}}, "download_size": 38101408, "post_processing_size": null, "dataset_size": 99243121, "size_in_bytes": 137344529}}
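The regenerated `dataset_infos.json` records the new features together with the split sizes (4,819 train / 1,009 validation / 980 test examples). A hedged sketch of inspecting that metadata without downloading the data, assuming the `datasets` library's `load_dataset_builder` helper:

```python
from datasets import load_dataset_builder

# Loads only the builder and its metadata; no data files are downloaded here.
builder = load_dataset_builder("blended_skill_talk")

print(builder.info.features)  # should now list guided_chosen_suggestions and label_candidates
print(builder.info.splits)    # train/validation/test example counts from dataset_infos.json
```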