Dataset: snips_built_in_intents
Modalities: Text
Formats: parquet
Languages: English
Size: < 1K
ArXiv: 1805.10190
Libraries: Datasets, pandas
Commit 7a3f932 (parent: 3454f71), committed by albertvillanova (HF staff)

Convert dataset to Parquet (#3)

- Convert dataset to Parquet (c5bcd15c6149bbcf7b9463abd90e784ba0903b75)
- Delete loading script (bb5ca8e258c4159c674463b75b6632fe99e8b4b5)
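
With this change, consumers read the committed Parquet file directly instead of executing the loading script. A minimal sketch of what loading looks like after the conversion (the Hub id `snips_built_in_intents` is inferred from the deleted script's filename and is an assumption):

from datasets import load_dataset

# After the Parquet conversion no dataset script runs; the library reads
# the file under data/train-* as declared in the README's `configs` block.
ds = load_dataset("snips_built_in_intents", split="train")
print(len(ds))                         # 328 examples, matching the metadata below
print(ds.features["label"].names[:3])  # e.g. ['ComparePlaces', 'RequestRide', 'GetWeather']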

README.md CHANGED
@@ -39,10 +39,15 @@ dataset_info:
     '9': ShareETA
   splits:
   - name: train
-    num_bytes: 19431
+    num_bytes: 19427
     num_examples: 328
-  download_size: 9130264
-  dataset_size: 19431
+  download_size: 11158
+  dataset_size: 19427
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
 train-eval-index:
 - config: default
   task: text-classification
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37779fd5d527fb351c4b4d7adbc2596476d38c0239a34d0313dae0c16e618f9b
+size 11158
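
The committed file is a Git LFS pointer, not the Parquet bytes themselves; the resolved file is the 11158-byte shard named in the `size` field. A hedged sketch of reading that shard directly with pandas (the `resolve/main` URL pattern is an assumption about the hosting layout, and a parquet engine such as pyarrow is required):

import pandas as pd

# Hypothetical direct read of the single train shard over HTTP.
url = (
    "https://huggingface.co/datasets/snips_built_in_intents"
    "/resolve/main/data/train-00000-of-00001.parquet"
)
df = pd.read_parquet(url)
print(df.shape)  # expected (328, 2): "text" and "label" columns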
snips_built_in_intents.py DELETED
@@ -1,125 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""Snips built in intents (2016-12-built-in-intents) dataset."""
-
-
-import json
-
-import datasets
-from datasets.tasks import TextClassification
-
-
-_DESCRIPTION = """\
-Snips' built in intents dataset was initially used to compare different voice assistants and released as a public dataset hosted at
-https://github.com/sonos/nlu-benchmark 2016-12-built-in-intents. The dataset contains 328 utterances over 10 intent classes. The
-related paper mentioned on the github page is https://arxiv.org/abs/1805.10190 and a related Medium post is
-https://medium.com/snips-ai/benchmarking-natural-language-understanding-systems-d35be6ce568d .
-"""
-
-_CITATION = """\
-@article{DBLP:journals/corr/abs-1805-10190,
-  author    = {Alice Coucke and
-               Alaa Saade and
-               Adrien Ball and
-               Th{\'{e}}odore Bluche and
-               Alexandre Caulier and
-               David Leroy and
-               Cl{\'{e}}ment Doumouro and
-               Thibault Gisselbrecht and
-               Francesco Caltagirone and
-               Thibaut Lavril and
-               Ma{\"{e}}l Primet and
-               Joseph Dureau},
-  title     = {Snips Voice Platform: an embedded Spoken Language Understanding system
-               for private-by-design voice interfaces},
-  journal   = {CoRR},
-  volume    = {abs/1805.10190},
-  year      = {2018},
-  url       = {http://arxiv.org/abs/1805.10190},
-  archivePrefix = {arXiv},
-  eprint    = {1805.10190},
-  timestamp = {Mon, 13 Aug 2018 16:46:59 +0200},
-  biburl    = {https://dblp.org/rec/journals/corr/abs-1805-10190.bib},
-  bibsource = {dblp computer science bibliography, https://dblp.org}
-}
-"""
-
-_DOWNLOAD_URL = (
-    "https://raw.githubusercontent.com/sonos/nlu-benchmark/master/2016-12-built-in-intents/benchmark_data.json"
-)
-
-
-class SnipsBuiltInIntents(datasets.GeneratorBasedBuilder):
-    """Snips built in intents (2016-12-built-in-intents) dataset."""
-
-    def _info(self):
-        # ToDo: Consider adding an alternate configuration for the entity slots. The default is to only return the intent labels.
-
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "text": datasets.Value("string"),
-                    "label": datasets.features.ClassLabel(
-                        names=[
-                            "ComparePlaces",
-                            "RequestRide",
-                            "GetWeather",
-                            "SearchPlace",
-                            "GetPlaceDetails",
-                            "ShareCurrentLocation",
-                            "GetTrafficInformation",
-                            "BookRestaurant",
-                            "GetDirections",
-                            "ShareETA",
-                        ]
-                    ),
-                }
-            ),
-            homepage="https://github.com/sonos/nlu-benchmark/tree/master/2016-12-built-in-intents",
-            citation=_CITATION,
-            task_templates=[TextClassification(text_column="text", label_column="label")],
-        )
-
-    def _split_generators(self, dl_manager):
-        # Note: The source dataset doesn't have a train-test split.
-        # ToDo: Consider splitting the data into train-test sets and re-hosting.
-        samples_path = dl_manager.download_and_extract(_DOWNLOAD_URL)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": samples_path}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """Snips built in intent examples."""
-        num_examples = 0
-
-        with open(filepath, encoding="utf-8") as file_obj:
-            snips_dict = json.load(file_obj)
-            domains = snips_dict["domains"]
-
-            for domain_dict in domains:
-                intents = domain_dict["intents"]
-
-                for intent_dict in intents:
-                    label = intent_dict["benchmark"]["Snips"]["original_intent_name"]
-                    queries = intent_dict["queries"]
-
-                    for query_dict in queries:
-                        query_text = query_dict["text"]
-
-                        yield num_examples, {"text": query_text, "label": label}
-                        num_examples += 1  # Explicitly keep track of the number of examples.
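
For reference, the deleted `_generate_examples` walks the source JSON as domains -> intents -> queries. A standalone sketch of the same flattening without the `datasets` builder machinery, using the script's own `_DOWNLOAD_URL`:

import json
import urllib.request

URL = "https://raw.githubusercontent.com/sonos/nlu-benchmark/master/2016-12-built-in-intents/benchmark_data.json"

with urllib.request.urlopen(URL) as resp:
    snips_dict = json.load(resp)

# Flatten domains -> intents -> queries, keeping the original intent
# name as the label, exactly as the deleted script did.
examples = [
    {"text": query["text"],
     "label": intent["benchmark"]["Snips"]["original_intent_name"]}
    for domain in snips_dict["domains"]
    for intent in domain["intents"]
    for query in intent["queries"]
]
print(len(examples))  # 328 utterances over 10 intent classes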