system HF staff committed on
Commit
9b97275
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Browse files

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

Files changed (4) hide show
  1. .gitattributes +27 -0
  2. dataset_infos.json +1 -0
  3. dummy/1.0.0/dummy_data.zip +3 -0
  4. scicite.py +153 -0
.gitattributes ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.model filter=lfs diff=lfs merge=lfs -text
12
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
13
+ *.onnx filter=lfs diff=lfs merge=lfs -text
14
+ *.ot filter=lfs diff=lfs merge=lfs -text
15
+ *.parquet filter=lfs diff=lfs merge=lfs -text
16
+ *.pb filter=lfs diff=lfs merge=lfs -text
17
+ *.pt filter=lfs diff=lfs merge=lfs -text
18
+ *.pth filter=lfs diff=lfs merge=lfs -text
19
+ *.rar filter=lfs diff=lfs merge=lfs -text
20
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
21
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
22
+ *.tflite filter=lfs diff=lfs merge=lfs -text
23
+ *.tgz filter=lfs diff=lfs merge=lfs -text
24
+ *.xz filter=lfs diff=lfs merge=lfs -text
25
+ *.zip filter=lfs diff=lfs merge=lfs -text
26
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
27
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
dataset_infos.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"default": {"description": "\nThis is a dataset for classifying citation intents in academic papers.\nThe main citation intent label for each Json object is specified with the label\nkey while the citation context is specified in with a context key. Example:\n{\n 'string': 'In chacma baboons, male-infant relationships can be linked to both\n formation of friendships and paternity success [30,31].'\n 'sectionName': 'Introduction',\n 'label': 'background',\n 'citingPaperId': '7a6b2d4b405439',\n 'citedPaperId': '9d1abadc55b5e0',\n ...\n }\nYou may obtain the full information about the paper using the provided paper ids\nwith the Semantic Scholar API (https://api.semanticscholar.org/).\nThe labels are:\nMethod, Background, Result\n", "citation": "\n@InProceedings{Cohan2019Structural,\n author={Arman Cohan and Waleed Ammar and Madeleine Van Zuylen and Field Cady},\n title={Structural Scaffolds for Citation Intent Classification in Scientific Publications},\n booktitle=\"NAACL\",\n year=\"2019\"\n}\n", "homepage": "https://github.com/allenai/scicite", "license": "", "features": {"string": {"dtype": "string", "id": null, "_type": "Value"}, "sectionName": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["method", "background", "result"], "names_file": null, "id": null, "_type": "ClassLabel"}, "citingPaperId": {"dtype": "string", "id": null, "_type": "Value"}, "citedPaperId": {"dtype": "string", "id": null, "_type": "Value"}, "excerpt_index": {"dtype": "int32", "id": null, "_type": "Value"}, "isKeyCitation": {"dtype": "bool", "id": null, "_type": "Value"}, "label2": {"num_classes": 4, "names": ["supportive", "not_supportive", "cant_determine", "none"], "names_file": null, "id": null, "_type": "ClassLabel"}, "citeEnd": {"dtype": "int64", "id": null, "_type": "Value"}, "citeStart": {"dtype": "int64", "id": null, "_type": "Value"}, "source": {"num_classes": 7, "names": ["properNoun", "andPhrase", "acronym", "etAlPhrase", "explicit", 
"acronymParen", "nan"], "names_file": null, "id": null, "_type": "ClassLabel"}, "label_confidence": {"dtype": "float32", "id": null, "_type": "Value"}, "label2_confidence": {"dtype": "float32", "id": null, "_type": "Value"}, "id": {"dtype": "string", "id": null, "_type": "Value"}}, "supervised_keys": null, "builder_name": "scicite", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "datasets_version_to_prepare": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"test": {"name": "test", "num_bytes": 870809, "num_examples": 1859, "dataset_name": "scicite"}, "train": {"name": "train", "num_bytes": 3843904, "num_examples": 8194, "dataset_name": "scicite"}, "validation": {"name": "validation", "num_bytes": 430296, "num_examples": 916, "dataset_name": "scicite"}}, "download_checksums": {"https://s3-us-west-2.amazonaws.com/ai2-s2-research/scicite/scicite.tar.gz": {"num_bytes": 23189911, "checksum": "711ece2c4e61d116c8ae5bb07e9fbb2ee9ff7bba004b4cab7fbd0ac3af499193"}}, "download_size": 23189911, "dataset_size": 5145009, "size_in_bytes": 28334920}}
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fc8243e61ee200c6f7b6c4c3859e234dd07ad7bcd4f789edd17435ca514fe82
3
+ size 3118
scicite.py ADDED
@@ -0,0 +1,153 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """TODO(scicite): Add a description here."""
18
+
19
+ from __future__ import absolute_import, division, print_function
20
+
21
+ import json
22
+ import os
23
+
24
+ import datasets
25
+
26
+
27
+ _CITATION = """
28
+ @InProceedings{Cohan2019Structural,
29
+ author={Arman Cohan and Waleed Ammar and Madeleine Van Zuylen and Field Cady},
30
+ title={Structural Scaffolds for Citation Intent Classification in Scientific Publications},
31
+ booktitle={NAACL},
32
+ year={2019}
33
+ }
34
+ """
35
+
36
+ _DESCRIPTION = """
37
+ This is a dataset for classifying citation intents in academic papers.
38
+ The main citation intent label for each Json object is specified with the label
39
+ key while the citation context is specified in with a context key. Example:
40
+ {
41
+ 'string': 'In chacma baboons, male-infant relationships can be linked to both
42
+ formation of friendships and paternity success [30,31].'
43
+ 'sectionName': 'Introduction',
44
+ 'label': 'background',
45
+ 'citingPaperId': '7a6b2d4b405439',
46
+ 'citedPaperId': '9d1abadc55b5e0',
47
+ ...
48
+ }
49
+ You may obtain the full information about the paper using the provided paper ids
50
+ with the Semantic Scholar API (https://api.semanticscholar.org/).
51
+ The labels are:
52
+ Method, Background, Result
53
+ """
54
+
55
+ _SOURCE_NAMES = ["properNoun", "andPhrase", "acronym", "etAlPhrase", "explicit", "acronymParen", "nan"]
56
+
57
+
58
+ class Scicite(datasets.GeneratorBasedBuilder):
59
+ """This is a dataset for classifying citation intents in academic papers."""
60
+
61
+ VERSION = datasets.Version("1.0.0")
62
+
63
+ def _info(self):
64
+ return datasets.DatasetInfo(
65
+ # This is the description that will appear on the datasets page.
66
+ description=_DESCRIPTION,
67
+ # datasets.features.FeatureConnectors
68
+ features=datasets.Features(
69
+ {
70
+ "string": datasets.Value("string"),
71
+ "sectionName": datasets.Value("string"),
72
+ "label": datasets.features.ClassLabel(names=["method", "background", "result"]),
73
+ "citingPaperId": datasets.Value("string"),
74
+ "citedPaperId": datasets.Value("string"),
75
+ "excerpt_index": datasets.Value("int32"),
76
+ "isKeyCitation": datasets.Value("bool"),
77
+ "label2": datasets.features.ClassLabel(
78
+ names=["supportive", "not_supportive", "cant_determine", "none"]
79
+ ),
80
+ "citeEnd": datasets.Value("int64"),
81
+ "citeStart": datasets.Value("int64"),
82
+ "source": datasets.features.ClassLabel(names=_SOURCE_NAMES),
83
+ "label_confidence": datasets.Value("float32"),
84
+ "label2_confidence": datasets.Value("float32"),
85
+ "id": datasets.Value("string"),
86
+ }
87
+ ),
88
+ # If there's a common (input, target) tuple from the features,
89
+ # specify them here. They'll be used if as_supervised=True in
90
+ # builder.as_dataset.
91
+ supervised_keys=None,
92
+ # Homepage of the dataset for documentation
93
+ homepage="https://github.com/allenai/scicite",
94
+ citation=_CITATION,
95
+ )
96
+
97
+ def _split_generators(self, dl_manager):
98
+ """Returns SplitGenerators."""
99
+ dl_paths = dl_manager.download_and_extract(
100
+ {
101
+ "scicite": "https://s3-us-west-2.amazonaws.com/ai2-s2-research/scicite/scicite.tar.gz",
102
+ }
103
+ )
104
+ path = os.path.join(dl_paths["scicite"], "scicite")
105
+ return [
106
+ datasets.SplitGenerator(
107
+ name=datasets.Split.TRAIN,
108
+ gen_kwargs={"path": os.path.join(path, "train.jsonl")},
109
+ ),
110
+ datasets.SplitGenerator(
111
+ name=datasets.Split.VALIDATION,
112
+ gen_kwargs={"path": os.path.join(path, "dev.jsonl")},
113
+ ),
114
+ datasets.SplitGenerator(
115
+ name=datasets.Split.TEST,
116
+ gen_kwargs={"path": os.path.join(path, "test.jsonl")},
117
+ ),
118
+ ]
119
+
120
+ def _generate_examples(self, path=None):
121
+ """Yields examples."""
122
+ with open(path, encoding="utf-8") as f:
123
+ unique_ids = {}
124
+ for line in f:
125
+ d = json.loads(line)
126
+ unique_id = str(d["unique_id"])
127
+ if unique_id in unique_ids:
128
+ continue
129
+ unique_ids[unique_id] = True
130
+ yield unique_id, {
131
+ "string": d["string"],
132
+ "label": str(d["label"]),
133
+ "sectionName": str(d["sectionName"]),
134
+ "citingPaperId": str(d["citingPaperId"]),
135
+ "citedPaperId": str(d["citedPaperId"]),
136
+ "excerpt_index": int(d["excerpt_index"]),
137
+ "isKeyCitation": bool(d["isKeyCitation"]),
138
+ "label2": str(d.get("label2", "none")),
139
+ "citeEnd": _safe_int(d["citeEnd"]),
140
+ "citeStart": _safe_int(d["citeStart"]),
141
+ "source": str(d["source"]),
142
+ "label_confidence": float(d.get("label_confidence", 0.0)),
143
+ "label2_confidence": float(d.get("label2_confidence", 0.0)),
144
+ "id": str(d["id"]),
145
+ }
146
+
147
+
148
+ def _safe_int(a):
149
+ try:
150
+ # skip NaNs
151
+ return int(a)
152
+ except ValueError:
153
+ return -1