Dataset page metadata:
Modalities: Text
Formats: Parquet
Languages: English
Libraries: Datasets, pandas
Committed by albertvillanova (HF staff)
Commit: 302b413
Parent: b622e61

Convert dataset to Parquet (#6)

- Convert dataset to Parquet (80bf8bc45b509641bd203107fff6a1db72949eaa)
- Delete loading script (77f357589ff7cda19ceb18a8f1160f6694c31c6a)
- Delete legacy dataset_infos.json (8bb75594e5631b52469d3b86a726416a816393cb)
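
For orientation, a minimal sketch of what this change means for users: once the Parquet shards and the configs mapping in the README diff below are in place, the dataset loads without executing a loading script. The repo id and split names come from this commit; everything else is illustrative and assumes the datasets library is installed.

from datasets import load_dataset

# Illustrative only: after this commit the splits resolve directly from the
# Parquet shards under data/ (train-*, test-*, ca_test-*); no script runs.
billsum = load_dataset("billsum")
print(billsum)                        # DatasetDict with train, test and ca_test splits
print(billsum["train"][0]["title"])   # columns: text, summary, title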

README.md CHANGED
@@ -18,19 +18,6 @@ task_categories:
 task_ids: []
 paperswithcode_id: billsum
 pretty_name: BillSum
-train-eval-index:
-- config: default
-  task: summarization
-  task_id: summarization
-  splits:
-    train_split: train
-    eval_split: test
-  col_mapping:
-    text: text
-    summary: target
-  metrics:
-  - type: rouge
-    name: Rouge
 tags:
 - bills-summarization
 dataset_info:
@@ -51,8 +38,30 @@ dataset_info:
   - name: ca_test
     num_bytes: 14945291
     num_examples: 1237
-  download_size: 67260676
+  download_size: 113729382
   dataset_size: 272407638
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
+  - split: ca_test
+    path: data/ca_test-*
+train-eval-index:
+- config: default
+  task: summarization
+  task_id: summarization
+  splits:
+    train_split: train
+    eval_split: test
+  col_mapping:
+    text: text
+    summary: target
+  metrics:
+  - type: rouge
+    name: Rouge
 ---
 
 # Dataset Card for "billsum"
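
The train-eval-index block, moved below configs, maps the text column to the model input and summary to the reference (target), scored with ROUGE. A minimal sketch of that evaluation contract, assuming the evaluate library and its rouge_score backend are installed (neither is part of this commit); the truncation "model" is just a placeholder.

import evaluate
from datasets import load_dataset

# Illustrative only: mirrors the col_mapping (text -> input, summary -> target)
# and the rouge metric declared in the train-eval-index block above.
test = load_dataset("billsum", split="test")
rouge = evaluate.load("rouge")

predictions = [text[:512] for text in test["text"]]   # placeholder "summaries"
references = test["summary"]
print(rouge.compute(predictions=predictions, references=references))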
billsum.py DELETED
@@ -1,108 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Lint as: python3
-"""BillSum Dataset."""
-
-
-import json
-import os
-
-import datasets
-
-
-_CITATION = """
-@misc{kornilova2019billsum,
-    title={BillSum: A Corpus for Automatic Summarization of US Legislation},
-    author={Anastassia Kornilova and Vlad Eidelman},
-    year={2019},
-    eprint={1910.00523},
-    archivePrefix={arXiv},
-    primaryClass={cs.CL}
-}
-"""
-
-_DESCRIPTION = """
-BillSum, summarization of US Congressional and California state bills.
-
-There are several features:
-  - text: bill text.
-  - summary: summary of the bills.
-  - title: title of the bills.
-features for us bills. ca bills does not have.
-  - text_len: number of chars in text.
-  - sum_len: number of chars in summary.
-"""
-
-_URL = "https://drive.google.com/uc?export=download&id=1g89WgFHMRbr4QrvA0ngh26PY081Nv3lx"
-
-_LICENSE = "CC0"
-
-_DOCUMENT = "text"
-_SUMMARY = "summary"
-
-
-class Billsum(datasets.GeneratorBasedBuilder):
-    """BillSum Dataset."""
-
-    # 2.0.0 data source updated to filter near duplicates.
-    # 3.0.0 none of the test examples are 'near duplicates' of an example in the
-    # train set AND they dont have the same title, regardless of similarity.
-    VERSION = datasets.Version("3.0.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            license=_LICENSE,
-            features=datasets.Features(
-                {
-                    _DOCUMENT: datasets.Value("string"),
-                    _SUMMARY: datasets.Value("string"),
-                    "title": datasets.Value("string"),
-                }
-            ),
-            supervised_keys=(_DOCUMENT, _SUMMARY),
-            homepage="https://github.com/FiscalNote/BillSum",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        dl_path = dl_manager.download_and_extract(_URL)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={"path": os.path.join(dl_path, "us_train_data_final_OFFICIAL.jsonl"), "key": "bill_id"},
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={"path": os.path.join(dl_path, "us_test_data_final_OFFICIAL.jsonl"), "key": "bill_id"},
-            ),
-            datasets.SplitGenerator(
-                name="ca_test",
-                gen_kwargs={"path": os.path.join(dl_path, "ca_test_data_final_OFFICIAL.jsonl"), "key": "external_id"},
-            ),
-        ]
-
-    def _generate_examples(self, path=None, key=None):
-        """Yields examples."""
-        with open(path, encoding="utf-8") as f:
-            for line in f:
-                # in us bills, json has fields:
-                #   text, summary, title, bill_id, text_len, sum_len
-                # in ca bills, json has fields:
-                #   text, summary, title, external_id
-                d = json.loads(line)
-                yield d[key], {k: d[k] for k in [_DOCUMENT, _SUMMARY, "title"]}
data/ca_test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fadc965dcd2c8ec7a3031ee191f58c0d6e2a5cbec1c0feaa9b547607002869eb
+size 6121191
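
The added files are Git LFS pointers rather than the Parquet data itself; the oid is the SHA-256 of the actual shard held in LFS storage. A minimal sketch of checking a downloaded shard against that digest, assuming huggingface_hub is installed; the oid string is copied from the pointer above.

import hashlib
from huggingface_hub import hf_hub_download

# Illustrative only: fetch the ca_test shard and compare it to the LFS pointer's oid.
path = hf_hub_download("billsum", "data/ca_test-00000-of-00001.parquet", repo_type="dataset")
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
print(digest == "fadc965dcd2c8ec7a3031ee191f58c0d6e2a5cbec1c0feaa9b547607002869eb")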
data/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2733cb656a2a6fbff0fc012ca50bbf159615ff6451c0adb229884bbd474780b3
+size 15797079
data/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7ca4e515ad7502ef579a055c2d64e6dfa0d64200017c15bca00f323a34cd2ae3
+size 91811112
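
Because the data is now plain Parquet, the shards can also be read outside the datasets library. A minimal sketch with pandas, assuming pandas, pyarrow and huggingface_hub (which registers the hf:// fsspec filesystem) are installed; the expected shape comes from the split metadata in the README diff above.

import pandas as pd

# Illustrative only: read the train shard added in this commit into a DataFrame.
df = pd.read_parquet("hf://datasets/billsum/data/train-00000-of-00001.parquet")
print(df.shape)              # expected (18949, 3)
print(df.columns.tolist())   # expected ['text', 'summary', 'title']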
dataset_infos.json DELETED
@@ -1 +0,0 @@
- {"default": {"description": "\nBillSum, summarization of US Congressional and California state bills.\n\nThere are several features:\n - text: bill text.\n - summary: summary of the bills.\n - title: title of the bills.\nfeatures for us bills. ca bills does not have.\n - text_len: number of chars in text.\n - sum_len: number of chars in summary.\n", "citation": "\n@misc{kornilova2019billsum,\n title={BillSum: A Corpus for Automatic Summarization of US Legislation},\n author={Anastassia Kornilova and Vlad Eidelman},\n year={2019},\n eprint={1910.00523},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n", "homepage": "https://github.com/FiscalNote/BillSum", "license": "CC0", "features": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "summary": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": {"input": "text", "output": "summary"}, "task_templates": null, "builder_name": "billsum", "config_name": "default", "version": {"version_str": "3.0.0", "description": null, "major": 3, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 219596090, "num_examples": 18949, "dataset_name": "billsum"}, "test": {"name": "test", "num_bytes": 37866257, "num_examples": 3269, "dataset_name": "billsum"}, "ca_test": {"name": "ca_test", "num_bytes": 14945291, "num_examples": 1237, "dataset_name": "billsum"}}, "download_checksums": {"https://drive.google.com/uc?export=download&id=1g89WgFHMRbr4QrvA0ngh26PY081Nv3lx": {"num_bytes": 67260676, "checksum": "5a55dfb231618d63b25cec4773280a2986d38f53d6d4d39b8256b278edf1110c"}}, "download_size": 67260676, "post_processing_size": null, "dataset_size": 272407638, "size_in_bytes": 339668314}}