Datasets: kernelmachine
committed on
Commit • 76fda69
1 Parent(s): 2102ac9
update
Files changed: open-license-corpus.py (+110 −0)
open-license-corpus.py ADDED
@@ -0,0 +1,110 @@
import gzip
import json

import datasets

logger = datasets.logging.get_logger(__name__)


CITATION = """
"""

DESCRIPTION = """
The Open License Corpus
"""

OLC_SUBSET_NAMES = [
    "ccby_law",
    "ccby_s2orc",
    "ccby_stackexchange",
    "ccby_stackoverflow",
    "ccby_wikinews",
    "ccby_wikipedia",
    "pd_arxiv_abstracts",
    "pd_books",
    "pd_law",
    "pd_news",
    "pd_s2orc",
    "sw_amps_math",
    "sw_dm_math",
    "sw_github",
    "sw_hackernews",
    "sw_ubuntu_irc",
]

URL = "https://huggingface.co/datasets/kernelmachine/open-license-corpus/"

# Number of gzipped JSONL shards per subset and split.
N_SHARDS_PER_SPLIT = {
    "ccby_s2orc": {"train": 5000},
    "ccby_law": {"train": 50},
    "ccby_stackexchange": {"train": 1500},
    "ccby_stackoverflow": {"train": 750},
    "ccby_wikinews": {"train": 42},
    "ccby_wikipedia": {"train": 3000},
    "pd_arxiv_abstracts": {"train": 1},
    "pd_books": {"train": 150},
    "pd_law": {"train": 2000},
    "pd_news": {"train": 10},
    "pd_s2orc": {"train": 30},
    "sw_amps_math": {"train": 2},
    "sw_dm_math": {"train": 239},
    "sw_github": {"train": 2500},
    "sw_hackernews": {"train": 16},
    "sw_ubuntu_irc": {"train": 27},
}

# Use a `resolve` URL (not `blob`) so requests return the raw file rather than the HTML viewer.
# DATA_URL = 'https://huggingface.co/datasets/kernelmachine/open-license-corpus/blob/main/data/{name}/{split}-{index:05d}-of-{n_shards:05d}.jsonl.gz'
DATA_URL = "https://huggingface.co/datasets/kernelmachine/open-license-corpus/resolve/main/data/{name}/{split}-{index:05d}-of-{n_shards:05d}.jsonl.gz"


class OpenLicenseCorpusConfig(datasets.BuilderConfig):
    """BuilderConfig carrying per-subset features and citation (not used by BUILDER_CONFIGS below)."""

    def __init__(self, features=None, citation=None, **kwargs):
        super().__init__(**kwargs)
        self.features = features
        self.citation = citation


class OpenLicenseCorpus(datasets.GeneratorBasedBuilder):

    # One plain config per OLC subset; select a subset by name when loading.
    BUILDER_CONFIGS = [datasets.BuilderConfig(name=name) for name in OLC_SUBSET_NAMES]

    def _info(self):
        return datasets.DatasetInfo(
            description=DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=URL,
            citation=CITATION,
        )

    def _split_generators(self, dl_manager):
        data_urls = {}
        for split in ["train"]:
            # Shard URLs are built with (shard count - 1) in both the index range
            # and the zero-padded "-of-" suffix, matching the filenames this
            # script expects in the repository.
            n_shards = N_SHARDS_PER_SPLIT[self.config.name][split] - 1
            data_urls[split] = [
                DATA_URL.format(name=self.config.name, split=split, index=index, n_shards=n_shards)
                for index in range(n_shards)
            ]

        train_downloaded_files = dl_manager.download(data_urls["train"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepaths": train_downloaded_files},
            )
        ]

    def _generate_examples(self, filepaths):
        """Yield examples in raw (text) form by iterating over all shard files."""
        id_ = 0
        for filepath in filepaths:
            logger.info("generating examples from = %s", filepath)
            # gzip.open reads the path directly in text mode; no extra open() is needed.
            with gzip.open(filepath, "rt", encoding="utf-8") as f:
                for line in f:
                    if line:
                        example = json.loads(line)
                        yield id_, example
                        id_ += 1