import io
import json
import os
from glob import glob

import datasets
import zstandard as zstd
from datasets import GeneratorBasedBuilder
from huggingface_hub import snapshot_download
class PileDomainDataset(GeneratorBasedBuilder):
    """Loads a Pile domain (USPTO) from zstd-compressed JSONL shards, with
    separate Pile-wide and domain-specific validation/test splits."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description="Pile Domain Dataset",
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )
    def _split_generators(self, dl_manager):
        # Earlier download paths, kept for reference:
        # snapshot_download(repo_id="Multi-Domain-Expert-Layers/uspto", repo_type="dataset")
        # dl_manager.download_and_extract("https://huggingface.co/datasets/Multi-Domain-Expert-Layers/uspto/resolve/main/uspto.tar.gz")
        dl_path = snapshot_download(repo_id="jordiclive/my_uspto", repo_type="dataset")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dir": os.path.join(dl_path, "train"), "split": None},
            ),
            datasets.SplitGenerator(
                name="validation_pile",
                gen_kwargs={"data_dir": os.path.join(dl_path, "val"), "split": "pile"},
            ),
            datasets.SplitGenerator(
                name="validation_domain",
                gen_kwargs={"data_dir": os.path.join(dl_path, "val"), "split": "domain"},
            ),
            datasets.SplitGenerator(
                name="test_pile",
                gen_kwargs={"data_dir": os.path.join(dl_path, "test"), "split": "pile"},
            ),
            datasets.SplitGenerator(
                name="test_domain",
                gen_kwargs={"data_dir": os.path.join(dl_path, "test"), "split": "domain"},
            ),
        ]
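    # Expected snapshot layout (inferred from the glob pattern and filename
    # filtering in _generate_examples below; exact shard names are an assumption):
    #
    #   <snapshot>/train/*.jsonl.zst
    #   <snapshot>/val/*pile*.jsonl.zst    <snapshot>/val/*domain*.jsonl.zst
    #   <snapshot>/test/*pile*.jsonl.zst   <snapshot>/test/*domain*.jsonl.zst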
    def _generate_examples(self, data_dir, split):
        dctx = zstd.ZstdDecompressor()
        idx = -1
        file_paths = glob(os.path.join(data_dir, "*.jsonl.zst"))
        # The optional `split` tag ("pile"/"domain") selects shards by filename.
        if split is not None:
            file_paths = [f for f in file_paths if split in f]
        for file in file_paths:
            with open(file, "rb") as f:
                reader = dctx.stream_reader(f)
                buffer = io.BufferedReader(reader)
                # Iterate line by line rather than via readlines() so a shard
                # is never fully materialized in memory.
                for line in buffer:
                    data = json.loads(line)
                    idx += 1
                    # Keep only the declared "text" feature, in case shards
                    # carry extra metadata fields.
                    yield idx, {"text": data["text"]}
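
# Usage sketch (illustrative; the file name below is an assumption, not part of
# the original script). Saved as pile_domain_dataset.py, the builder runs
# through the standard script-based `load_dataset` entry point; besides
# "train", the custom splits are "validation_pile", "validation_domain",
# "test_pile", and "test_domain".
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset("pile_domain_dataset.py", split="train")
    print(ds[0]["text"][:200])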