#!/usr/bin/python3
# -*- coding: utf-8 -*-
from collections import defaultdict
import json
from pathlib import Path
import random
import re
from typing import Any, Dict, List, Tuple

import datasets

# Relative path to the JSON Lines file backing each sub-dataset (builder config).
_urls = {
    "enron_spam": "data/enron_spam.jsonl",
    "enron_spam_subset": "data/enron_spam_subset.jsonl",
    "ling_spam": "data/ling_spam.jsonl",
    "sms_spam": "data/sms_spam.jsonl",
    "spam_assassin": "data/spam_assassin.jsonl",
    "spam_detection": "data/spam_detection.jsonl",
    "spam_emails": "data/spam_emails.jsonl",
    "spam_message": "data/spam_message.jsonl",
    "spam_message_lr": "data/spam_message_lr.jsonl",
    "trec07p": "data/trec07p.jsonl",
    "youtube_spam_collection": "data/youtube_spam_collection.jsonl",
}

_CITATION = """\
@dataset{spam_detect,
  author    = {Xing Tian},
  title     = {spam_detect},
  month     = sep,
  year      = 2023,
  publisher = {Xing Tian},
  version   = {1.0},
}
"""


class SpamDetect(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # One BuilderConfig per sub-dataset listed in _urls.
    intent_configs = list()
    for name in _urls.keys():
        config = datasets.BuilderConfig(name=name, version=VERSION, description=name)
        intent_configs.append(config)

    BUILDER_CONFIGS = [
        *intent_configs,
    ]

    def _info(self):
        features = datasets.Features({
            "text": datasets.Value("string"),
            "label": datasets.Value("string"),
            "category": datasets.Value("string"),
            "data_source": datasets.Value("string"),
        })

        return datasets.DatasetInfo(
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        url = _urls[self.config.name]
        dl_path = dl_manager.download(url)
        archive_path = dl_path

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"archive_path": archive_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"archive_path": archive_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"archive_path": archive_path, "split": "test"},
            ),
        ]

    def _generate_examples(self, archive_path, split):
        """Yields examples."""
        archive_path = Path(archive_path)

        idx = 0
        with open(archive_path, "r", encoding="utf-8") as f:
            for row in f:
                # Each line is a JSON object; keep only rows belonging to the requested split.
                sample = json.loads(row)
                if sample["split"] != split:
                    continue

                yield idx, {
                    "text": sample["text"],
                    "label": sample["label"],
                    "category": sample["category"],
                    "data_source": sample["data_source"],
                }
                idx += 1


if __name__ == '__main__':
    pass
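
    # Minimal usage sketch, not part of the original script: it assumes this loading
    # script sits on disk next to the data/*.jsonl files named in _urls, and that the
    # Hugging Face `datasets` library is installed. The config name "enron_spam" is
    # one of the keys of _urls; newer `datasets` releases may additionally require
    # trust_remote_code=True when loading a script-based dataset.
    from datasets import load_dataset

    demo = load_dataset(__file__, name="enron_spam", split="train")
    print(demo[0])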