update
Browse files
- README.md +6 -3
- data/email_spam.jsonl +3 -0
- data/enron_spam.jsonl +3 -0
- data/sms_spam.jsonl +2 -2
- data/spam_assassin.jsonl +3 -0
- data/spam_detection.jsonl +3 -0
- examples/preprocess/process_email_spam.py +67 -1
- examples/preprocess/process_enron_spam.py +71 -1
- examples/preprocess/process_sms_spam.py +4 -0
- examples/preprocess/process_spam_assassin.py +9 -6
- examples/preprocess/process_spam_detection.py +69 -1
- examples/preprocess/samples_count.py +26 -0
- main.py +4 -1
- requirements.txt +3 -0
- spam_detect.py +8 -1
README.md
CHANGED
@@ -11,9 +11,11 @@ license: apache-2.0
 The datasets were collected and organized from the web as follows:
 | Dataset | Language | Task type | Original data / project link | Sample count | Description | Alternative download |
 | :--- | :---: | :---: | :---: | :---: | :---: | :---: |
-| sms_spam | English | SMS spam classification | [SMS Spam Collection](https://archive.ics.uci.edu/dataset/228/sms+spam+collection); [SMS Spam Collection Dataset](https://www.kaggle.com/datasets/uciml/sms-spam-collection-dataset) | | | |
-| spam_assassin | English | email spam classification | | | | |
-| enron_spam | English | email spam classification | [enron_spam_data](https://github.com/MWiechmann/enron_spam_data); [Enron-Spam](https://www2.aueb.gr/users/ion/data/enron-spam/); [spam-mails-dataset](https://www.kaggle.com/datasets/venky73/spam-mails-dataset) | | | |
+| sms_spam | English | SMS spam classification | [SMS Spam Collection](https://archive.ics.uci.edu/dataset/228/sms+spam+collection); [SMS Spam Collection Dataset](https://www.kaggle.com/datasets/uciml/sms-spam-collection-dataset) | ham: 4827; spam: 747 | The SMS Spam Collection is a public set of labeled SMS messages collected for mobile phone spam research. | [sms_spam](https://huggingface.co/datasets/sms_spam) |
+| spam_assassin | English | email spam classification | [datasets-spam-assassin](https://github.com/stdlib-js/datasets-spam-assassin); [Apache SpamAssassin’s public datasets](https://spamassassin.apache.org/old/publiccorpus/); [Spam or Not Spam Dataset](https://www.kaggle.com/datasets/ozlerhakan/spam-or-not-spam-dataset) | ham: 3795; spam: 6954 | A set of email messages suitable for testing spam-filtering systems. Note: the text is messy; not recommended. | [talby/SpamAssassin](https://huggingface.co/datasets/talby/spamassassin) |
+| enron_spam | English | email spam classification | [enron_spam_data](https://github.com/MWiechmann/enron_spam_data); [Enron-Spam](https://www2.aueb.gr/users/ion/data/enron-spam/); [spam-mails-dataset](https://www.kaggle.com/datasets/venky73/spam-mails-dataset) | ham: 16545; spam: 17171 | The Enron-Spam dataset is an excellent resource collected by V. Metsis, I. Androutsopoulos and G. Paliouras. | [SetFit/enron_spam](https://huggingface.co/datasets/SetFit/enron_spam) |
+| spam_detection | English | SMS spam classification | [Deysi/spam-detection-dataset](https://huggingface.co/datasets/Deysi/spam-detection-dataset) | ham: 5400; spam: 5500 | | |
+| email_spam | English | email spam classification | [NotShrirang/email-spam-filter](https://huggingface.co/datasets/NotShrirang/email-spam-filter) | ham: 3672; spam: 1499 | | |
 
 
 ### Sample examples
@@ -62,6 +64,7 @@ spam
 
 
 ### Reference sources
+
 <details>
 <summary>Reference data sources; expand to view.</summary>
 <pre><code>
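For reference, each subset in the table above is loadable by name through the repository's loading script. A minimal sketch (assuming the repo is cloned and run from its root, as main.py below does):

    from datasets import load_dataset

    # Load one subset by the name used in the README table;
    # spam_detect.py is the loading script at the repo root.
    dataset = load_dataset("spam_detect.py", name="enron_spam", split="train")
    print(dataset[0])  # keys: text, label ("spam"/"ham"), category, data_source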
data/email_spam.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b530e530e6251b7755b2329e15482a95332d71caad0b8c2fb85a7433a335fd7
+size 5919435
data/enron_spam.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:831a68669823136108e9e2a90c86220ccd3afe8b7c46a7d9df4a8667c21db477
+size 54956602
data/sms_spam.jsonl
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2f2c3c37b23bcbc97d37e86be652b70d6bbc4c69626ceeef6b3118eb9d3f2bc1
+size 968576
data/spam_assassin.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48753da2e31d5d65efcacf31f12e2f3b3f68b02a1a625222173a7bc8fb86435c
+size 41867894
data/spam_detection.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8645dd871427da0d5cad715fedfb89ded865fa8cba27fe05b7331bb4bd9c1068
+size 5239829
examples/preprocess/process_email_spam.py
CHANGED
@@ -1,6 +1,72 @@
 #!/usr/bin/python3
 # -*- coding: utf-8 -*-
+import argparse
+from collections import defaultdict
+import json
+import os
+from pathlib import Path
+import random
+import re
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--dataset_path", default="NotShrirang/email-spam-filter", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/email_spam.jsonl"),
+        type=str
+    )
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    dataset_dict = load_dataset(
+        path=args.dataset_path,
+        cache_dir=args.dataset_cache_dir,
+    )
+    print(dataset_dict)
+
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for split, dataset in dataset_dict.items():
+            for sample in tqdm(dataset):
+                # print(sample)
+                text = sample["text"]
+                label = sample["label"]
+
+                if label not in ("spam", "ham"):
+                    raise AssertionError
+
+                row = {
+                    "text": text,
+                    "label": label,
+                    "category": None,
+                    "data_source": "email_spam",
+                    "split": split
+                }
+                row = json.dumps(row, ensure_ascii=False)
+                f.write("{}\n".format(row))
+
+    return
 
 
 if __name__ == '__main__':
-    pass
+    main()
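As a quick sanity check of the output (a sketch, assuming the script above was run with its default arguments so that data/email_spam.jsonl exists locally):

    import json

    # Read back the first row written by process_email_spam.py.
    with open("data/email_spam.jsonl", "r", encoding="utf-8") as f:
        row = json.loads(next(f))
    print(sorted(row.keys()))  # ['category', 'data_source', 'label', 'split', 'text']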
examples/preprocess/process_enron_spam.py
CHANGED
@@ -1,6 +1,76 @@
 #!/usr/bin/python3
 # -*- coding: utf-8 -*-
+import argparse
+from collections import defaultdict
+import json
+import os
+from pathlib import Path
+import random
+import re
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--dataset_path", default="SetFit/enron_spam", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/enron_spam.jsonl"),
+        type=str
+    )
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    dataset_dict = load_dataset(
+        path=args.dataset_path,
+        cache_dir=args.dataset_cache_dir,
+    )
+    print(dataset_dict)
+
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for split, dataset in dataset_dict.items():
+            for sample in tqdm(dataset):
+                # print(sample)
+                # text = sample["text"]
+                subject = sample["subject"]
+                message = sample["message"]
+                label = sample["label_text"]
+
+                text = "{}\n\n{}".format(subject, message)
+
+                if label not in ("spam", "ham"):
+                    raise AssertionError
+
+                row = {
+                    "text": text,
+                    "label": label,
+                    "category": None,
+                    "data_source": "enron_spam",
+                    "split": split
+                }
+                row = json.dumps(row, ensure_ascii=False)
+                f.write("{}\n".format(row))
+
+    return
 
 
 if __name__ == '__main__':
-    pass
+    main()
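Worth noting as a design choice: the subject and message body are merged into a single text field, joined by a blank line, so every subset ends up exposing the same text/label/category schema downstream.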
examples/preprocess/process_sms_spam.py
CHANGED
@@ -54,9 +54,13 @@ def main():
             text = text.strip()
             label = "spam" if label == 1 else "ham"
 
+            if label not in ("spam", "ham"):
+                raise AssertionError
+
             row = {
                 "text": text,
                 "label": label,
+                "category": None,
                 "data_source": "sms_spam",
                 "split": "train"
             }
examples/preprocess/process_spam_assassin.py
CHANGED
@@ -49,17 +49,20 @@ def main():
         for split, dataset in dataset_dict.items():
             for sample in tqdm(dataset):
                 # print(sample)
-                label = sample["label_text"]
+                text = sample["text"]
+                group = sample["group"]
+                label = sample["label"]
 
+                label = "spam" if label == 1 else "ham"
+
+                if label not in ("spam", "ham"):
+                    raise AssertionError
 
                 row = {
                     "text": text,
                     "label": label,
+                    "category": group,
+                    "data_source": "spam_assassin",
                     "split": split
                 }
                 row = json.dumps(row, ensure_ascii=False)
examples/preprocess/process_spam_detection.py
CHANGED
@@ -1,6 +1,74 @@
 #!/usr/bin/python3
 # -*- coding: utf-8 -*-
+import argparse
+from collections import defaultdict
+import json
+import os
+from pathlib import Path
+import random
+import re
+import sys
+
+pwd = os.path.abspath(os.path.dirname(__file__))
+sys.path.append(os.path.join(pwd, '../../'))
+
+from datasets import load_dataset
+from tqdm import tqdm
+
+from project_settings import project_path
+
+
+def get_args():
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument("--dataset_path", default="Deysi/spam-detection-dataset", type=str)
+    parser.add_argument(
+        "--dataset_cache_dir",
+        default=(project_path / "hub_datasets").as_posix(),
+        type=str
+    )
+    parser.add_argument(
+        "--output_file",
+        default=(project_path / "data/spam_detection.jsonl"),
+        type=str
+    )
+    args = parser.parse_args()
+    return args
+
+
+def main():
+    args = get_args()
+
+    dataset_dict = load_dataset(
+        path=args.dataset_path,
+        cache_dir=args.dataset_cache_dir,
+    )
+    print(dataset_dict)
+
+    with open(args.output_file, "w", encoding="utf-8") as f:
+        for split, dataset in dataset_dict.items():
+            for sample in tqdm(dataset):
+                # print(sample)
+                text = sample["text"]
+                label = sample["label"]
+
+                label = "spam" if label == "spam" else "ham"
+
+                if label not in ("spam", "ham"):
+                    raise AssertionError
+
+                row = {
+                    "text": text,
+                    "label": label,
+                    "category": None,
+                    "data_source": "spam_detection",
+                    "split": split
+                }
+                row = json.dumps(row, ensure_ascii=False)
+                f.write("{}\n".format(row))
+
+    return
 
 
 if __name__ == '__main__':
-    pass
+    main()
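The extra mapping label = "spam" if label == "spam" else "ham" normalizes the negative class: presumably the source dataset labels it something other than "ham" (e.g. "not_spam"), and this collapses it to the project-wide ham/spam vocabulary before the sanity check.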
examples/preprocess/samples_count.py
CHANGED
@@ -1,5 +1,31 @@
 #!/usr/bin/python3
 # -*- coding: utf-8 -*-
+from collections import defaultdict
+from datasets import load_dataset, DownloadMode
+
+
+dataset_dict = load_dataset(
+    "../../spam_detect.py",
+    name="email_spam",
+    # name="enron_spam",
+    # name="sms_spam",
+    # name="spam_assassin",
+    # name="spam_detection",
+    split=None,
+    cache_dir=None,
+    download_mode=DownloadMode.FORCE_REDOWNLOAD
+)
+
+
+counter = defaultdict(int)
+for split, dataset in dataset_dict.items():
+    for sample in dataset:
+        text = sample["text"]
+        label = sample["label"]
+        counter[label] += 1
+
+count = "; ".join(sorted(["{}: {}".format(k, v) for k, v in counter.items()]))
+print(count)
 
 
 if __name__ == '__main__':
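Running the script once per name value prints a per-label summary such as ham: 3672; spam: 1499 (the email_spam figures), which is where the sample counts in the README table above come from.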
main.py
CHANGED
@@ -5,7 +5,10 @@ from datasets import load_dataset, DownloadMode
 
 dataset = load_dataset(
     "spam_detect.py",
-    name="
+    name="email_spam",
+    # name="sms_spam",
+    # name="spam_assassin",
+    # name="spam_detection",
     split="train",
     cache_dir=None,
     download_mode=DownloadMode.FORCE_REDOWNLOAD
requirements.txt
CHANGED
@@ -1,3 +1,6 @@
 datasets==2.10.1
 fsspec==2023.9.2
 tqdm==4.66.1
+pandas==1.5.2
+xlrd==1.2.0
+openpyxl==3.0.9
spam_detect.py
CHANGED
@@ -11,7 +11,12 @@ import datasets
 
 
 _urls = {
-    "
+    "email_spam": "data/email_spam.jsonl",
+    "enron_spam": "data/enron_spam.jsonl",
+    "sms_spam": "data/sms_spam.jsonl",
+    "spam_assassin": "data/spam_assassin.jsonl",
+    "spam_detection": "data/spam_detection.jsonl",
+
 }
 
 
@@ -43,6 +48,7 @@ class SpamDetect(datasets.GeneratorBasedBuilder):
         features = datasets.Features({
             "text": datasets.Value("string"),
             "label": datasets.Value("string"),
+            "category": datasets.Value("string"),
             "data_source": datasets.Value("string"),
         })
 
@@ -91,6 +97,7 @@ class SpamDetect(datasets.GeneratorBasedBuilder):
         yield idx, {
             "text": sample["text"],
             "label": sample["label"],
+            "category": sample["category"],
             "data_source": sample["data_source"],
         }
         idx += 1
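Taken together, the commit threads the new "category" field end to end: the preprocess scripts write it into each jsonl row, and the loading script declares and yields it. A minimal sketch of verifying it (assuming the repo root as working directory, where spam_detect.py lives):

    from datasets import load_dataset, DownloadMode

    # Reload one subset and inspect the new field.
    dataset = load_dataset(
        "spam_detect.py",
        name="spam_assassin",
        split="train",
        download_mode=DownloadMode.FORCE_REDOWNLOAD,
    )
    sample = dataset[0]
    # For spam_assassin, "category" carries the source corpus "group";
    # the other subsets write it as null.
    print(sample["label"], sample["category"], sample["data_source"])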