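"""Loading script for the Digital Corpora CC-MAIN-2021-31-PDF-UNTRUNCATED corpus.

Each example pairs a PDF's raw bytes with the matching rows from the corpus's three
metadata tables: Common Crawl host information (cc-hosts), crawl provenance
(cc-provenance), and pdfinfo output. The script exposes a single "train" split.

Usage sketch (the dataset path is illustrative, not a published repo name; newer
versions of `datasets` may also require ``trust_remote_code=True``):

    import datasets

    ds = datasets.load_dataset("<path-to-this-script-or-dataset-repo>", split="train")
"""
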
import csv
import os

import datasets

_DESCRIPTION = ""
_CITATION = ""
_HOMEPAGE = ""

_ROOT_URL = "https://digitalcorpora.s3.amazonaws.com/corpora/files/CC-MAIN-2021-31-PDF-UNTRUNCATED"
_ZIPFILES_URL_TEMPLATE = _ROOT_URL + "/zipfiles/{subdir}/{filename}"
# Zip archives 0000.zip through 7932.zip, grouped into subdirectories of up to 1000 archives
# each ("0000-0999", ..., "7000-7999"); the final group contains only 933 archives.
# _generate_examples relies on this ordering: archive i holds the PDFs numbered
# i * 1000 through i * 1000 + 999.
_ZIPFILES_URLS = [
    _ZIPFILES_URL_TEMPLATE.format(subdir=f"{thousand:04d}-{thousand + 999:04d}", filename=f"{thousand + i:04d}.zip")
    for thousand in range(0, 8000, 1000)
    for i in range(933 if thousand == 7000 else 1000)
]

_CC_HOSTS_URL = _ROOT_URL + "/metadata/cc-hosts-20230303.csv.gz"
_CC_PROVENANCE_URL = _ROOT_URL + "/metadata/cc-provenance-20230303.csv.gz"
_PDFINFO_URL = _ROOT_URL + "/metadata/pdfinfo-20230315.csv.gz"

# PDFs missing from the downloaded zip archives; skipped during example generation.
_MISSING_PDFS = {
    "177150.pdf",
    "594742.pdf",
    "706328.pdf",
    "1260258.pdf",
    "1544119.pdf",
    "1591732.pdf",
    "1640603.pdf",
    "1890087.pdf",
    "1920911.pdf",
    "1992331.pdf",
    "2519839.pdf",
    "2712444.pdf",
    "2765539.pdf",
    "3179469.pdf",
    "4170238.pdf",
    "4414331.pdf",
    "4512373.pdf",
    "4977579.pdf",
    "5198714.pdf",
    "5236677.pdf",
    "5447694.pdf",
    "6318895.pdf",
    "6817632.pdf",
    "6940914.pdf",
    "7241425.pdf",
    "7279847.pdf",
    "7407159.pdf",
    "7635694.pdf",
    "7889525.pdf"
}

class Pdfa(datasets.GeneratorBasedBuilder):
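    """Joins each PDF's bytes with its cc-hosts, cc-provenance, and pdfinfo metadata rows."""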

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            features=datasets.Features({
                "pdf_bytes": datasets.Value("binary"),
                "file_name": datasets.Value("string"),
                "url_id": datasets.Value("string"),
                "cc_host": {
                    "host": datasets.Value("string"),
                    "tld": datasets.Value("string"),
                    "ip_address": datasets.Value("string"),
                    "country": datasets.Value("string"),
                    "latitude": datasets.Value("float32"),
                    "longitude": datasets.Value("float32"),
                },
                "cc_provenance": {
                    "url": datasets.Value("string"),
                    "cc_digest": datasets.Value("string"),
                    "cc_http_mime": datasets.Value("string"),
                    "cc_detected_mime": datasets.Value("string"),
                    "cc_warc_file_name": datasets.Value("string"),
                    "cc_warc_start": datasets.Value("int64"),
                    "cc_warc_end": datasets.Value("int64"),
                    "cc_truncated": datasets.Value("string"),
                    "fetched_status": datasets.Value("string"),
                    "fetched_digest": datasets.Value("string"),
                    "fetched_length": datasets.Value("int64"),
                },
                "pdfinfo": {
                    "parse_time_millis": datasets.Value("int64"),
                    "exit_value": datasets.Value("int64"),
                    "timeout": datasets.Value("string"),
                    "stderr": datasets.Value("string"),
                    "pdf_version": datasets.Value("string"),
                    "creator": datasets.Value("string"),
                    "producer": datasets.Value("string"),
                    "created": datasets.Value("string"),
                    "modified": datasets.Value("string"),
                    "custom_metadata": datasets.Value("string"),
                    "metadata_stream": datasets.Value("string"),
                    "tagged": datasets.Value("string"),
                    "user_properties": datasets.Value("string"),
                    "form": datasets.Value("string"),
                    "javascript": datasets.Value("string"),
                    "pages": datasets.Value("int64"),
                    "page_size": datasets.Value("string"),
                    "page_rotation": datasets.Value("int64"),
                    "optimized": datasets.Value("string"),
                },
            })
        )

    def _split_generators(self, dl_manager):
        cc_host_csv_path = dl_manager.download_and_extract(_CC_HOSTS_URL)
        cc_provenance_csv_path = dl_manager.download_and_extract(_CC_PROVENANCE_URL)
        pdfinfo_csv_path = dl_manager.download_and_extract(_PDFINFO_URL)
        pdfs_directories = tuple(dl_manager.download_and_extract(_ZIPFILES_URLS))  # tuple keeps download order so that index i maps to archive i
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={
                "cc_host_csv_path": cc_host_csv_path,
                "cc_provenance_csv_path": cc_provenance_csv_path,
                "pdfinfo_csv_path": pdfinfo_csv_path,
                "pdfs_directories": pdfs_directories
            }),
        ]

    def _generate_examples(self, cc_host_csv_path, cc_provenance_csv_path, pdfinfo_csv_path, pdfs_directories):
        """Yields examples."""
        with open(cc_host_csv_path, encoding="utf-8") as cc_host_file, \
            open(cc_provenance_csv_path, encoding="utf-8") as cc_provenance_csv_file, \
            open(pdfinfo_csv_path, encoding="utf-8") as pdfinfo_csv_file:
            cc_host_reader = csv.DictReader(cc_host_file)
            cc_provenance_reader = csv.DictReader(cc_provenance_csv_file)
            pdfinfo_csv_reader = csv.DictReader(pdfinfo_csv_file)
            # The three metadata CSVs are assumed to be row-aligned: one row per PDF, in the same order.
            for cc_host_dict, cc_provenance_dict, pdfinfo_dict in zip(cc_host_reader, cc_provenance_reader, pdfinfo_csv_reader):
                file_name = cc_host_dict["file_name"]
                url_id = cc_host_dict["url_id"]
                if file_name in _MISSING_PDFS:
                    continue
                pdf_idx = int(file_name.split(".")[0])
                pdf_dir = pdfs_directories[pdf_idx // 1000]  # each extracted archive directory holds 1000 consecutively numbered PDFs
                pdf_path = os.path.join(pdf_dir, file_name)
                # Drop the join keys from each metadata row; they are exposed at the top level of the example.
                for metadata_row in (cc_host_dict, cc_provenance_dict, pdfinfo_dict):
                    metadata_row.pop("url_id")
                    metadata_row.pop("file_name")
                with open(pdf_path, "rb") as pdf_file:
                    yield file_name, {
                        "pdf_bytes": pdf_file.read(),
                        "file_name": file_name,
                        "url_id": url_id,
                        "cc_host": cc_host_dict,
                        "cc_provenance": cc_provenance_dict,
                        "pdfinfo": pdfinfo_dict
                    }