lcolonn committed on
Commit
4a3c7b3
1 Parent(s): 58ef1a4

Delete loading script

Browse files
Files changed (1) hide show
  1. patfig.py +0 -78
patfig.py DELETED
@@ -1,78 +0,0 @@
1
- import datasets
2
- from datasets import load_dataset, Dataset, Value, Sequence, Features, DatasetInfo, GeneratorBasedBuilder, Image
3
-
4
- from pathlib import Path
5
- import os
6
- import pandas as pd
7
- import json
8
-
9
- _DESCRIPTION = """\ The PatFig Dataset is a curated collection of over 18,000 patent images from more than 7,
10
- 000 European patent applications, spanning the year 2020. It aims to provide a comprehensive resource for research
11
- and applications in image captioning, abstract reasoning, patent analysis, and automated documentprocessing. The
12
- overarching goal of this dataset is to advance the research in visually situated language understanding towards more
13
- hollistic consumption of the visual and textual data.
14
- """
15
-
16
- _BASE_URL = "https://huggingface.co/datasets/lcolonn/patfig/resolve/main/"
17
- _METADATA_URLS = {
18
- "annotations_train": "train/annotations_train.zip",
19
- "annotations_test": "test/annotations_test.zip"
20
- }
21
- _IMAGES_URLS = {
22
- "test_images": "train/train_images.tar.gz",
23
- "train_images": "test/test_images.tar.gz",
24
- }
25
- _URLS = {
26
- "train_images": "train/train_images.tar.gz",
27
- "test_images": "test/test_images.tar.gz",
28
- "annotations_train": "train/annotations_train.zip",
29
- "annotations_test": "test/annotations_test.zip",
30
- }
31
-
32
-
33
class PatFig(GeneratorBasedBuilder):
    """DatasetBuilder for the PatFig patent-figure dataset.

    Downloads the per-split image tarballs and annotation archives from the
    Hugging Face Hub, then yields one example per annotated patent figure.
    """

    def _info(self) -> DatasetInfo:
        """Return the dataset metadata: description and feature schema."""
        return DatasetInfo(
            description=_DESCRIPTION,
            features=Features({
                # Stored as the image file path; the Image feature loads it lazily.
                "image": Image(),
                "image_name": Value("string"),
                "pub_number": Value("string"),
                "title": Value("string"),
                "figs_norm": Sequence(feature=Value("string"), length=-1),
                "short_description": Sequence(feature=Value("string"), length=-1),
                "long_description": Sequence(feature=Value("string"), length=-1),
                "short_description_token_count": Value("int64"),
                "long_description_token_count": Value("int64"),
                "draft_class": Value("string"),
                "cpc_class": Value("string"),
                # List of {element_identifier, terms} records per figure.
                "relevant_terms": [{'element_identifier': Value("string"), "terms": Sequence(feature=Value("string"), length=-1)}],
                "associated_claims": Value("string"),
                "compound": Value("bool"),
                "references": Sequence(feature=Value(dtype='string'), length=-1),
            }),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download and extract all archives, then define the train/test splits.

        Args:
            dl_manager: Download manager supplied by the `datasets` library.

        Returns:
            A list of SplitGenerators wiring each split to its extracted
            images directory and annotations JSON file.
        """
        urls_to_download = {key: _BASE_URL + fname for key, fname in _URLS.items()}
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images_dir": f'{downloaded_files["train_images"]}/train',
                    "annotations_dir": f'{downloaded_files["annotations_train"]}/annotations_train.json',
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "images_dir": f'{downloaded_files["test_images"]}/test',
                    "annotations_dir": f'{downloaded_files["annotations_test"]}/annotations_test.json',
                },
            ),
        ]

    def _generate_examples(self, images_dir: str, annotations_dir: str):
        """Yield ``(key, example)`` pairs for one split.

        Args:
            images_dir: Directory holding the extracted images, laid out as
                ``<images_dir>/<pub_number>/<image_name>``.
            annotations_dir: Path to the split's annotations JSON file
                (despite the name, this is a file path, not a directory).

        Yields:
            Tuples of the annotation key and an example dict; the ``image``
            entry is the image file path, merged with the raw record fields.
        """
        # FIX: explicit UTF-8 so JSON decoding does not depend on the
        # platform's default locale encoding.
        with open(annotations_dir, "r", encoding="utf-8") as f:
            data = json.load(f)
        for idx, record in data.items():
            image_path = os.path.join(images_dir, record["pub_number"], record["image_name"])
            yield idx, {
                "image": image_path,
                **record,
            }