Alexander Black committed on
Commit
bf2104d
1 Parent(s): ba9fe16

add dataset script

Browse files
Files changed (1) hide show
  1. anakin.py +154 -0
anakin.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import json
3
+ import os
4
+ import random
5
+
6
+ import datasets
7
+ import pandas as pd
8
+
9
+ # TODO: Add BibTeX citation
10
+ # Find for instance the citation on arxiv or on the dataset repo/website
11
+ _CITATION = """\
12
+ @misc{black2023vader,
13
+ title={VADER: Video Alignment Differencing and Retrieval},
14
+ author={Alexander Black and Simon Jenni and Tu Bui and Md. Mehrab Tanjim and Stefano Petrangeli and Ritwik Sinha and Viswanathan Swaminathan and John Collomosse},
15
+ year={2023},
16
+ eprint={2303.13193},
17
+ archivePrefix={arXiv},
18
+ primaryClass={cs.CV}
19
+ }
20
+ """
21
+
22
+ # TODO: Add description of the dataset here
23
+ # You can copy an official description
24
+ _DESCRIPTION = """\
25
+ ANAKIN is a dataset of mANipulated videos and mAsK annotatIoNs.
26
+ """
27
+
28
+ # TODO: Add a link to an official homepage for the dataset here
29
+ _HOMEPAGE = "https://github.com/AlexBlck/vader"
30
+
31
+ # TODO: Add the licence for the dataset here if you can find it
32
+ _LICENSE = "cc-by-4.0"
33
+
34
+ # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
35
+ # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
36
+ _URLS = {
37
+ "first_domain": "https://huggingface.co/datasets/AlexBlck/ANAKIN/raw/main/metadata.csv",
38
+ }
39
+
40
+
41
+ # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
42
+ class Anakin(datasets.GeneratorBasedBuilder):
43
+ """TODO: Short description of my dataset."""
44
+
45
+ VERSION = datasets.Version("1.0.0")
46
+
47
+ # This is an example of a dataset with multiple configurations.
48
+ # If you don't want/need to define several sub-sets in your dataset,
49
+ # just remove the BUILDER_CONFIG_CLASS and the BUILDER_CONFIGS attributes.
50
+
51
+ # If you need to make complex sub-parts in the datasets with configurable options
52
+ # You can create your own builder configuration class to store attribute, inheriting from datasets.BuilderConfig
53
+ # BUILDER_CONFIG_CLASS = MyBuilderConfig
54
+
55
+ # You will be able to load one or the other configurations in the following list with
56
+ # data = datasets.load_dataset('my_dataset', 'first_domain')
57
+ # data = datasets.load_dataset('my_dataset', 'second_domain')
58
+ BUILDER_CONFIGS = [
59
+ datasets.BuilderConfig(
60
+ name="all",
61
+ version=VERSION,
62
+ description="Full video, trimmed video, edited video, masks (if exists), and edit description",
63
+ ),
64
+ ]
65
+
66
+ DEFAULT_CONFIG_NAME = "all" # It's not mandatory to have a default configuration. Just use one if it make sense.
67
+
68
+ def _info(self):
69
+ # TODO: This method specifies the datasets.DatasetInfo object which contains informations and typings for the dataset
70
+ if self.config.name == "all":
71
+ features = datasets.Features(
72
+ {
73
+ "full": datasets.Value("string"),
74
+ "trimmed": datasets.Value("string"),
75
+ "edited": datasets.Value("string"),
76
+ "masks": datasets.Value("string"),
77
+ # "edit_description": datasets.Value("string"),
78
+ }
79
+ )
80
+ else: # This is an example to show how to have different features for "first_domain" and "second_domain"
81
+ features = datasets.Features(
82
+ {
83
+ "sentence": datasets.Value("string"),
84
+ "option2": datasets.Value("string"),
85
+ "second_domain_answer": datasets.Value("string")
86
+ # These are the features of your dataset like images, labels ...
87
+ }
88
+ )
89
+ return datasets.DatasetInfo(
90
+ # This is the description that will appear on the datasets page.
91
+ description=_DESCRIPTION,
92
+ # This defines the different columns of the dataset and their types
93
+ features=features, # Here we define them above because they are different between the two configurations
94
+ # If there's a common (input, target) tuple from the features, uncomment supervised_keys line below and
95
+ # specify them. They'll be used if as_supervised=True in builder.as_dataset.
96
+ # supervised_keys=("sentence", "label"),
97
+ # Homepage of the dataset for documentation
98
+ homepage=_HOMEPAGE,
99
+ # License for the dataset if available
100
+ license=_LICENSE,
101
+ # Citation for the dataset
102
+ citation=_CITATION,
103
+ )
104
+
105
+ def _split_generators(self, dl_manager):
106
+ # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
107
+ # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
108
+
109
+ # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLS
110
+ # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
111
+ # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
112
+ urls = _URLS[self.config.name]
113
+ data_dir = dl_manager.download_and_extract(urls)
114
+ return [
115
+ datasets.SplitGenerator(
116
+ name=datasets.Split.TRAIN,
117
+ # These kwargs will be passed to _generate_examples
118
+ gen_kwargs={
119
+ "filepath": os.path.join(data_dir, "metadata.csv"),
120
+ "split": "train",
121
+ },
122
+ ),
123
+ datasets.SplitGenerator(
124
+ name=datasets.Split.VALIDATION,
125
+ # These kwargs will be passed to _generate_examples
126
+ gen_kwargs={
127
+ "filepath": os.path.join(data_dir, "metadata.csv"),
128
+ "split": "dev",
129
+ },
130
+ ),
131
+ datasets.SplitGenerator(
132
+ name=datasets.Split.TEST,
133
+ # These kwargs will be passed to _generate_examples
134
+ gen_kwargs={
135
+ "filepath": os.path.join(data_dir, "metadata.csv"),
136
+ "split": "test",
137
+ },
138
+ ),
139
+ ]
140
+
141
+ def _generate_examples(self, filepath, split):
142
+ random.seed(47)
143
+ root_url = "https://huggingface.co/datasets/AlexBlck/ANAKIN/resolve/main/"
144
+ df = pd.read_csv(filepath)
145
+ ids = df["video-id"].to_list()
146
+ random.shuffle(ids)
147
+ if split == "train":
148
+ for key, idx in enumerate(ids[:342]):
149
+ yield key, {
150
+ "full": root_url + f"full/{idx}.mp4",
151
+ "trimmed": root_url + f"trimmed/{idx}.mp4",
152
+ "edited": root_url + f"edited/{idx}.mp4",
153
+ "masks": root_url + f"masks/{idx}/",
154
+ }