Alexander Black committed on
Commit
466b3f2
1 Parent(s): 6e4d8ca

script wip

Browse files
Files changed (1) hide show
  1. ANAKIN.py +29 -20
ANAKIN.py CHANGED
@@ -110,10 +110,25 @@ class Anakin(datasets.GeneratorBasedBuilder):
110
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
111
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
112
  urls = _URLS[self.config.name]
113
- data_dir = dl_manager.download_and_extract(urls)
114
- trimmed_dir = dl_manager.download_and_extract(
115
- "https://huggingface.co/datasets/AlexBlck/ANAKIN/resolve/main/trimmed/"
116
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
117
  return [
118
  datasets.SplitGenerator(
119
  name=datasets.Split.TRAIN,
@@ -121,7 +136,7 @@ class Anakin(datasets.GeneratorBasedBuilder):
121
  gen_kwargs={
122
  "filepath": data_dir,
123
  "split": "train",
124
- "trimmed_dir": trimmed_dir,
125
  },
126
  ),
127
  datasets.SplitGenerator(
@@ -130,7 +145,7 @@ class Anakin(datasets.GeneratorBasedBuilder):
130
  gen_kwargs={
131
  "filepath": data_dir,
132
  "split": "dev",
133
- "trimmed_dir": trimmed_dir,
134
  },
135
  ),
136
  datasets.SplitGenerator(
@@ -143,17 +158,11 @@ class Anakin(datasets.GeneratorBasedBuilder):
143
  ),
144
  ]
145
 
146
- def _generate_examples(self, filepath, split, trimmed_dir):
147
- random.seed(47)
148
- root_url = "https://huggingface.co/datasets/AlexBlck/ANAKIN/resolve/main/"
149
- df = pd.read_csv(filepath)
150
- ids = df["video-id"].to_list()
151
- random.shuffle(ids)
152
- if split == "train":
153
- for key, idx in enumerate(ids[:342]):
154
- yield key, {
155
- "full": root_url + f"full/{idx}.mp4",
156
- "trimmed": trimmed_dir + f"{idx}.mp4",
157
- "edited": root_url + f"edited/{idx}.mp4",
158
- "masks": root_url + f"masks/{idx}/",
159
- }
 
110
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
111
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
112
  urls = _URLS[self.config.name]
113
+ metadata_dir = dl_manager.download_and_extract(urls)
114
+
115
+ random.seed(47)
116
+ root_url = "https://huggingface.co/datasets/AlexBlck/ANAKIN/resolve/main/"
117
+ df = pd.read_csv(metadata_dir)
118
+ ids = df["video-id"].to_list()
119
+ random.shuffle(ids)
120
+
121
+ data_urls = [
122
+ {
123
+ "full": root_url + f"full/{idx}.mp4",
124
+ "trimmed": root_url + f"trimmed/{idx}.mp4",
125
+ "edited": root_url + f"edited/{idx}.mp4",
126
+ "masks": root_url + f"masks/{idx}/",
127
+ }
128
+ for idx in ids
129
+ ]
130
+ data_dir = dl_manager.download_and_extract(data_urls)
131
+
132
  return [
133
  datasets.SplitGenerator(
134
  name=datasets.Split.TRAIN,
 
136
  gen_kwargs={
137
  "filepath": data_dir,
138
  "split": "train",
139
+ "ids": ids[:342],
140
  },
141
  ),
142
  datasets.SplitGenerator(
 
145
  gen_kwargs={
146
  "filepath": data_dir,
147
  "split": "dev",
148
+ "ids": ids[342:],
149
  },
150
  ),
151
  datasets.SplitGenerator(
 
158
  ),
159
  ]
160
 
161
+ def _generate_examples(self, filepath, ids, split):
162
+ for key, idx in enumerate(ids):
163
+ yield key, {
164
+ "full": filepath + f"full/{idx}.mp4",
165
+ "trimmed": filepath + f"trimmed/{idx}.mp4",
166
+ "edited": filepath + f"edited/{idx}.mp4",
167
+ "masks": filepath + f"masks/{idx}/",
168
+ }