Alexander Black committed on
Commit: 6e4d8ca
Parent(s): 3afda2d

script wip

Files changed (1)
  1. ANAKIN.py +7 -2
ANAKIN.py CHANGED
@@ -111,6 +111,9 @@ class Anakin(datasets.GeneratorBasedBuilder):
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
         urls = _URLS[self.config.name]
         data_dir = dl_manager.download_and_extract(urls)
+        trimmed_dir = dl_manager.download_and_extract(
+            "https://huggingface.co/datasets/AlexBlck/ANAKIN/resolve/main/trimmed/"
+        )
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -118,6 +121,7 @@ class Anakin(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": data_dir,
                     "split": "train",
+                    "trimmed_dir": trimmed_dir,
                 },
             ),
             datasets.SplitGenerator(
@@ -126,6 +130,7 @@ class Anakin(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "filepath": data_dir,
                     "split": "dev",
+                    "trimmed_dir": trimmed_dir,
                 },
             ),
             datasets.SplitGenerator(
@@ -138,7 +143,7 @@ class Anakin(datasets.GeneratorBasedBuilder):
             ),
         ]
 
-    def _generate_examples(self, filepath, split):
+    def _generate_examples(self, filepath, split, trimmed_dir):
         random.seed(47)
         root_url = "https://huggingface.co/datasets/AlexBlck/ANAKIN/resolve/main/"
         df = pd.read_csv(filepath)
@@ -148,7 +153,7 @@ class Anakin(datasets.GeneratorBasedBuilder):
         for key, idx in enumerate(ids[:342]):
             yield key, {
                 "full": root_url + f"full/{idx}.mp4",
-                "trimmed": root_url + f"trimmed/{idx}.mp4",
+                "trimmed": trimmed_dir + f"{idx}.mp4",
                 "edited": root_url + f"edited/{idx}.mp4",
                 "masks": root_url + f"masks/{idx}/",
             }
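
For context, a minimal sketch of how the updated loading script would typically be consumed; this is not part of the commit. The repository id AlexBlck/ANAKIN and the train/dev splits come from the script above; the printed fields and the trust_remote_code note are assumptions about a standard datasets workflow, not something this diff specifies.

# Usage sketch, assuming the script above is published as AlexBlck/ANAKIN on the Hub.
# Newer versions of the datasets library may additionally require trust_remote_code=True.
from datasets import load_dataset

ds = load_dataset("AlexBlck/ANAKIN", split="train")

sample = ds[0]
print(sample["full"])     # remote URL under .../resolve/main/full/
print(sample["trimmed"])  # after this commit, a path built from the locally cached trimmed_dir
print(sample["edited"])   # remote URL under .../resolve/main/edited/
print(sample["masks"])    # remote URL prefix under .../resolve/main/masks/<id>/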