lukasbraach commited on
Commit
ab89da9
·
verified ·
1 Parent(s): d20a69b

Implemented RWTH Phoenix data loading script.

Browse files
Files changed (1) hide show
  1. rwth_phoenix_weather_2014.py +32 -120
rwth_phoenix_weather_2014.py CHANGED
@@ -1,12 +1,5 @@
1
- import queue
2
- from concurrent import futures
3
- from functools import wraps
4
- from typing import Generator
5
-
6
- import cv2
7
  import datasets
8
- import numpy as np
9
- import scipy.io
10
 
11
  _CITATION = """\
12
  @article{koller2015continuous,
@@ -55,6 +48,7 @@ class RWTHPhoenixWeather2014Config(datasets.BuilderConfig):
55
  super(RWTHPhoenixWeather2014Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
56
  self.main_data_folder = main_data_folder
57
 
 
58
  class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
59
  """RWTH-PHOENIX-Weather 2014: Continuous Sign Language Recognition Dataset."""
60
 
@@ -79,8 +73,7 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
79
  description=_DESCRIPTION + self.config.description,
80
  features=datasets.Features(
81
  {
82
- "label": datasets.ClassLabel(names=_LABELS),
83
- "frames_interval": datasets.Sequence(feature=datasets.Value("uint32"), length=2),
84
  "frames": datasets.Sequence(feature=datasets.Image()),
85
  }
86
  ),
@@ -92,8 +85,9 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
92
  )
93
 
94
  def _split_generators(self, dl_manager: datasets.DownloadManager):
95
- videos = {}
96
- data = {}
 
97
 
98
  dataDirMapper = {
99
  datasets.Split.TRAIN: "train",
@@ -106,31 +100,34 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
106
  datasets.Split.VALIDATION,
107
  datasets.Split.TEST,
108
  ]:
109
- base_urls = [
110
- f"data/{self.config.main_data_folder}/features/fullFrame-210x260px/{dataDirMapper[split]}"
111
- for idx in split_ids[split]
112
- ]
113
 
114
- video_urls = [
115
- f"{base_url}_color.mp4"
116
- for base_url in base_urls
117
- ]
118
 
119
- data_urls = [
120
- f"{base_url}_data.mat"
121
- for base_url in base_urls
122
- ]
 
 
 
 
 
 
123
 
124
- videos[split] = dl_manager.download(video_urls)
125
- data[split] = dl_manager.download(data_urls)
 
 
126
 
127
  return [
128
  datasets.SplitGenerator(
129
  name=split,
130
  gen_kwargs={
131
- "ids": split_ids[split],
132
- "video_files": videos[split],
133
- "data_files": data[split],
134
  },
135
  )
136
  for split in [
@@ -140,94 +137,9 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
140
  ]
141
  ]
142
 
143
- def _generate_examples(self, ids, video_files, data_files):
144
- with futures.ThreadPoolExecutor() as executor:
145
- video_processor = VideoProcessor(executor)
146
-
147
- for idx, video_file, data_file in zip(ids, video_files, data_files):
148
- mat = scipy.io.loadmat(data_file)
149
- data = mat['Video'][0, 0][4][0]
150
-
151
- frame_queue = video_processor.open(video_file)
152
- frames = []
153
-
154
- last_end_frame = 0
155
- sub_sample_idx = 0
156
- curr_frame_idx = 0
157
-
158
- while True:
159
- try:
160
- [label], [[start_frame]], [[end_frame]] = data[sub_sample_idx]
161
- except Exception as e:
162
- print(f"\nskipping this example: something weird happened at idx = {idx}, sub_sample_idx = {sub_sample_idx}: {e}")
163
- break
164
-
165
- image = next(frame_queue, None)
166
-
167
- if image is None:
168
- # no more images
169
- break
170
-
171
- # last read was successful
172
- curr_frame_idx += 1
173
-
174
- if last_end_frame > start_frame:
175
- # sanity check
176
- raise RuntimeError(
177
- f"example frames are not monotonically increasing: last_end_frame = {last_end_frame} > start_frame = {start_frame}")
178
-
179
- if curr_frame_idx > end_frame:
180
- # sanity check
181
- raise RuntimeError(f"count = {curr_frame_idx} was greater than end_frame = {start_frame}")
182
-
183
- if curr_frame_idx >= start_frame:
184
- # save image if we are at the right frame index
185
- frames.append(image)
186
-
187
- if curr_frame_idx == end_frame:
188
- # we found our end frame and can yield a result
189
- yield f"Sample{idx}/{sub_sample_idx}", {
190
- "label": label,
191
- "frames_interval": (start_frame, end_frame),
192
- "frames": frames,
193
- }
194
-
195
- frames = []
196
- last_end_frame = end_frame
197
- sub_sample_idx += 1
198
-
199
- if sub_sample_idx == len(data):
200
- # no more samples to generate
201
- break
202
-
203
-
204
class VideoProcessor:
    """Decodes video frames on a thread pool and hands them out lazily.

    A producer task reads raw frames with OpenCV and submits each one to the
    executor for feature encoding; :meth:`open` yields the encoded results
    in their original order.
    """

    def __init__(self, executor: futures.Executor):
        super().__init__()
        self.executor = executor
        self.feature = datasets.Image()

    def open(self, video_file) -> Generator[dict, None, None]:
        """Return a generator of encoded frames for *video_file*.

        This is a generator function, so decoding starts lazily on first
        iteration. The bounded queue (30 slots) applies back-pressure to the
        producer so only a limited number of frames are in flight at once.
        """
        capture = cv2.VideoCapture(video_file)
        pending = queue.Queue(maxsize=30)

        # Producer runs on the shared executor; a ``None`` entry marks
        # end-of-stream.
        self.executor.submit(self._push_frames_to_queue, capture, pending)

        while (item := pending.get()) is not None:
            yield item.result()

    def _push_frames_to_queue(self, capture: cv2.VideoCapture, pending: queue.Queue):
        """Read frames until the capture is exhausted, submitting each one
        for encoding and finishing with a ``None`` sentinel."""
        while True:
            success, frame = capture.read()
            if not success:
                # Tell the consumer no more frames are coming.
                pending.put(None)
                break
            pending.put(self.executor.submit(self.feature.encode_example, frame))
 
 
 
 
 
 
 
1
  import datasets
2
+ import pandas as pd
 
3
 
4
  _CITATION = """\
5
  @article{koller2015continuous,
 
48
  super(RWTHPhoenixWeather2014Config, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
49
  self.main_data_folder = main_data_folder
50
 
51
+
52
  class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
53
  """RWTH-PHOENIX-Weather 2014: Continuous Sign Language Recognition Dataset."""
54
 
 
73
  description=_DESCRIPTION + self.config.description,
74
  features=datasets.Features(
75
  {
76
+ "tokens": datasets.Sequence(feature=str),
 
77
  "frames": datasets.Sequence(feature=datasets.Image()),
78
  }
79
  ),
 
85
  )
86
 
87
  def _split_generators(self, dl_manager: datasets.DownloadManager):
88
+ split_frames = {}
89
+ split_tokens = {}
90
+ split_ids = {}
91
 
92
  dataDirMapper = {
93
  datasets.Split.TRAIN: "train",
 
100
  datasets.Split.VALIDATION,
101
  datasets.Split.TEST,
102
  ]:
103
+ base_url = f"data/{self.config.main_data_folder}"
 
 
 
104
 
105
+ data_csv = dl_manager.download(f"{base_url}/annotations/manual/{dataDirMapper[split]}.corpus.csv")
106
+ df = pd.read_csv(data_csv, sep='|')
 
 
107
 
108
+ split_tokens[split] = df.annotation.map(
109
+ lambda x: x.strip().split()
110
+ )
111
+
112
+ split_ids[split] = df.id
113
+
114
+ frame_archive_urls = dl_manager.download([
115
+ f"{base_url}/features/fullFrame-210x260px/{dataDirMapper[split]}/{id}.tar"
116
+ for id in df.id
117
+ ])
118
 
119
+ split_frames[split] = [
120
+ dl_manager.iter_archive(url)
121
+ for url in frame_archive_urls
122
+ ]
123
 
124
  return [
125
  datasets.SplitGenerator(
126
  name=split,
127
  gen_kwargs={
128
+ "split_ids": split_ids[split],
129
+ "split_frames": split_frames[split],
130
+ "split_tokens": split_tokens[split],
131
  },
132
  )
133
  for split in [
 
137
  ]
138
  ]
139
 
140
+ def _generate_examples(self, split_ids, split_frames, split_tokens):
141
+ for id, frames, tokens in zip(split_ids, split_frames, split_tokens):
142
+ yield id, {
143
+ "tokens": tokens,
144
+ "frames": frames,
145
+ }