upd: I don't know anymore
rwth_phoenix_weather_2014.py (CHANGED, +14 -16)
@@ -79,7 +79,7 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
 
     def _info(self):
         features_dict = {
-            "…
+            "transcription": datasets.Value("string"),
         }
 
         if self.config.name != "pre-training":
@@ -97,8 +97,8 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager: datasets.DownloadManager):
         split_frames = {}
-        …
-        …
+        split_transcription = {}
+        file_ids = {}
 
         dataDirMapper = {
             datasets.Split.TRAIN: "train",
@@ -117,19 +117,16 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
                 f"{base_url}/annotations/manual/{dataDirMapper[split]}{self.config.corpus_file_suffix}")
             df = pd.read_csv(data_csv, sep='|')
 
-            …
-            …
-            )
-            …
-            split_ids[split] = df.id
+            split_transcription[split] = df['annotation']
+            file_ids[split] = df['id']
 
             if self.config.name == 'pre-training':
                 split_frames[split] = [None for id in df.id]
                 continue
 
             frame_archive_urls = dl_manager.download([
-                f"{base_url}/features/fullFrame-210x260px/{dataDirMapper[split]}/{…
-                for …
+                f"{base_url}/features/fullFrame-210x260px/{dataDirMapper[split]}/{file_id}.tar"
+                for file_id in file_ids[split]
             ])
 
             split_frames[split] = [
@@ -141,9 +138,10 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
             datasets.SplitGenerator(
                 name=split,
                 gen_kwargs={
-                    "…
+                    "split_name": split,
+                    "file_ids": file_ids[split],
                     "split_frames": split_frames[split],
-                    "…
+                    "split_transcription": split_transcription[split],
                 },
             )
             for split in [
@@ -153,17 +151,17 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
             ]
         ]
 
-    def _generate_examples(self, …
-        for …
+    def _generate_examples(self, split_name, file_ids, split_frames, split_transcription):
+        for file_id, frames, transcription in zip(file_ids, split_frames, split_transcription):
             frames_as_list = [
                 {"path": p, "bytes": im.read()} for p, im in frames
             ] if frames is not None else None
 
             result = {
-                "…
+                "transcription": transcription,
             }
 
             if frames_as_list is not None:
                 result["frames"] = frames_as_list
 
-            yield …
+            yield f"{split_name}/{file_id}", result
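The two new lines in _split_generators read the gloss transcription and the file id straight out of the pipe-separated corpus annotation file. Below is a minimal, self-contained sketch of that step, assuming a made-up two-row corpus table: only the '|' separator and the 'id' and 'annotation' columns come from the diff; every other column name and value is an illustrative placeholder.

import io

import pandas as pd

# Hypothetical stand-in for one downloaded annotation file
# (e.g. the file fetched via corpus_file_suffix). Only the '|'
# separator and the 'id' / 'annotation' columns are taken from
# the diff; the remaining columns and all values are made up.
corpus_csv = io.StringIO(
    "id|folder|signer|annotation\n"
    "video_0001|video_0001/1/*.png|Signer01|REGEN WETTER\n"
    "video_0002|video_0002/1/*.png|Signer02|SONNE MORGEN\n"
)

df = pd.read_csv(corpus_csv, sep='|')

# Mirrors the two added lines in _split_generators:
split_transcription = df['annotation']   # gloss transcription per example
file_ids = df['id']                      # id later used to locate the frame archive

print(list(file_ids))             # ['video_0001', 'video_0002']
print(list(split_transcription))  # ['REGEN WETTER', 'SONNE MORGEN']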
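Downstream, the rewritten _generate_examples is wired entirely through gen_kwargs and simply zips the per-split lists together, keying each example by f"{split_name}/{file_id}". The sketch below replays that logic outside the datasets.GeneratorBasedBuilder machinery; the (path, file object) pairs are faked with io.BytesIO, on the assumption that the elided split_frames construction feeds in archive-iterator output of that shape (that part of the file is not shown in this diff).

import io

def generate_examples(split_name, file_ids, split_frames, split_transcription):
    # Same body as the updated _generate_examples in the diff.
    for file_id, frames, transcription in zip(file_ids, split_frames, split_transcription):
        frames_as_list = [
            {"path": p, "bytes": im.read()} for p, im in frames
        ] if frames is not None else None

        result = {
            "transcription": transcription,
        }

        if frames_as_list is not None:
            result["frames"] = frames_as_list

        yield f"{split_name}/{file_id}", result

file_ids = ["video_0001", "video_0002"]
transcriptions = ["REGEN WETTER", "SONNE MORGEN"]

# "pre-training" config: _split_generators stores None per id, so the
# examples carry only the transcription.
for key, example in generate_examples("train", file_ids, [None, None], transcriptions):
    print(key, example)

# Full config: each entry imitates the (path, file object) pairs an
# archive iterator would yield for one video's frame .tar file.
fake_frames = [
    [("video_0001/images0001.png", io.BytesIO(b"fake png bytes"))],
    [("video_0002/images0001.png", io.BytesIO(b"fake png bytes"))],
]
for key, example in generate_examples("train", file_ids, fake_frames, transcriptions):
    print(key, example["transcription"], len(example["frames"]))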