upd: new try
rwth_phoenix_weather_2014.py  CHANGED  (+22 -2)
@@ -83,6 +83,9 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
             "transcription": datasets.Value("string"),
         }
 
+        if self.config.name != "pre-training":
+            features_dict["frames"] = datasets.Sequence(feature=datasets.Image())
+
         return datasets.DatasetInfo(
             description=_DESCRIPTION + self.config.description,
             features=datasets.Features(features_dict),
@@ -96,6 +99,7 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager: datasets.DownloadManager):
         example_ids = {}
         annotations = {}
+        frames = {}
 
         dataDirMapper = {
             datasets.Split.TRAIN: "train",
@@ -118,12 +122,23 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
             example_ids[split] = df['id']
             annotations[split] = df['annotation']
 
+            frame_archive_urls = dl_manager.download([
+                f"{base_url}/features/fullFrame-210x260px/{dataDirMapper[split]}/{id}.tar"
+                for id in example_ids[split]
+            ])
+
+            frames[split] = [
+                dl_manager.iter_archive(url)
+                for url in frame_archive_urls
+            ]
+
         return [
             datasets.SplitGenerator(
                 name=split,
                 gen_kwargs={
                     "example_ids": example_ids[split],
                     "annotations": annotations[split],
+                    "frames": frames[split],
                 },
             )
             for split in [
@@ -133,11 +148,16 @@ class RWTHPhoenixWeather2014(datasets.GeneratorBasedBuilder):
             ]
         ]
 
-    def _generate_examples(self, example_ids, annotations):
-        for key, (idx, annotation) in enumerate(zip(example_ids, annotations)):
+    def _generate_examples(self, example_ids, annotations, frames):
+        for key, (idx, annotation, frames_list) in enumerate(zip(example_ids, annotations, frames)):
             result = {
                 "id": idx,
                 "transcription": annotation,
             }
 
+            if self.config.name != 'pre-training':
+                result["frames"] = [
+                    {"path": p, "bytes": im.read()} for p, im in frames_list
+                ]
+
             yield key, result
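For reference, a minimal sketch of how the updated builder could be exercised after this change. Only the "pre-training" config name is confirmed by the diff; the frames-bearing config name below is a placeholder, and the script path assumes a local checkout of this repository.

# Usage sketch (assumptions: local copy of the loading script; the
# frames-bearing config name is hypothetical -- only "pre-training"
# appears in the diff above).
from datasets import load_dataset

# Text-only config: each example carries just "id" and "transcription".
pretrain = load_dataset("rwth_phoenix_weather_2014.py", "pre-training", split="train")

# Any other config additionally yields "frames": a sequence of images
# decoded from the per-example fullFrame-210x260px tar archives.
full = load_dataset("rwth_phoenix_weather_2014.py", "multisigner", split="train")  # config name is a placeholder

example = full[0]
print(example["id"], example["transcription"], len(example["frames"]))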