Albert Villanova del Moral committed
Parallelize with zipped file_list and archive

open_access.py  CHANGED  (+23 -37)
@@ -59,7 +59,7 @@ _BASELINE_DATE = "2022-12-17"
 _BASELINE_MAX_RANGE = 10
 _BASELINE_RANGES = {
     "commercial": range(_BASELINE_MAX_RANGE),
-    "non_commercial": range(1, _BASELINE_MAX_RANGE),
+    "non_commercial": range(1, _BASELINE_MAX_RANGE),  # non-commercial PMC000xxxxxx baseline does not exist
     "other": range(_BASELINE_MAX_RANGE),
 }
 
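Note: the skipped index matters because baseline archives are named by PMC ID block. A standalone sketch (not part of the commit, constants copied from the script) of the names these ranges expand to:

# Standalone sketch: expand _BASELINE_RANGES into the baseline archive stems
# the loader will request. "non_commercial" starts at 1, so no
# PMC000xxxxxx.baseline.* name is generated for that subset.
_BASELINE_DATE = "2022-12-17"
_BASELINE_MAX_RANGE = 10
_BASELINE_RANGES = {
    "commercial": range(_BASELINE_MAX_RANGE),
    "non_commercial": range(1, _BASELINE_MAX_RANGE),
    "other": range(_BASELINE_MAX_RANGE),
}

for subset, indices in _BASELINE_RANGES.items():
    names = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in indices]
    print(f"{subset}: {names[0]} ... {names[-1]} ({len(names)} archives)")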
@@ -76,7 +76,8 @@ class OpenAccessConfig(datasets.BuilderConfig):
         """
         subsets = [subsets] if isinstance(subsets, str) else subsets
         super().__init__(
-            name="+".join(subsets),
+            name="+".join(subsets),
+            **kwargs,
         )
         self.subsets = subsets if self.name != "all" else list(_SUBSETS.keys())
 
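Note: the config name stays derived from the subset list. A minimal illustration of the naming rule (the values here are hypothetical, not from the commit):

# Minimal illustration of the naming rule in OpenAccessConfig.__init__:
# the BuilderConfig name is the "+"-joined subset list, and a bare string
# is first wrapped in a single-element list.
subsets = "commercial"
subsets = [subsets] if isinstance(subsets, str) else subsets
assert "+".join(subsets) == "commercial"

assert "+".join(["commercial", "other"]) == "commercial+other"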
@@ -111,27 +112,18 @@ class OpenAccess(datasets.GeneratorBasedBuilder):
 
     def _split_generators(self, dl_manager):
 
-        baseline_paths = {
-            "baseline_file_lists": [],
-            "baseline_archives": [],
-        }
-        incremental_paths = {
-            "incremental_file_lists": [],
-            "incremental_archives": [],
-        }
-
+        baseline_paths = []
+        incremental_paths = []
         for subset in self.config.subsets:
             url = _URL.format(subset=_SUBSETS[subset])
             basename = f"{_SUBSETS[subset]}_txt."
             # Baselines
             baselines = [f"PMC00{i}xxxxxx.baseline.{_BASELINE_DATE}" for i in _BASELINE_RANGES[subset]]
-            baseline_urls = {
-                "baseline_file_lists": [f"{url}{basename}{baseline}.filelist.csv" for baseline in baselines],
-                "baseline_archives": [f"{url}{basename}{baseline}.tar.gz" for baseline in baselines],
-            }
-            paths = dl_manager.download(baseline_urls)
-            baseline_paths["baseline_file_lists"].extend(paths["baseline_file_lists"])
-            baseline_paths["baseline_archives"].extend(paths["baseline_archives"])
+            baseline_urls = [
+                (f"{url}{basename}{baseline}.filelist.csv", f"{url}{basename}{baseline}.tar.gz")
+                for baseline in baselines
+            ]
+            baseline_paths += dl_manager.download(baseline_urls)
             # Incremental
             date_delta = datetime.date.today() - datetime.date.fromisoformat(_BASELINE_DATE)
             incremental_dates = [
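Note: this hunk is the core of the change. Each baseline's filelist URL and archive URL now travel together as one tuple, and a single dl_manager.download call returns local paths in the same nested shape, ready to iterate pairwise. A stubbed sketch of that shape-preserving behavior; fake_download is a stand-in for dl_manager.download, and the base URL is illustrative (it assumes _SUBSETS maps "commercial" to "oa_comm", which this hunk does not show):

# Stubbed sketch: a list of (filelist_url, archive_url) tuples goes in, and a
# list of (filelist_path, archive_path) tuples comes out.
def fake_download(urls):
    # Stand-in for dl_manager.download: recurse into lists/tuples, keep shape.
    if isinstance(urls, (list, tuple)):
        return type(urls)(fake_download(u) for u in urls)
    return "/cache/" + urls.rsplit("/", 1)[-1]  # pretend every URL is cached

url = "https://ftp.ncbi.nlm.nih.gov/pub/pmc/oa_bulk/oa_comm/txt/"  # illustrative
basename = "oa_comm_txt."
baselines = [f"PMC00{i}xxxxxx.baseline.2022-12-17" for i in range(2)]
baseline_urls = [
    (f"{url}{basename}{b}.filelist.csv", f"{url}{basename}{b}.tar.gz")
    for b in baselines
]
for file_list, archive in fake_download(baseline_urls):
    print(file_list, "<->", archive)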
@@ -139,36 +131,30 @@
                 for i in range(date_delta.days)
             ]
             incrementals = [f"incr.{date}" for date in incremental_dates]
-            incremental_urls = {
-                "incremental_file_lists": [
-                    f"{url}{basename}{incremental}.filelist.csv" for incremental in incrementals
-                ],
-                "incremental_archives": [f"{url}{basename}{incremental}.tar.gz" for incremental in incrementals],
-            }
-            paths = dl_manager.download(incremental_urls)
-            incremental_paths["incremental_file_lists"].extend(paths["incremental_file_lists"])
-            incremental_paths["incremental_archives"].extend(paths["incremental_archives"])
+            incremental_urls = [
+                (f"{url}{basename}{incremental}.filelist.csv", f"{url}{basename}{incremental}.tar.gz")
+                for incremental in incrementals
+            ]
+            incremental_paths += dl_manager.download(incremental_urls)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "baseline_file_lists": baseline_paths["baseline_file_lists"],
-                    "baseline_archives": [
-                        dl_manager.iter_archive(archive) for archive in baseline_paths["baseline_archives"]
+                    "baseline_paths": [
+                        (file_list, dl_manager.iter_archive(archive)) for file_list, archive in baseline_paths
                     ],
-                    "incremental_file_lists": incremental_paths["incremental_file_lists"],
-                    "incremental_archives": [
-                        dl_manager.iter_archive(archive) for archive in incremental_paths["incremental_archives"]
+                    "incremental_paths": [
+                        (file_list, dl_manager.iter_archive(archive)) for file_list, archive in incremental_paths
                     ],
                 },
             ),
         ]
 
-    def _generate_examples(self, baseline_file_lists, baseline_archives, incremental_file_lists, incremental_archives):
+    def _generate_examples(self, baseline_paths, incremental_paths):
         key = 0
         # Baselines
-        for baseline_file_list, baseline_archive in zip(baseline_file_lists, baseline_archives):
+        for baseline_file_list, baseline_archive in baseline_paths:
             baselines = pd.read_csv(baseline_file_list, index_col="Article File").to_dict(orient="index")
             for path, file in baseline_archive:
                 data = baselines.pop(path)
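Note: with gen_kwargs pre-paired, _generate_examples iterates (file_list, archive) tuples directly instead of zipping parallel lists. A self-contained sketch of that consumption loop, with a toy in-memory CSV and a fake iterator standing in for the real filelist and dl_manager.iter_archive:

import io

import pandas as pd

# Toy filelist CSV: the real files index rows by "Article File", whose values
# match member paths inside the paired tar.gz archive.
file_list = io.StringIO("Article File,AccessionID\nPMC0000001.txt,PMC0000001\n")
baselines = pd.read_csv(file_list, index_col="Article File").to_dict(orient="index")

# Fake archive iterator: dl_manager.iter_archive yields (path, file) pairs.
archive = iter([("PMC0000001.txt", io.BytesIO(b"article text"))])

key = 0
for path, file in archive:
    data = baselines.pop(path)  # metadata row matching this archive member
    print(key, path, data, file.read())
    key += 1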
@@ -189,8 +175,8 @@
                 yield key, data
                 key += 1
         # Incrementals
-        if incremental_file_lists:
-            for incremental_file_list, incremental_archive in zip(incremental_file_lists, incremental_archives):
+        if incremental_paths:
+            for incremental_file_list, incremental_archive in incremental_paths:
                 incrementals = pd.read_csv(incremental_file_list, index_col="Article File").to_dict(orient="index")
                 for path, file in incremental_archive:
                     data = incrementals.pop(path)
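Note: net effect of the commit: the zip() of two parallel lists is gone, but both versions iterate the same (file_list, archive) pairs; only where the pairing happens has moved. A minimal equivalence check with hypothetical file names:

# Before the commit: two parallel lists, zipped at iteration time.
file_lists = ["a.filelist.csv", "b.filelist.csv"]
archives = ["a.tar.gz", "b.tar.gz"]
pairs_before = list(zip(file_lists, archives))

# After the commit: pairs are formed once, when the download list is built.
pairs_after = [("a.filelist.csv", "a.tar.gz"), ("b.filelist.csv", "b.tar.gz")]

assert pairs_before == pairs_after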