guilhermelmello committed
Commit 816a97f
Parent(s): 789cc32

Update loading script to v1.2
corpus-carolina.py CHANGED (+13 -20)

@@ -18,6 +18,7 @@ from collections import defaultdict
 from lxml import etree
 import os
 import datasets
+import gzip
 
 
 _HOMEPAGE = "https://sites.usp.br/corpuscarolina/"
@@ -158,32 +159,23 @@ class Carolina(datasets.GeneratorBasedBuilder):
         # download checksum files
         checksum_urls = {t: _CHECKSUM_FNAME.format(tax=t) for t in taxonomies}
         checksum_paths = dl_manager.download(checksum_urls)
-
+
         # prepare xml file name and zip urls
-        zip_urls = dict()
+        gzip_urls = list()
         for tax, cpath in checksum_paths.items():
             tax_path = _CORPUS_URL.format(tax=tax)
             with open(cpath, encoding="utf-8") as cfile:
                 for line in cfile:
                     xml_tax_path = line.split()[1]  # xml file inside taxonomy
-                    zip_fname = xml_tax_path + ".zip"  # zip file inside taxonomy
-                    xml_fname = xml_tax_path.split('/')[-1]  # xml file name only
+                    zip_fname = xml_tax_path + ".gz"  # zip file inside taxonomy
                     zip_fpath = os.path.join(tax_path, zip_fname)  # path inside corpus
-                    zip_urls[xml_fname] = zip_fpath
-
-        # extractions are made in cache folders and
-        # the path returned is the folder path, not the
-        # extracted file (or files). It is necessary to
-        # build the xml file path. It is made using the
-        # zip_urls dict structure.
-        extracted = dl_manager.download_and_extract(zip_urls)
-        xml_files = [os.path.join(v, k) for k, v in extracted.items()]
-        xml_files = sorted(xml_files)
+                    gzip_urls.append(zip_fpath)
 
+        gzip_files = dl_manager.download(gzip_urls)
         return [
             datasets.SplitGenerator(
                 name="corpus",
-                gen_kwargs={"filepaths": xml_files}
+                gen_kwargs={"filepaths": gzip_files}
             )
         ]
@@ -197,10 +189,10 @@ class Carolina(datasets.GeneratorBasedBuilder):
 
         _key = 0
         for path in filepaths:
-
-            for _, tei in etree.iterparse(path, **parser_params):
+            gzip_file = gzip.GzipFile(path, "rb")
+            for _, tei in etree.iterparse(gzip_file, **parser_params):
                 header = tei.find(f"{TEI_NS}teiHeader")
-
+
                 meta = etree.tostring(
                     header, encoding="utf-8").decode("utf-8")
                 text = ' '.join([e.text
@@ -208,9 +200,10 @@ class Carolina(datasets.GeneratorBasedBuilder):
                     if e.text is not None
                 ])
 
-                example = {
+                yield _key, {
                     "meta": meta,
                     "text": text
                 }
-                yield _key, example
                 _key += 1
+
+            gzip_file.close()
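
What changed, in short: v1.2 stops downloading and extracting one zip archive per XML file (download_and_extract plus rebuilding each file's path inside the extraction cache folder) and instead downloads the .gz files directly, letting etree.iterparse read them through a gzip.GzipFile with no extraction step. Below is a minimal standalone sketch of that read path. It is an illustration, not the script itself: the iterparse filters and the element selection for the text join are assumptions, since parser_params and the text query are defined outside this diff.

import gzip
from lxml import etree

# TEI namespace prefix; the script defines this constant outside the diff,
# so its exact value here is an assumption based on the TEI standard.
TEI_NS = "{http://www.tei-c.org/ns/1.0}"

def iter_tei_examples(path):
    """Stream (meta, text) pairs from a gzip-compressed TEI XML file."""
    gzip_file = gzip.GzipFile(path, "rb")
    # iterparse accepts any file-like object, so documents are decompressed
    # and parsed incrementally; filtering on end events for TEI elements is
    # one plausible stand-in for the script's parser_params.
    for _, tei in etree.iterparse(gzip_file, events=("end",), tag=f"{TEI_NS}TEI"):
        header = tei.find(f"{TEI_NS}teiHeader")
        meta = etree.tostring(header, encoding="utf-8").decode("utf-8")
        # The diff elides which elements feed the text join; iterating every
        # descendant with non-empty text is an approximation.
        text = ' '.join(e.text for e in tei.iter() if e.text is not None)
        yield meta, text
        tei.clear()  # free the parsed tree to keep memory flat on large files
    gzip_file.close()

The payoff is one fetch per corpus file, no extracted copies accumulating in the datasets cache, and flat memory use while parsing, since each TEI document is handled and discarded as it streams out of the gzip file.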