Update update.py
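Every outbound `requests.get` now identifies itself with a browser-style `User-Agent`: a shared `user_agent` constant is introduced and threaded through the EuroVoc download, the SPARQL query, and all of the document-body fetchers, presumably because the Publications Office endpoints refuse the default `python-requests` agent. The commit also fills in the format dispatch in `get_body` and replaces a bare `print` with a real message in the main loop. A minimal sketch, not part of the commit, of what the header changes on the wire (httpbin.org is used purely for illustration):

import requests

user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36'

# Without the header, requests announces itself as e.g. 'python-requests/2.31.0'.
plain = requests.get('https://httpbin.org/headers')
# With the header, the server sees the browser string above.
branded = requests.get('https://httpbin.org/headers', headers={'User-Agent': user_agent})

print(plain.json()['headers']['User-Agent'])
print(branded.json()['headers']['User-Agent'])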
update.py CHANGED
@@ -47,6 +47,8 @@ import requests
 from joblib import expires_after
 from pdfminer.high_level import extract_text
 
+user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36'
+
 
 def clean_text(func):
     """
@@ -73,7 +75,9 @@ def get_eurovoc_terms_and_id():
     eurovoc_terms_and_id = {}
     response = requests.get('http://publications.europa.eu/resource/dataset/eurovoc',
                             headers={'Accept': 'application/xml',
-                                     'Accept-Language': 'en'}
+                                     'Accept-Language': 'en',
+                                     'User-Agent': user_agent
+                                     }
                             )
     data = xmltodict.parse(response.content)
     for term in data['xs:schema']['xs:simpleType']['xs:restriction']['xs:enumeration']:
@@ -98,6 +102,7 @@ def get_sparql_query(d):
 
 def get_json_response(d):
     url = "https://publications.europa.eu/webapi/rdf/sparql"
+    headers = {'User-Agent': user_agent}
     params = {"default-graph-uri": "",
               "query": get_sparql_query(d),
               "format": "application/sparql-results+json",
@@ -105,7 +110,7 @@ def get_json_response(d):
               "debug": "on",
               "run": "Run Query"}
 
-    response = requests.get(url, params)
+    response = requests.get(url, headers=headers, params=params)
    assert response.status_code == 200
    return response.json()
 
@@ -123,7 +128,7 @@ def get_docs(d):
     results = get_json_response(d)
     for r in results['results']['bindings']:
         terms = r['subjects']['value'].replace(u'\xa0', u' ').split(',')
-        r['eurovoc_concepts'] = terms
+        r['eurovoc_concepts'] = terms  # list(get_concepts_id(terms))
         r['url'] = r['cellarURIs']['value']
         r['title'] = r['title']['value']
         r['date'] = r['date']['value']
@@ -144,20 +149,19 @@ def get_docs_text(d):
 
 def get_body(r):
     try:
-        [11 removed lines; their content did not survive in the rendered diff]
+        if 'pdf' in r['formats']:
+            r['text'] = get_pdf_body(r)
+        elif 'docx' in r['formats']:
+            r['text'] = get_docx_body(r)
+        elif 'doc' in r['formats']:
+            r['text'] = get_doc_body(r)
+        elif 'xhtml' in r['formats']:
+            r['text'] = get_xhtml_body(r)
+        else:
+            log.warning(f"⚠️ Could not find a parser for {r['formats']}")
+        return r
     except Exception as e:
-        [2 removed lines; their content did not survive in the rendered diff]
+        log.error(str(e) + str(r))
 
 
 @clean_text
@@ -166,26 +170,28 @@ def get_pdf_body(r):
     url = r['url']
     language = r['lang']
     accept = 'application/pdf'
-    response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language})
+    response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language, 'User-Agent': user_agent})
     if response.status_code == 300:
         return " ".join(_multiple_choice(get_pdf_body, response, accept, language))
     elif response.status_code == 200:
         mem = BytesIO(response.content)
         return extract_text(mem)
 
+
 @clean_text
 @memory.cache()
 def get_xhtml_body(r):
     url = r['url']
     language = r['lang']
     accept = 'application/xhtml+xml'
-    response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language})
+    response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language, 'User-Agent': user_agent})
     if response.status_code == 300:
         return " ".join(_multiple_choice(get_xhtml_body, response, accept, language))
     elif response.status_code == 200:
         soup = BeautifulSoup(response.content, 'html.parser')
         return soup.get_text()
 
+
 def get_docx_body(r):
     accept = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml'
     url = r['url']
@@ -209,6 +215,7 @@ def get_doc_body(r):
         print(f"⚠️ Could not download {r} --- {accept} {e}")
         return ""
 
+
 def _multiple_choice(func, response, accept, language):
     soup = BeautifulSoup(response.text, 'html.parser')
     for link in soup.find_all('a'):
@@ -216,10 +223,11 @@ def _multiple_choice(func, response, accept, language):
         url = link.attrs['href']
         yield func(url, accept, language)
 
+
 @clean_text
 @memory.cache()
 def _get_doc_body(url, accept, language='en'):
-    response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language})
+    response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language, 'User-Agent': user_agent})
     if response.status_code == 300:
         return " ".join(_multiple_choice(_get_doc_body, response, accept, language))
     elif response.status_code == 200:
@@ -246,6 +254,6 @@ if __name__ == '__main__':
             ofiles[ym].flush()
         except Exception as e:
             log.error('Day ' + str(d) + ' ' + str(e))
-            print
+            print('Day ' + str(d) + ' ' + str(e))
     for f in ofiles.values():
         f.close()
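The unchanged import of `expires_after` from joblib at the top of the diff pairs with the `@memory.cache()` decorators above: in joblib 1.3+ it builds a `cache_validation_callback` that makes cached downloads go stale. A sketch of the usual wiring, under the assumption that `memory` is a `joblib.Memory` defined in a part of update.py outside these hunks:

import requests
from joblib import Memory, expires_after

# Assumption: the real cache directory is configured elsewhere in update.py.
memory = Memory('cachedir', verbose=0)

# Cached responses are treated as stale after one week and re-fetched.
@memory.cache(cache_validation_callback=expires_after(weeks=1))
def fetch(url):
    return requests.get(url).text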
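With the dispatch in `get_body` filled in, the per-day flow is: `get_json_response` queries the SPARQL endpoint, `get_docs` enriches each binding, and `get_body` routes each record to the pdf/docx/doc/xhtml fetcher. A hypothetical driver loop, assuming `get_docs(d)` takes a day and yields the enriched bindings shown above:

from datetime import date

# Hypothetical usage of the functions patched in this commit.
for r in get_docs(date(2024, 1, 15)):
    doc = get_body(r)  # picks a parser based on r['formats']
    if doc is not None and doc.get('text'):
        print(doc['date'], doc['title'], len(doc['text']))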