Datasets:

Modalities: Text
Formats: json
Libraries: Datasets, pandas
Sebastien Campion committed
Commit 3f42bd5
1 Parent(s): 3000ba6
Files changed (2):
  1. query.j2 +55 -0
  2. update.py +247 -0
query.j2 ADDED
@@ -0,0 +1,55 @@
+ PREFIX cdm:<http://publications.europa.eu/ontology/cdm#>
+ PREFIX skos:<http://www.w3.org/2004/02/skos/core#>
+ PREFIX dc:<http://purl.org/dc/elements/1.1/>
+ PREFIX xsd:<http://www.w3.org/2001/XMLSchema#>
+ PREFIX rdf:<http://www.w3.org/1999/02/22-rdf-syntax-ns#>
+ PREFIX owl:<http://www.w3.org/2002/07/owl#>
+ SELECT
+ DISTINCT (group_concat(distinct ?work;separator=",") as ?cellarURIs)
+ (group_concat(distinct ?title_;separator=",") as ?title)
+ ?langIdentifier
+ (group_concat(distinct ?mtype;separator=",") as ?mtypes)
+ (group_concat(distinct ?resType;separator=",") as ?workTypes)
+ (group_concat(distinct ?agentName;separator=",") as ?authors)
+ ?date
+ (group_concat(distinct ?subjectLabel;separator=",") as ?subjects)
+ (group_concat(distinct ?workId_;separator=",") as ?workIds)
+ WHERE
+ {
+ graph ?gw {
+ ?work rdf:type ?resType .
+ ?work cdm:work_date_document ?date .
+ ?work cdm:work_id_document ?workId_ .
+ ?work cdm:work_is_about_concept_eurovoc ?subject . graph ?gs
+ { ?subject skos:prefLabel ?subjectLabel filter (lang(?subjectLabel)="en") } .
+ }
+ graph ?eg {
+ ?exp cdm:expression_belongs_to_work ?work .
+ ?exp cdm:expression_title ?title_
+ filter(lang(?title_)="en" or lang(?title_)="eng" or lang(?title_)='') .
+ ?exp cdm:expression_uses_language ?lg .
+ graph ?lgc { ?lg dc:identifier ?langIdentifier . }
+ }
+ graph ?gm {
+ ?manif cdm:manifestation_manifests_expression ?exp .
+ { ?manif cdm:manifestation_type ?mtype . }
+ }
+ OPTIONAL { graph ?gagent { { ?work cdm:work_contributed_to_by_agent ?agent . }
+ union
+ { ?work cdm:work_created_by_agent ?agent }
+ union
+ { ?work cdm:work_authored_by_agent ?agent }
+ } graph ?ga { ?agent skos:prefLabel ?agentName
+ filter (lang(?agentName)="en") . } } .
+ { SELECT DISTINCT ?work WHERE {
+ ?work rdf:type ?resType .
+ ?work cdm:work_date_document ?date .
+ FILTER( ?date > "{{ start }}"^^xsd:date) .
+ FILTER( ?date < "{{ end }}"^^xsd:date)
+ ?work cdm:work_id_document ?workId_ .
+ }
+ LIMIT 10000000
+ }
+ }
+ GROUP BY ?work ?date ?langIdentifier
+ OFFSET 0
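
The `{{ start }}` and `{{ end }}` placeholders are the template's only variables; update.py (below) renders them with Jinja2 before posting the query to the Cellar SPARQL endpoint. A minimal sketch of that rendering step, using an arbitrary example date:

import datetime
import jinja2

day = datetime.date(2023, 7, 1)  # arbitrary example date
env = jinja2.Environment()
with open("query.j2") as f:
    template = env.from_string(f.read())

# Two-day window, mirroring get_sparql_query() in update.py
query = template.render(start=day.strftime('%Y-%m-%d'),
                        end=(day + datetime.timedelta(days=2)).strftime('%Y-%m-%d'))
print(query)  # rendered SPARQL, ready for https://publications.europa.eu/webapi/rdf/sparql
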
update.py ADDED
@@ -0,0 +1,247 @@
+ """
+ Cellar text and Eurovoc extraction
+
+ python update.py 10000 dataset.jsonl
+
+ extracts the English text and Eurovoc labels for the last 10,000 days
+ into the given JSON Lines file.
+
+ requirements:
+ beautifulsoup4==4.12.2
+ docx2txt==0.8
+ ipython==8.14.0
+ jinja2==3.1.2
+ joblib==1.3.1
+ pdfminer.six==20221105
+ pip-chill==1.0.3
+ pycryptodome==3.18.0
+ requests==2.31.0
+ tqdm==4.65.0
+ xmltodict==0.13.0
+ """
+ import datetime
+ import json
+ import logging
+ import re
+ import sys
+ from concurrent.futures import ProcessPoolExecutor
+ from io import BytesIO
+
+ import docx2txt
+ import jinja2
+ import requests
+ import xmltodict
+ from bs4 import BeautifulSoup
+ from joblib import Memory, expires_after
+ from pdfminer.high_level import extract_text
+ from tqdm import tqdm
+
+ location = './cache'
+ memory = Memory(location, verbose=0)
+
+ log = logging.getLogger(__name__)
+ log.addHandler(logging.FileHandler('collect.log'))
+ log.setLevel(logging.DEBUG)
+
+ def clean_text(func):
+     """
+     Decorator that normalises the text returned by an extractor:
+     collapses newlines and repeated spaces, and squashes runs of dots.
+     :param func: extractor returning a raw text string
+     :return: wrapped extractor returning cleaned text
+     """
+
+     def inner(*args, **kwargs):
+         text = func(*args, **kwargs)
+         text = text.replace("\n", " ")
+         text = text.replace(" .", ".")
+         text = re.sub(r' +', ' ', text)
+         text = re.sub(r' *[.] *', '. ', text)
+         text = re.sub(r'\.\s*\.\s*\.+', '. ', text)  # raw strings avoid invalid escape warnings
+         text = '. '.join([s.strip() for s in text.split(".") if len(s.strip())])
+         return text
+
+     return inner
+
+
+ @memory.cache(cache_validation_callback=expires_after(minutes=120))
+ def get_eurovoc_terms_and_id():
+     eurovoc_terms_and_id = {}
+     response = requests.get('http://publications.europa.eu/resource/dataset/eurovoc',
+                             headers={'Accept': 'application/xml',
+                                      'Accept-Language': 'en'}
+                             )
+     data = xmltodict.parse(response.content)
+     for term in data['xs:schema']['xs:simpleType']['xs:restriction']['xs:enumeration']:
+         try:
+             name = term['xs:annotation']['xs:documentation'].split('/')[0].strip()
+             for r in term['xs:annotation']['xs:appinfo']['record']:
+                 if r['@thesaurus_id'] != '':
+                     eurovoc_terms_and_id[name.lower()] = r['@thesaurus_id']
+         except KeyError:
+             log.warning("⚠️ Could not parse %s", term)
+     return eurovoc_terms_and_id
+
+
+ def get_sparql_query(d):
+     # Render query.j2 for a two-day window starting at d
+     start = d.strftime('%Y-%m-%d')
+     end = (d + datetime.timedelta(days=2)).strftime('%Y-%m-%d')
+     environment = jinja2.Environment()
+     with open("query.j2", 'r') as f:
+         template = environment.from_string(f.read())
+     return template.render(start=start, end=end)
+
+
+ def get_json_response(d):
+     url = "https://publications.europa.eu/webapi/rdf/sparql"
+     params = {"default-graph-uri": "",
+               "query": get_sparql_query(d),
+               "format": "application/sparql-results+json",
+               "timeout": "0",
+               "debug": "on",
+               "run": "Run Query"}
+
+     response = requests.get(url, params)
+     assert response.status_code == 200
+     return response.json()
+
+
+ def get_concepts_id(list_of_eurovoc_terms):
+     terms = get_eurovoc_terms_and_id()
+     for e in list_of_eurovoc_terms:
+         try:
+             yield terms[e.strip().lower()]
+         except KeyError:
+             log.warning(f"⚠️ Could not find {e} in Eurovoc")
+
+
+ def get_docs(d):
+     results = get_json_response(d)
+     for r in results['results']['bindings']:
+         terms = r['subjects']['value'].replace(u'\xa0', u' ').split(',')
+         r['eurovoc_concepts'] = terms  # list(get_concepts_id(terms))
+         r['url'] = r['cellarURIs']['value']
+         r['title'] = r['title']['value']
+         r['date'] = r['date']['value']
+         r['lang'] = r['langIdentifier']['value'].lower()
+         r['formats'] = r['mtypes']['value'].split(',')
+         for c in ['cellarURIs', 'mtypes', 'langIdentifier', 'subjects', 'authors', 'workTypes', 'workIds']:
+             del r[c]
+         yield r
+
+
+ def get_docs_text(d):
+     docs = list(get_docs(d))
+     print(f"Processing documents ... {len(docs)}")
+     with ProcessPoolExecutor(max_workers=16) as executor:
+         for v in tqdm(executor.map(get_body, docs), total=len(docs), colour='green'):
+             yield v
+
+
+ def get_body(r):
+     try:
+         if 'pdf' in r['formats']:
+             r['text'] = get_pdf_body(r)
+         elif 'docx' in r['formats']:
+             r['text'] = get_docx_body(r)
+         elif 'doc' in r['formats']:
+             r['text'] = get_doc_body(r)
+         elif 'xhtml' in r['formats']:
+             r['text'] = get_xhtml_body(r)
+         else:
+             log.warning(f"⚠️ Could not find a parser for {r['formats']}")
+         return r
+     except Exception as e:
+         log.error("%s %s", e, r)
+         return r  # keep the record even when extraction fails
+
+
+ @clean_text
+ @memory.cache()
+ def get_pdf_body(r):
+     url = r['url']
+     language = r['lang']
+     accept = 'application/pdf'
+     response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language})
+     if response.status_code == 300:
+         # HTTP 300: fetch each alternative representation and concatenate
+         return " ".join(get_pdf_body({'url': u, 'lang': language}) for u in _multiple_choice(response))
+     elif response.status_code == 200:
+         mem = BytesIO(response.content)
+         return extract_text(mem)
+     return ""  # clean_text expects a string, never None
+
+
+ @clean_text
+ @memory.cache()
+ def get_xhtml_body(r):
+     url = r['url']
+     language = r['lang']
+     accept = 'application/xhtml+xml'
+     response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language})
+     if response.status_code == 300:
+         return " ".join(get_xhtml_body({'url': u, 'lang': language}) for u in _multiple_choice(response))
+     elif response.status_code == 200:
+         soup = BeautifulSoup(response.content, 'html.parser')
+         return soup.get_text()
+     return ""
+
+
+ def get_docx_body(r):
+     accept = 'application/vnd.openxmlformats-officedocument.wordprocessingml.document.main+xml'
+     url = r['url']
+     lang = r['lang']
+     try:
+         return _get_doc_body(url, accept, lang)
+     except AssertionError as e:
+         log.warning(f"⚠️ Could not download {url} {e}")
+         print(f"⚠️ Could not download {r} --- {accept} {e}")
+         return ""
+
+
+ def get_doc_body(r):
+     accept = 'application/msword'
+     url = r['url']
+     lang = r['lang']
+     try:
+         return _get_doc_body(url, accept, lang)
+     except AssertionError as e:
+         log.warning(f"⚠️ Could not download {url} {e}")
+         print(f"⚠️ Could not download {r} --- {accept} {e}")
+         return ""
+
+
+ def _multiple_choice(response):
+     # An HTTP 300 response lists the available representations as links;
+     # yield each alternative URL so the caller can fetch it itself.
+     soup = BeautifulSoup(response.text, 'html.parser')
+     for link in soup.find_all('a'):
+         if 'href' in link.attrs:
+             yield link.attrs['href']
+
+
+ @clean_text
+ @memory.cache()
+ def _get_doc_body(url, accept, language='en'):
+     response = requests.get(url, headers={'Accept': accept, 'Accept-Language': language})
+     if response.status_code == 300:
+         return " ".join(_get_doc_body(u, accept, language) for u in _multiple_choice(response))
+     elif response.status_code == 200:
+         mem = BytesIO(response.content)
+         log.info(f"📄 MS Word doc downloaded and parsed {url}")
+         return docx2txt.process(mem)
+     else:
+         raise AssertionError(f"📄 MS Word doc download failed {url} {response.status_code} {response.content}")
+
+
+ if __name__ == '__main__':
+     num_days = int(sys.argv[1])  # matches the docstring: update.py 10000 dataset.jsonl
+     output = sys.argv[2]
+     with open(output, 'w') as f:
+         for i in range(num_days):
+             day = datetime.date.today() - datetime.timedelta(days=i)
+             print(day)
+             try:
+                 for doc in get_docs_text(day):
+                     f.write(json.dumps(doc) + '\n')
+                     f.flush()
+             except Exception as e:
+                 log.error(f'Day {day} {e}')
+                 print(f'Day {day} {e}')
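
Once a run completes, the JSON Lines output can be loaded with the libraries listed in the page header. A minimal sketch, assuming the file was written as dataset.jsonl with the fields produced by get_docs (url, title, date, lang, eurovoc_concepts, formats, text):

import pandas as pd
from datasets import load_dataset

# With pandas: one row per extracted document
df = pd.read_json("dataset.jsonl", lines=True)
print(df[["date", "lang", "title"]].head())

# With 🤗 Datasets: the same file as a Dataset object
ds = load_dataset("json", data_files="dataset.jsonl", split="train")
print(ds[0]["eurovoc_concepts"])
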