hsuvaskakoty committed
Commit 2e68d8f · verified · 1 Parent(s): fafa079

Upload 3 files

Files changed (3)
  1. collect_data.py +312 -0
  2. process_data.py +1 -1
  3. requirements.txt +9 -9
collect_data.py ADDED
@@ -0,0 +1,312 @@
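+ # collect_data.py: scrape "Articles for deletion" (AfD) discussions from the
+ # daily log pages on English Wikipedia and normalise them into DataFrames.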
+ import requests
+ import pandas as pd
+ from bs4 import BeautifulSoup
+ import pysbd
+ from datetime import datetime, timedelta
+
+
+ def extract_div_contents_with_additional_columns(url, log_date):
+     response = requests.get(url)
+     if response.status_code != 200:
+         return pd.DataFrame(columns=['log_date', 'title', 'text_url', 'deletion_discussion', 'label', 'confirmation', 'discussion', 'verdict'])
+
+     soup = BeautifulSoup(response.content, 'html.parser')
+     # Closed AfD discussions are wrapped in one of these boilerplate div classes.
+     div_classes = ['boilerplate afd vfd xfd-closed', 'boilerplate afd vfd xfd-closed archived mw-archivedtalk']
+     divs = []
+     for div_class in div_classes:
+         divs.extend(soup.find_all('div', class_=div_class))
+     data = []
+     for div in divs:
+         title = None
+         text_url = None
+         title_tag = div.find('a')
+         if title_tag:
+             title_span = div.find('span', {'data-mw-comment-start': True})
+             if title_span:
+                 title_anchor = title_span.find_next_sibling('a')
+                 if title_anchor:
+                     title = title_anchor.text
+                     text_url = 'https://en.wikipedia.org' + title_anchor['href']
+             else:
+                 title = title_tag.text
+                 text_url = 'https://en.wikipedia.org' + title_tag['href']
+         if title is None:
+             continue
+
+         deletion_discussion = div.prettify()
+
+         # Extract label (the closer's bolded verdict paragraph, kept as HTML)
+         label = ''
+         verdict_tag = div.find('p')
+         if verdict_tag:
+             label_b_tag = verdict_tag.find('b')
+             if label_b_tag:
+                 label = verdict_tag.prettify()
+
+         # Extract confirmation (the italicised closing note, kept as HTML);
+         # guard against discussions without a <dd> block.
+         confirmation = ''
+         dd_tag = div.find('dd')
+         discussion_tag = dd_tag.find('i') if dd_tag else None
+         if discussion_tag:
+             confirmation_b_tag = discussion_tag.find('b')
+             if confirmation_b_tag:
+                 confirmation = discussion_tag.prettify()
+
+         # Everything before the section heading is the discussion; the rest is the verdict.
+         parts = deletion_discussion.split('<div class="mw-heading mw-heading3">')
+         discussion = parts[0] if len(parts) > 0 else ''
+         verdict = '<div class="mw-heading mw-heading3">' + parts[1] if len(parts) > 1 else ''
+         data.append([log_date, title, text_url, deletion_discussion, label, confirmation, discussion, verdict])
+     df = pd.DataFrame(data, columns=['log_date', 'title', 'text_url', 'deletion_discussion', 'label', 'confirmation', 'discussion', 'verdict'])
+     return df
+
+
+ def extract_div_contents_from_url(url, date):
+     response = requests.get(url)
+     if response.status_code != 200:
+         print(f"Error: Received status code {response.status_code} for URL: {url}")
+         return pd.DataFrame(columns=['date', 'title', 'text_url', 'deletion_discussion', 'label', 'confirmation', 'discussion', 'verdict'])
+
+     soup = BeautifulSoup(response.content, 'html.parser')
+     div_classes = ['boilerplate afd vfd xfd-closed', 'boilerplate afd vfd xfd-closed archived mw-archivedtalk']
+     divs = []
+     for div_class in div_classes:
+         divs.extend(soup.find_all('div', class_=div_class))
+     # The URL fragment names the article under discussion; it is compared
+     # against each div's title below to keep only the matching discussion.
+     url_fragment = url.split('#')[-1].replace('_', ' ')
+
+     data = []
+     for div in divs:
+         try:
+             title = None
+             text_url = None
+             title_tag = div.find('a')
+             if title_tag:
+                 title_span = div.find('span', {'data-mw-comment-start': True})
+                 if title_span:
+                     title_anchor = title_span.find_next_sibling('a')
+                     if title_anchor:
+                         title = title_anchor.text
+                         text_url = 'https://en.wikipedia.org' + title_anchor['href']
+                 else:
+                     title = title_tag.text
+                     text_url = 'https://en.wikipedia.org' + title_tag['href']
+
+             # Fall back to the section heading when the first anchor is not the article link.
+             if title == 'talk page' or title is None:
+                 heading_tag = div.find('div', class_='mw-heading mw-heading3')
+                 if heading_tag:
+                     title_tag = heading_tag.find('a')
+                     if title_tag:
+                         title = title_tag.text
+                         text_url = 'https://en.wikipedia.org' + title_tag['href']
+
+             if not title:
+                 continue
+             if title.lower() != url_fragment.lower():
+                 continue
+
+             deletion_discussion = div.prettify()
+             label = ''
+             verdict_tag = div.find('p')
+             if verdict_tag:
+                 label_b_tag = verdict_tag.find('b')
+                 if label_b_tag:
+                     label = label_b_tag.text.strip()
+             confirmation = ''
+             discussion_tag = div.find('dd')
+             if discussion_tag:
+                 discussion_tag_i = discussion_tag.find('i')
+                 if discussion_tag_i:
+                     confirmation_b_tag = discussion_tag_i.find('b')
+                     if confirmation_b_tag:
+                         confirmation = confirmation_b_tag.text.strip()
+             parts = deletion_discussion.split('<div class="mw-heading mw-heading3">')
+             discussion = parts[0] if len(parts) > 0 else ''
+             verdict = '<div class="mw-heading mw-heading3">' + parts[1] if len(parts) > 1 else ''
+
+             data.append([date, title, text_url, deletion_discussion, label, confirmation, discussion, verdict])
+         except Exception as e:
+             print(f"Error processing div: {e}")
+             continue
+
+     df = pd.DataFrame(data, columns=['date', 'title', 'text_url', 'deletion_discussion', 'label', 'confirmation', 'discussion', 'verdict'])
+     return df
+
+
+ def extract_div_contents_from_url_new(url, date):
+     response = requests.get(url)
+     if response.status_code != 200:
+         print(f"Error: Received status code {response.status_code} for URL: {url}")
+         return pd.DataFrame(columns=['date', 'title', 'text_url', 'deletion_discussion', 'label', 'confirmation', 'discussion', 'verdict'])
+
+     soup = BeautifulSoup(response.content, 'html.parser')
+     # Newer log pages place each discussion under a bare heading div instead of
+     # the closed-discussion boilerplate, so that class is searched as well.
+     div_classes = ['boilerplate afd vfd xfd-closed', 'boilerplate afd vfd xfd-closed archived mw-archivedtalk', 'mw-heading mw-heading3']
+     divs = []
+     for div_class in div_classes:
+         divs.extend(soup.find_all('div', class_=div_class))
+
+     url_fragment = url.split('#')[-1].replace('_', ' ')
+
+     data = []
+     for div in divs:
+         try:
+             title = None
+             text_url = None
+             title_tag = div.find('a')
+             if title_tag:
+                 title_span = div.find('span', {'data-mw-comment-start': True})
+                 if title_span:
+                     title_anchor = title_span.find_next_sibling('a')
+                     if title_anchor:
+                         title = title_anchor.text
+                         text_url = 'https://en.wikipedia.org' + title_anchor['href']
+                 else:
+                     title = title_tag.text
+                     text_url = 'https://en.wikipedia.org' + title_tag['href']
+
+             if title == 'talk page' or title is None:
+                 heading_tag = div.find('div', class_='mw-heading mw-heading3')
+                 if heading_tag:
+                     title_tag = heading_tag.find('a')
+                     if title_tag:
+                         title = title_tag.text
+                         text_url = 'https://en.wikipedia.org' + title_tag['href']
+
+             if not title:
+                 continue
+             if title.lower() != url_fragment.lower():
+                 continue
+
+             # Collect everything between this heading and the next one as the
+             # discussion HTML.
+             next_div = div.find_next('div', class_='mw-heading mw-heading3')
+             deletion_discussion = ''
+             sibling = div.find_next_sibling()
+             while sibling and sibling != next_div:
+                 deletion_discussion += str(sibling)
+                 sibling = sibling.find_next_sibling()
+
+             label = ''
+             verdict_tag = div.find('p')
+             if verdict_tag:
+                 label_b_tag = verdict_tag.find('b')
+                 if label_b_tag:
+                     label = label_b_tag.text.strip()
+             confirmation = ''
+             discussion_tag = div.find('dd')
+             if discussion_tag:
+                 discussion_tag_i = discussion_tag.find('i')
+                 if discussion_tag_i:
+                     confirmation_b_tag = discussion_tag_i.find('b')
+                     if confirmation_b_tag:
+                         confirmation = confirmation_b_tag.text.strip()
+             parts = deletion_discussion.split('<div class="mw-heading mw-heading3">')
+             discussion = parts[0] if len(parts) > 0 else ''
+             verdict = '<div class="mw-heading mw-heading3">' + parts[1] if len(parts) > 1 else ''
+
+             data.append([date, title, text_url, deletion_discussion, label, confirmation, discussion, verdict])
+         except Exception as e:
+             print(f"Error processing div: {e}")
+             continue
+
+     df = pd.DataFrame(data, columns=['date', 'title', 'text_url', 'deletion_discussion', 'label', 'confirmation', 'discussion', 'verdict'])
+     return df
+
+
+ def extract_label(label_html):
+     soup = BeautifulSoup(label_html, 'html.parser')
+     b_tag = soup.find('b')
+     return b_tag.text.strip() if b_tag else ''
+
+
+ def process_labels(df):
+     df['proper_label'] = df['label'].apply(extract_label)
+     return df
+
+
+ def extract_confirmation(confirmation_html):
+     soup = BeautifulSoup(confirmation_html, 'html.parser')
+     # Guard against rows whose confirmation HTML has no red closing span.
+     span_tag = soup.find('span', {'style': 'color:red'})
+     b_tag = span_tag.find('b') if span_tag else None
+     return b_tag.text.strip() if b_tag else ''
+
+
+ def process_confirmations(df):
+     df['confirmation'] = df['confirmation'].apply(extract_confirmation)
+     return df
+
+
+ def extract_post_links_text(discussion_html):
+     # Keep only the text after the first plainlinks span, if one is present.
+     split_point = '<span class="plainlinks">'
+     if split_point in discussion_html:
+         parts = discussion_html.split(split_point)
+         if len(parts) > 1:
+             return parts[1]
+     return discussion_html
+
+
+ def process_discussion(df):
+     df['discussion_cleaned'] = df['discussion'].apply(extract_post_links_text)
+     return df
+
+
+ def html_to_plaintext(html_content):
+     soup = BeautifulSoup(html_content, 'html.parser')
+     # Surround block-level tags with newlines so get_text() keeps paragraph breaks.
+     for tag in soup.find_all(['p', 'li', 'dd', 'dl']):
+         tag.insert_before('\n')
+         tag.insert_after('\n')
+     for br in soup.find_all('br'):
+         br.replace_with('\n')
+
+     text = soup.get_text(separator=' ', strip=True)
+     text = '\n'.join([line.strip() for line in text.splitlines() if line.strip() != ''])
+     return text
+
+
+ def process_html_to_plaintext(df):
+     df['discussion_cleaned'] = df['discussion_cleaned'].apply(html_to_plaintext)
+     return df
+
+
+ def split_text_into_sentences(text):
+     seg = pysbd.Segmenter(language="en", clean=False)
+     sentences = seg.segment(text)
+     # Drop the first segmented sentence and rejoin the rest.
+     return ' '.join(sentences[1:])
+
+
+ def process_split_text_into_sentences(df):
+     df['discussion_cleaned'] = df['discussion_cleaned'].apply(split_text_into_sentences)
+     return df
+
+
+ def process_data(url, date):
+     df = extract_div_contents_from_url(url, date)
+     # Fall back to the heading-based parser when the boilerplate parser finds nothing.
+     if df.discussion.tolist() == []:
+         df = extract_div_contents_from_url_new(url, date)
+     df = process_discussion(df)
+     df = process_html_to_plaintext(df)
+     df = process_split_text_into_sentences(df)
+     if not df.empty:
+         return df
+     else:
+         return 'Empty DataFrame'
+
+
+ def collect_deletion_discussions(start_date, end_date):
+     base_url = 'https://en.wikipedia.org/wiki/Wikipedia:Articles_for_deletion/Log/'
+     all_data = pd.DataFrame()
+
+     current_date = start_date
+     while current_date <= end_date:
+         try:
+             print(f"Processing {current_date.strftime('%Y-%B-%d')}")
+             date_str = current_date.strftime('%Y_%B_%d')
+             url = base_url + date_str
+             log_date = current_date.strftime('%Y-%m-%d')
+
+             df = extract_div_contents_with_additional_columns(url, log_date)
+             if not df.empty:
+                 df = process_labels(df)
+                 df = process_confirmations(df)
+                 df = process_discussion(df)
+                 df = process_html_to_plaintext(df)
+                 df = process_split_text_into_sentences(df)
+                 all_data = pd.concat([all_data, df], ignore_index=True)
+
+             current_date += timedelta(days=1)
+         except Exception as e:
+             print(f"Error processing {current_date.strftime('%Y-%B-%d')}: {e}")
+             current_date += timedelta(days=1)
+             continue
+
+     return all_data
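For reference, a minimal usage sketch of the two entry points (assuming collect_data.py is importable from the working directory; the date range, article fragment, and output filename below are illustrative):

    from datetime import datetime
    from collect_data import collect_deletion_discussions, process_data

    # Crawl one week of AfD daily logs into a single DataFrame.
    df = collect_deletion_discussions(datetime(2024, 1, 1), datetime(2024, 1, 7))
    df.to_csv('afd_discussions.csv', index=False)

    # Parse one discussion from a log page; the URL fragment must name the article.
    url = 'https://en.wikipedia.org/wiki/Wikipedia:Articles_for_deletion/Log/2024_January_1#Example_article'
    single = process_data(url, '2024-01-01')  # DataFrame, or 'Empty DataFrame' if nothing matched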
process_data.py CHANGED
@@ -1,6 +1,6 @@
  import pandas as pd
  from datetime import datetime
- from wide_analysis.data.collect_data import collect_deletion_discussions, process_data
+ from collect_data import collect_deletion_discussions, process_data


  label_mapping = {
requirements.txt CHANGED
@@ -1,9 +1,9 @@
- beautifulsoup4==4.12.3
- datasets==2.21.0
- gradio==5.8.0
- pandas==2.0.1
- pysbd==0.3.4
- Requests==2.32.3
- torch==2.2.1
- transformers==4.44.1
- wide_analysis==0.3.3
+ beautifulsoup4
+ datasets
+ gradio
+ pandas
+ pysbd
+ Requests
+ torch
+ transformers
+ wide_analysis
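With the pins removed, pip resolves the newest available release of each package at install time; a minimal setup sketch (the virtual-environment name is illustrative):

    python -m venv .venv
    source .venv/bin/activate
    pip install -r requirements.txt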