"""
--> this code takes use of URLFetcher.py and fetches the text data from each of the pages
--> saves it in a .txt file
--> voila!!
"""

import os
import json
import re

import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

os.chdir('D:/Machine Learning/SLM-Project/')

from URLFetcher import BritannicaUrls  # local helper that builds the URL list
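
# NOTE (assumed interface): BritannicaUrls is defined in URLFetcher.py, which is
# not shown here. From its usage below it is expected to look roughly like:
#
#   class BritannicaUrls:
#     def __init__(self, search_queries, max_limit): ...
#     headers: dict  # HTTP request headers, reused for the page fetches below
#     def generate_urls(self, progress_bar=None) -> list[str]  # URL path snippets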

query_file = 'Data Collection/webscrapper/search_queries.json'
out_file = 'Data/webscrapped data/britannica_output.txt'
max_limit = 10

with open(query_file, 'r') as file:
  search_queries = json.load(file)
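
# search_queries.json is assumed to be a flat JSON list of query strings, e.g.
#   ["antarctica", "photosynthesis", "gravity"]
# (inferred from the len(search_queries) * max_limit estimate below); adjust
# this if your file uses a different structure.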

scrape = BritannicaUrls(search_queries=search_queries, max_limit=max_limit)
with tqdm(total=len(search_queries) * max_limit, desc="Generating URL snippets") as pbar:
  url_snippets = scrape.generate_urls(progress_bar=pbar)

print('fetched snippets successfully!')
print(f"total snippets: {len(url_snippets)}")

def text_extractor(url_snippet):
  """Fetches one Britannica page and returns its paragraph text, or None on failure."""
  target_url = f"https://britannica.com{url_snippet}"
  r = requests.get(target_url, headers=scrape.headers)

  if r.status_code != 200:
    return None  # page could not be fetched; caller skips it

  soup = BeautifulSoup(r.content, 'html.parser')
  paragraphs = soup.find_all('p')

  # extract text from each <p> tag, excluding the boilerplate editorial notice
  page = '\n'.join(
    p.get_text() for p in paragraphs
    if "Our editors will review what you’ve submitted and determine whether to revise the article." not in p.get_text()
  )
  # strip any residual HTML entities such as &nbsp; or &amp;
  page = re.sub(r'&\w+;', '', page)

  return page
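
# usage sketch (hypothetical snippet path; real snippets come from generate_urls):
#   sample = text_extractor('/topic/Antarctica')
#   if sample:
#     print(sample[:300])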

if __name__ == '__main__':
  with tqdm(total=len(url_snippets), desc="Scraping in progress") as pbar:
    for snippet in url_snippets:
      page = text_extractor(snippet)
      if page:  # only write pages that were fetched successfully
        with open(out_file, 'a', encoding='utf-8') as file:
          file.write(page)
      pbar.update(1)