Upload folder using huggingface_hub
- tools/clean.py +62 -0
- tools/crawl jokes.py +67 -0
- tools/crawl-stories.py +67 -0
- tools/crawl-tales.py +55 -0
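The commit message above is the default one produced by huggingface_hub's folder upload. For reference, a minimal sketch of how such a commit can be created is shown below; the repo id is a placeholder and the access token is assumed to be configured locally (e.g. via huggingface-cli login).

# Sketch of the upload that produces a commit like this one.
# "your-username/your-dataset" is a placeholder repo id, not the actual repository.
from huggingface_hub import HfApi

api = HfApi()  # reads the token from the local cache or the HF_TOKEN environment variable
api.upload_folder(
    folder_path="tools",            # local folder containing the crawl/clean scripts
    path_in_repo="tools",           # keep the same layout in the dataset repo
    repo_id="your-username/your-dataset",
    repo_type="dataset",
    commit_message="Upload folder using huggingface_hub",
)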
tools/clean.py
ADDED
@@ -0,0 +1,62 @@
import json
import re


def clean_output_file(input_file_path, output_file_path):
    with open(input_file_path, 'r') as input_file, open(output_file_path, 'w') as output_file:
        for line in input_file:
            # Load the line as a JSON object
            item = json.loads(line)

            # Check if the 'text' field exists
            if 'text' in item:
                patterns_to_remove = [
                    r'Author:.*?\n',              # Remove author credits
                    r'\+ More articles by.*?\n',  # Remove "+ More articles" lines
                    r'- Hide list.*?\n',          # Remove "- Hide list" lines
                    r'Rating:.*?\n',              # Remove lines starting with "Rating:"
                    r'\n\uf129\n.*?\n',           # Remove lines with the special character and any text until the next newline
                ]

                # Apply each pattern removal
                for pattern in patterns_to_remove:
                    item['text'] = re.sub(pattern, '', item['text'], flags=re.DOTALL)

                # Replace specific unicode characters with a space or an equivalent character
                item['text'] = item['text'].replace('\u00a0', ' ')
                item['text'] = item['text'].replace('\u2008', ' ')
                item['text'] = item['text'].replace('\u2019', '\'')

                # Collapse runs of blank lines into a single blank line
                item['text'] = re.sub(r'\n\s*\n', '\n\n', item['text'])

                # Skip the record entirely if it contains any unwanted phrase
                if ("This page doesn" in item['text'] or
                        "+ Show component code" in item['text'] or
                        "position: relative" in item['text'] or
                        "Don't forget to fill out this" in item['text'] or
                        "Table of Contents" in item['text'] or
                        "Tales by SCP Series" in item['text']):
                    continue  # Skip this record

                # Remove "rating: +" and any text until the next newline
                item['text'] = re.sub(r'rating: \+.*?\n', '', item['text'], flags=re.DOTALL)

            # Write the updated item back out if it was not skipped above
            json.dump(item, output_file)
            output_file.write('\n')


# stories
for num in range(1, 9):
    input_file_path = f'scp_stories{num}.jsonl'   # Replace with your input file path
    output_file_path = f'stories{num}.jsonl'      # Replace with your desired output file path
    clean_output_file(input_file_path, output_file_path)

# tales
input_file_path = 'scp_tales.jsonl'               # Replace with your input file path
output_file_path = 'scp_tales_cleaned.jsonl'      # Replace with your desired output file path
clean_output_file(input_file_path, output_file_path)

# jokes
input_file_path = 'scp_jokes.jsonl'               # Replace with your input file path
output_file_path = 'jokes-cleaned.jsonl'          # Replace with your desired output file path
clean_output_file(input_file_path, output_file_path)
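After clean.py runs, the cleaned JSONL files can be loaded directly for inspection or training. A minimal sketch using the datasets library is shown below; the file names match the outputs written above, while grouping everything into a single train split is just an assumption.

# Sketch: load the cleaned JSONL outputs with the `datasets` library.
# File names follow clean.py's outputs; the single-split layout is an assumption.
from datasets import load_dataset

data_files = {
    "train": [f"stories{num}.jsonl" for num in range(1, 9)]
    + ["scp_tales_cleaned.jsonl", "jokes-cleaned.jsonl"],
}
dataset = load_dataset("json", data_files=data_files)
print(dataset["train"][0]["text"][:200])  # peek at the first cleaned record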
tools/crawl jokes.py
ADDED
@@ -0,0 +1,67 @@
import requests
from bs4 import BeautifulSoup
import json


def crawl_scp_series(url, num):
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find the div with id 'page-content'
    content_div = soup.find('div', id='page-content')

    # Extract all the links within this div
    links = [a['href'] for a in content_div.find_all('a', href=True) if a.text]

    # Initialize a list to store the extracted texts
    stories = []

    for link in links:
        # Ensure the link is absolute
        if not link.startswith('http'):
            link = f"https://scp-wiki.wikidot.com{link}"

        # Fetch each link
        try:
            story_response = requests.get(link)
            story_soup = BeautifulSoup(story_response.content, 'html.parser')
            if story_soup:
                # Extract the text from the div with id 'page-content'
                page_content = story_soup.find('div', id='page-content')
                if page_content:
                    # Remove the right-aligned rating/credit div if it exists
                    first_div = page_content.find('div', style="text-align: right;")
                    if first_div:
                        first_div.decompose()

                    # Remove the div with class 'licensebox' if it exists
                    licensebox_div = page_content.find('div', class_='licensebox')
                    if licensebox_div:
                        licensebox_div.decompose()
                    print("Found page-content div")
                else:
                    print(f"Could not find page-content div for {link}")

                if page_content:
                    story_text = page_content.get_text().strip()
                    stories.append(story_text)

                # Once 10 pages have been collected, flush them to disk
                if len(stories) == 10:
                    # Append these entries to the JSONL file
                    with open("scp_jokes.jsonl", 'a') as file:
                        for story in stories:
                            json_record = json.dumps({'text': story})
                            file.write(json_record + '\n')

                    # Reset the list for the next batch
                    stories = []
        except requests.exceptions.RequestException as e:
            print(f"Error fetching {link}: {e}")

    # Flush any remaining entries that did not fill a complete batch of 10
    if stories:
        with open("scp_jokes.jsonl", 'a') as file:
            for story in stories:
                file.write(json.dumps({'text': story}) + '\n')


# URL of the joke SCPs page
urls = ['https://scp-wiki.wikidot.com/joke-scps']
num = 1
# Start crawling
for url in urls:
    crawl_scp_series(url, num)
    print(url)
    num += 1
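Both crawl scripts fetch every linked page back-to-back, opening a fresh connection each time. As a hedged sketch (not part of the original scripts), a small helper that reuses one requests.Session and pauses between fetches could be dropped in; the one-second delay and 30-second timeout are assumed values.

# Sketch of a politer fetch helper (an assumption, not part of the original scripts):
# reuse one HTTP connection and pause between requests to avoid hammering the wiki.
import time
import requests

session = requests.Session()

def fetch(url, delay_seconds=1.0):
    """Fetch a page through a shared session with a fixed delay (assumed value)."""
    time.sleep(delay_seconds)
    return session.get(url, timeout=30)

# Example: story_response = fetch(link) could replace requests.get(link) above.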
tools/crawl-stories.py
ADDED
@@ -0,0 +1,67 @@
import requests
from bs4 import BeautifulSoup
import json


def crawl_scp_series(url, num):
    response = requests.get(url)
    soup = BeautifulSoup(response.content, 'html.parser')

    # Find the div with id 'page-content'
    content_div = soup.find('div', id='page-content')

    # Extract all the links within this div
    links = [a['href'] for a in content_div.find_all('a', href=True) if a.text]

    # Initialize a list to store the extracted texts
    stories = []

    for link in links:
        # Ensure the link is absolute
        if not link.startswith('http'):
            link = f"https://scp-wiki.wikidot.com{link}"

        # Fetch each link
        try:
            story_response = requests.get(link)
            story_soup = BeautifulSoup(story_response.content, 'html.parser')
            if story_soup:
                # Extract the text from the div with id 'page-content'
                page_content = story_soup.find('div', id='page-content')
                if page_content:
                    # Remove the right-aligned rating/credit div if it exists
                    first_div = page_content.find('div', style="text-align: right;")
                    if first_div:
                        first_div.decompose()

                    # Remove the div with class 'licensebox' if it exists
                    licensebox_div = page_content.find('div', class_='licensebox')
                    if licensebox_div:
                        licensebox_div.decompose()
                    print("Found page-content div")
                else:
                    print(f"Could not find page-content div for {link}")

                if page_content:
                    story_text = page_content.get_text().strip()
                    stories.append(story_text)

                # Once 10 pages have been collected, flush them to disk
                if len(stories) == 10:
                    # Append these entries to the JSONL file for this series
                    with open(f"scp_stories{num}.jsonl", 'a') as file:
                        for story in stories:
                            json_record = json.dumps({'text': story})
                            file.write(json_record + '\n')

                    # Reset the list for the next batch
                    stories = []
        except requests.exceptions.RequestException as e:
            print(f"Error fetching {link}: {e}")

    # Flush any remaining entries that did not fill a complete batch of 10
    if stories:
        with open(f"scp_stories{num}.jsonl", 'a') as file:
            for story in stories:
                file.write(json.dumps({'text': story}) + '\n')


# URLs of the SCP series pages
urls = [
    'https://scp-wiki.wikidot.com/scp-series-1',
    'https://scp-wiki.wikidot.com/scp-series-2',
    'https://scp-wiki.wikidot.com/scp-series-3',
    'https://scp-wiki.wikidot.com/scp-series-4',
    'https://scp-wiki.wikidot.com/scp-series-5',
    'https://scp-wiki.wikidot.com/scp-series-6',
    'https://scp-wiki.wikidot.com/scp-series-7',
    'https://scp-wiki.wikidot.com/scp-series-8',
]
num = 1
# Start crawling
for url in urls:
    crawl_scp_series(url, num)
    print(url)
    num += 1
tools/crawl-tales.py
ADDED
@@ -0,0 +1,55 @@
import requests
from bs4 import BeautifulSoup
import json


def scrape_tales_by_year(start_year, end_year):
    base_url = "https://scp-wiki.wikidot.com/tales-by-date-"
    all_tales = []

    for year in range(start_year, end_year + 1):
        url = f"{base_url}{year}"
        response = requests.get(url)
        soup = BeautifulSoup(response.content, 'html.parser')

        # Find all tables, then find all table rows within each table
        for table in soup.find_all('table', class_='wiki-content-table'):
            for row in table.find_all('tr'):
                cells = row.find_all('td')
                if cells:
                    a_tag = cells[0].find('a', href=True)
                    if a_tag:
                        tale_url = f"https://scp-wiki.wikidot.com{a_tag['href']}"
                        tale_response = requests.get(tale_url)
                        tale_soup = BeautifulSoup(tale_response.content, 'html.parser')

                        # Extract the text from the div with id 'page-content'
                        page_content = tale_soup.find('div', id='page-content')
                        if page_content:
                            # Remove the first div with the right-aligned style if it exists
                            first_div = page_content.find('div', style="text-align: right;")
                            if first_div:
                                first_div.decompose()

                            # Remove the div with class 'licensebox' if it exists
                            licensebox_div = page_content.find('div', class_='licensebox')
                            if licensebox_div:
                                licensebox_div.decompose()

                            # Now get the text from the rest of the page-content
                            tale_text = page_content.get_text().strip()

                            all_tales.append({'text': tale_text})

                            # Find the first non-empty line in the tale's text
                            first_line = next((line for line in tale_text.splitlines() if line.strip()), "")
                            print(first_line)  # Print the first non-empty line of the tale
                            # Optionally, write each tale to a file or process it further here
                        else:
                            print(f"Could not find page-content div for {tale_url}")

    # Write the tales to a JSONL file
    with open('scp_tales.jsonl', 'w') as file:
        for tale in all_tales:
            json_record = json.dumps(tale)  # tale is already a dict with the key 'text'
            file.write(json_record + '\n')


# Call the function with the range of years you want to scrape
scrape_tales_by_year(2008, 2022)
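Before running clean.py, it can help to sanity-check the crawled output. The sketch below simply counts records and empty 'text' fields in scp_tales.jsonl; the file name matches the output written above.

# Sketch: count records and empty 'text' fields in the crawled tales file.
import json

total = empty = 0
with open('scp_tales.jsonl') as f:
    for line in f:
        record = json.loads(line)
        total += 1
        if not record.get('text', '').strip():
            empty += 1
print(f"{total} tales crawled, {empty} with empty text")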