# wikibooks-cookbook / scrape.py
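"""Scrape recipes from the English Wikibooks Cookbook.

download_recipes() walks the Category:Recipes listing and saves each
Cookbook: page as HTML under recipes/; parse_recipes() turns the saved
pages into structured JSON (recipes_parsed.json and recipes_parsed.mini.json).
"""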
import glob
import json
import os

import requests
from bs4 import BeautifulSoup


def download_recipes():
    """Download every recipe page linked from the Category:Recipes listing."""
    os.makedirs("recipes", exist_ok=True)
    next_category_page = "https://en.wikibooks.org/wiki/Category:Recipes"
    page_number = 0
    while next_category_page is not None:
        r = requests.get(next_category_page)
        next_category_page = None
        soup = BeautifulSoup(r.text, features="html.parser")
        mw_pages = soup.select("#mw-pages")[0]
        recipe_number = 0
        for a in mw_pages.find_all("a"):
            if a["href"].endswith("#mw-pages") and a.text == "next page":
                # Follow the category's pagination to the next listing page.
                print(a["href"])
                next_category_page = f"https://en.wikibooks.org{a['href']}"
            elif a["href"].startswith("/wiki/Cookbook:"):
                r2 = requests.get(f"https://en.wikibooks.org{a['href']}")
                if r2.status_code == 200:
                    print("\tdownloading:", a["href"])
                    with open(f"recipes/recipe_{page_number:02}_{recipe_number:03}.html", "w") as f:
                        f.write(r2.text)
                else:
                    print("WARNING: could not download:", a["href"])
                recipe_number += 1
        page_number += 1


def parse_recipes():
    """Parse all downloaded recipe pages and write the results to JSON."""
    recipes = []
    for recipe_file in sorted(glob.glob("recipes/*.html")):
        print("parsing:", recipe_file)
        recipe_data = parse_recipe(recipe_file)
        recipes.append({"filename": recipe_file, "recipe_data": recipe_data})
    # Pretty-printed version with Unicode preserved ...
    with open("recipes_parsed.json", "w") as f:
        json.dump(recipes, f, ensure_ascii=False, indent=4)
    # ... and a compact, ASCII-escaped version.
    with open("recipes_parsed.mini.json", "w") as f:
        json.dump(recipes, f, ensure_ascii=True)


def parse_recipe(filename):
    """Extract title, URL, infobox, and text lines from one saved recipe page."""
    with open(filename) as f:
        soup = BeautifulSoup(f.read(), features="html.parser")
    recipe_title = soup.select("#firstHeading")[0].get_text().replace("Cookbook:", "")
    recipe_url = soup.find("link", attrs={"rel": "canonical"})["href"]
    infoboxes = soup.select("table.infobox")
    if len(infoboxes) >= 1:
        recipe_info = parse_infobox(infoboxes[0])
    else:
        print(f"[{filename}] WARNING: no infobox found")
        recipe_info = None
    content = soup.select("div.mw-content-ltr")[0]
    section_name = None
    text_lines = []
    for child in content:
        if child.name == "p" and child.get_text().startswith("Cookbook | Recipes | Ingredients |"):
            continue  # skip the navigation bar at the top of the page
        elif child.name == "div" and " ".join(child.get("class", [])) == "mw-heading mw-heading2":
            # A level-2 heading starts a new section; remember its name
            # so following lines can be tagged with it.
            section_name = child.find("h2").text
        elif child.name in ["p", "blockquote"]:
            text_lines.append({
                "text": child.get_text().strip(),
                "line_type": child.name,
                "section": section_name
            })
        elif child.name in ["ul", "ol"]:
            # Flatten list items, tagging each with the list type it came from.
            for li in child.find_all("li"):
                text_lines.append({
                    "text": li.get_text().strip(),
                    "line_type": child.name,
                    "section": section_name
                })
    return {
        "url": recipe_url,
        "title": recipe_title,
        "infobox": recipe_info,
        "text_lines": text_lines
    }


def parse_infobox(infobox):
    """Read category, servings, time, and difficulty from a recipe infobox table."""
    recipe_info = {
        "category": None,
        "servings": None,
        "time": None,
        "difficulty": None
    }
    for tr in infobox.select("tr"):
        if not tr.select("th"):
            continue  # skip rows without a header cell
        header = tr.find("th").text
        if header == "Category":
            category_link = tr.find("td").find("a")
            if category_link:
                recipe_info["category"] = category_link["href"]
        elif header == "Servings":
            recipe_info["servings"] = tr.find("td").text
        elif header == "Time":
            recipe_info["time"] = tr.find("td").text
        elif header == "Difficulty":
            # Difficulty is shown as an image of 1-5 dots; map the SVG back to a number.
            difficulty_link = tr.select("a.mw-file-description")
            if difficulty_link:
                recipe_info["difficulty"] = parse_difficulty(difficulty_link[0]["href"])
    return recipe_info


def parse_difficulty(svg_filename):
    """Map a dots SVG filename (e.g. /wiki/File:3o5dots.svg) to a difficulty of 1-5."""
    difficulty_levels = {
        "/wiki/File:1o5dots.svg": 1,
        "/wiki/File:2o5dots.svg": 2,
        "/wiki/File:3o5dots.svg": 3,
        "/wiki/File:4o5dots.svg": 4,
        "/wiki/File:5o5dots.svg": 5,
    }
    level = difficulty_levels.get(svg_filename)
    if level is None:
        raise ValueError(f"Invalid difficulty level filename: {svg_filename}")
    return level


if __name__ == "__main__":
    # download_recipes()
    parse_recipes()
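
# For reference, one entry of recipes_parsed.json has roughly this shape
# (a sketch based on the fields assembled above; actual values depend on
# the downloaded pages):
#
# {
#     "filename": "recipes/recipe_00_000.html",
#     "recipe_data": {
#         "url": "https://en.wikibooks.org/wiki/Cookbook:...",
#         "title": "...",
#         "infobox": {"category": "/wiki/Category:...", "servings": "...",
#                     "time": "...", "difficulty": 1},
#         "text_lines": [
#             {"text": "...", "line_type": "p", "section": "..."}
#         ]
#     }
# }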