# Scrape Devopedia articles into a JSONL corpus. Article URLs are discovered
# through the paginated site-map endpoint:
# https://devopedia.org/site-map/list-articles?page=2&action=prev-page&tag=0&ajax=1
import asyncio
import pathlib
import re

import httpx
import markdownify
import orjson
from bs4 import BeautifulSoup
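
# The cached article URL index lives under ./data.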
root = pathlib.Path("data")
root.mkdir(exist_ok=True, parents=True)
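

# Markdown converter that keeps link text but drops the hyperlinks themselves,
# and strips images entirely, so the output is plain prose.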
class md_nolinks(markdownify.MarkdownConverter):
def convert_a(self, _, text, __):
_, _, text = markdownify.chomp(text)
if not text:
return ""
return text
def convert_img(self, el, text, convert_as_inline):
return ""
md = md_nolinks()
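

# Crawl the site map for article URLs, then fetch every article and convert
# its main sections to Markdown, one JSON object per line.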
async def index():
session = httpx.AsyncClient()
session.headers["user-agent"] = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Safari/537.36"
)
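    # Build the article URL index once and cache it on disk; later runs reuse it.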
    if not (root / "dev_index.json").exists():
dev_urls = set()
        # The site map spanned roughly 20 pages when this was written; raise
        # the bound if Devopedia grows.
        for idx in range(20):
c = await session.get(
f"https://devopedia.org/site-map/list-articles?page={idx}&action=next-page&tag=0&ajax=1"
)
soup = BeautifulSoup(c.content, "lxml")
for href in [
a["href"] for a in soup.select("div.dev-events div.uk-panel > div > a")
]:
dev_urls.add(f"https://devopedia.org{href}")
        (root / "dev_index.json").write_bytes(
orjson.dumps(list(dev_urls), option=orjson.OPT_INDENT_2)
)
else:
        dev_urls = orjson.loads((root / "dev_index.json").read_bytes())
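    # Fetch each article and convert its Summary, Discussion and Milestones
    # sections to Markdown.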
final_content = []
for url in dev_urls:
c = await session.get(url, timeout=None)
soup = BeautifulSoup(c.text, "lxml")
main = soup.select_one("main div.article-middle")
print(url)
dev_content = []
if main:
h1 = soup.find("h1", attrs={"class": "uk-article-title"})
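            # Articles are composed of <section> elements whose ids mark the
            # Summary, Discussion and Milestones blocks.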
for section in main.select("section"):
                # Figures and inline citation markers carry no prose; drop them.
                for fig in section.find_all("figure"):
                    fig.decompose()
                for sup in section.find_all("sup", attrs={"class": "inline-citation"}):
                    sup.decompose()
                sec_id = (section.get("id") or "").lower()
                if "summary" in sec_id:
                    dev_content.append(
                        f"## Summary\n\n{md.convert_soup(section).rstrip()}"
                    )
                if "discussion" in sec_id:
                    # Discussion is a flat <ul> of Q&A pairs wrapped in custom
                    # <article-question> / <article-answer> elements.
                    discussion = "## Discussion"
                    for qa in section.find("ul", recursive=False).find_all(
                        "li", recursive=False
                    ):
                        q = qa.find("article-question")
                        a = qa.find("article-answer")
                        discussion += f"\n\n### {q.get_text()}\n\n{md.convert_soup(a)}"
                    dev_content.append(discussion)
                if "milestone" in sec_id:
                    # Drop the section's own <h2>; we emit our own heading.
                    section.find("h2").decompose()
                    dev_content.append(
                        f"\n\n## Milestones\n\n{md.convert_soup(section).strip()}"
                    )
            # Normalise whitespace: CRLF -> LF, tabs -> spaces, and collapse
            # runs of three or more newlines to a single blank line.
            body = "\n\n".join(dev_content).replace("\r\n", "\n").replace("\t", " ")
            body = re.sub(r"\n{3,}", "\n\n", body)
            final_content.append(
                orjson.dumps(
                    {
                        "text": f"# {h1.get_text()}\n\n{body}",
                        "meta": {
                            "title": h1.get_text(),
                            "href": url.split("/")[-1],
                        },
                    }
                )
            )
else:
            raise Exception(f"No article body found at {url}")
    pathlib.Path("dev_files.jsonl").write_bytes(b"\n".join(final_content))
    await session.aclose()

if __name__ == "__main__":
asyncio.run(index())