# /// script
# dependencies = [
# "aiohttp",
# "datasets",
# "beautifulsoup4",
# "llms-txt-rs",
# "tqdm"
# ]
# ///
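# The block above is PEP 723 inline script metadata: a compatible runner
# installs the dependencies automatically, e.g. `uv run dataset.py`.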
import asyncio
import json
from dataclasses import dataclass
from urllib.parse import urlparse

import aiohttp
from bs4 import BeautifulSoup, Tag
from datasets import Dataset
from llms_txt_rs import parse_llms_txt
from tqdm.asyncio import tqdm_asyncio

# Type alias for the structure returned by parse_llms_txt.
ParsedTxt = dict[str, str | None | dict[str, list[dict[str, str | None]]]]
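# For illustration (an assumed shape matching the alias above, not a documented
# schema of llms-txt-rs), a parsed llms.txt is expected to look roughly like:
# {
#     "title": "Project",
#     "summary": "Optional one-line summary",  # may be None
#     "sections": {"Docs": [{"title": "Quickstart", "url": "...", "desc": None}]},
# }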


@dataclass
class Page:
    name: str
    url: str | None
    url_full: str | None
    # Data added to the Page object once downloaded
    llms_txt: str | None = None
    llms_txt_parsed: ParsedTxt | None = None
    llms_txt_full: str | None = None

    @property
    def base_url(self) -> str:
        # Guard against a missing url: urlparse(None) would raise a TypeError.
        parsed = urlparse(self.url or "")
        return f"{parsed.scheme}://{parsed.netloc}"

    def add_text(self, txt: str, parsed_txt: ParsedTxt | None, is_full: bool = False, url: str | None = None) -> None:
        # Drop downloads whose URL does not belong to this page's own domain.
        if url and not url.startswith(self.base_url):
            return
        if not is_full:
            self.llms_txt = txt
            self.llms_txt_parsed = parsed_txt
        else:
            self.llms_txt_full = txt

    def record(self) -> dict:
        # Serialize 'llms_txt_parsed' to JSON: otherwise datasets tries to make
        # the field homogeneous across pages and would mangle the nested dicts.
        return {
            "name": self.name,
            "url": self.url,
            "url_full": self.url_full,
            "llms_txt": self.llms_txt,
            "llms_txt_parsed": json.dumps(self.llms_txt_parsed),
            "llms_txt_full": self.llms_txt_full,
        }
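    # Illustrative record() output (values invented for the example):
    # {"name": "Example", "url": "https://example.com/llms.txt", "url_full": None,
    #  "llms_txt": "# Example\n...", "llms_txt_parsed": "{\"title\": ...}",
    #  "llms_txt_full": None}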


async def download_directory_llmstxt(url: str = "https://directory.llmstxt.cloud/") -> list[Page]:
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            content = await response.read()
    soup = BeautifulSoup(content, 'html.parser')
    pages = [parse_row(row) for row in soup.find_all('li')]
    return pages


# Extract a Page (name plus llms.txt / llms-full.txt links) from a directory row.
def parse_row(row: Tag) -> Page:
    a_elems = row.find_all("a")
    name = a_elems[0].find("h3").text
    url = None
    url_full = None
    if len(a_elems) > 2:  # the row links both llms.txt and llms-full.txt
        for elem in a_elems[1:]:
            href = elem.get("href")
            if "llms-full.txt" in href:
                url_full = href
            else:
                url = href
    else:
        url = a_elems[1].get("href")
    return Page(
        name=name,
        url=url,
        url_full=url_full
    )
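

# For reference, each directory row is assumed (from the parsing above) to look
# roughly like:
#
#   <li>
#     <a ...><h3>Project name</h3></a>
#     <a href="https://example.com/llms.txt">llms.txt</a>
#     <a href="https://example.com/llms-full.txt">llms-full.txt</a>  <!-- optional -->
#   </li>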


async def download_url(session: aiohttp.ClientSession, page: Page, url: str) -> Page:
    try:
        async with session.get(url) as response:
            if response.status == 200:
                txt = await response.text()
                if "llms-full.txt" in url:
                    # The full variant is stored verbatim, without parsing.
                    page.add_text(txt, None, is_full=True, url=url)
                elif "llms.txt" in url:
                    try:
                        parsed = parse_llms_txt(txt)
                        page.add_text(txt, parsed, is_full=False, url=url)
                    except Exception as e:
                        # Nothing else to do: the fields simply stay None.
                        print(f"Error parsing '{url}': {e}")
                else:
                    print(f"Unexpected url downloaded from the directory: {url}")
    except Exception as e:
        print(f"Error downloading url '{url}': {e}")
    return page


async def process_page(session: aiohttp.ClientSession, page: Page) -> Page:
    # Fetch llms.txt and, when present, llms-full.txt for the page.
    if url := page.url:
        page = await download_url(session, page, url)
    if url := page.url_full:
        page = await download_url(session, page, url)
    return page


async def download_pages(pages: list[Page]) -> list[Page]:
    async with aiohttp.ClientSession() as session:
        tasks = [process_page(session, page) for page in pages]
        return await tqdm_asyncio.gather(*tasks, total=len(pages))
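

# download_pages() launches every request at once. An optional sketch (not part
# of the original flow) to cap the number of in-flight requests with a semaphore:
#
#   sem = asyncio.Semaphore(20)
#
#   async def bounded(session: aiohttp.ClientSession, page: Page) -> Page:
#       async with sem:
#           return await process_page(session, page)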


async def main():
    print("Downloading directory...")
    pages = await download_directory_llmstxt()
    print("Downloading pages...")
    pages = await download_pages(pages)
    print("Preparing dataset...")
    ds = Dataset.from_list([page.record() for page in pages])
    ds.push_to_hub("plaguss/llms-txt", commit_message="Initial commit")


if __name__ == "__main__":
    asyncio.run(main())