# /// script
# dependencies = [
#     "aiohttp",
#     "datasets",
#     "beautifulsoup4",
#     "llms-txt-rs",
#     "tqdm",
# ]
# ///
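"""Download the llms.txt directory (https://directory.llmstxt.cloud/), fetch
every llms.txt / llms-full.txt file it lists, and push the result to the
Hugging Face Hub as a dataset.

The script carries PEP 723 inline metadata, so it can be run directly with a
PEP 723-aware runner such as uv (the filename is whatever you saved it as):

    uv run llms_txt_dataset.py
"""
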
import asyncio
import json
from dataclasses import dataclass
from urllib.parse import urlparse

import aiohttp
from bs4 import BeautifulSoup, Tag
from datasets import Dataset
from llms_txt_rs import parse_llms_txt
from tqdm.asyncio import tqdm_asyncio

# Type alias for the structure returned by parse_llms_txt.
ParsedTxt = dict[str, str | None | dict[str, list[dict[str, str | None]]]]


@dataclass
class Page:
    """A single entry of the llms.txt directory."""

    name: str
    url: str | None
    url_full: str | None
    # Data added to the Page object once downloaded
    llms_txt: str | None = None
    llms_txt_parsed: ParsedTxt | None = None
    llms_txt_full: str | None = None

    @property
    def base_url(self) -> str:
        # e.g. "https://example.com/docs/llms.txt" -> "https://example.com"
        parsed = urlparse(self.url)
        return f"{parsed.scheme}://{parsed.netloc}"

    def add_text(self, txt: str, parsed_txt: ParsedTxt | None, is_full: bool = False, url: str | None = None) -> None:
        # Only keep text that comes from the page's own domain.
        if url and not url.startswith(self.base_url):
            return
        if not is_full:
            self.llms_txt = txt
            self.llms_txt_parsed = parsed_txt
        else:
            self.llms_txt_full = txt

    def record(self) -> dict:
        # 'llms_txt_parsed' is serialized to JSON so the column stays a plain
        # string; otherwise datasets would try to make the nested dict
        # homogeneous across the different pages.
        return {
            "name": self.name,
            "url": self.url,
            "url_full": self.url_full,
            "llms_txt": self.llms_txt,
            "llms_txt_parsed": json.dumps(self.llms_txt_parsed),
            "llms_txt_full": self.llms_txt_full,
        }
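

# For reference, a single record pushed to the Hub looks like this
# (values are illustrative, not real data):
# {
#     "name": "Some Project",
#     "url": "https://example.com/llms.txt",
#     "url_full": "https://example.com/llms-full.txt",
#     "llms_txt": "# Some Project\n...",
#     "llms_txt_parsed": "<JSON-encoded ParsedTxt>",
#     "llms_txt_full": "...",
# }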
async def download_directory_llmstxt(url: str = "https://directory.llmstxt.cloud/") -> list[Page]:
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            content = await response.read()
    soup = BeautifulSoup(content, "html.parser")
    pages = [parse_row(row) for row in soup.find_all("li")]
    return pages
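

# Each directory entry is an <li> whose first <a> wraps an <h3> with the
# project name, followed by one <a> per available file (llms.txt and,
# optionally, llms-full.txt). This is an assumption about the current markup
# of directory.llmstxt.cloud and will need updating if the page changes.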
# Extracts a Page from one row of the html.
def parse_row(row: Tag) -> Page:
    a_elems = row.find_all("a")
    name = a_elems[0].find("h3").text
    url = None
    url_full = None
    if len(a_elems) > 2:  # contains both the llms.txt and llms-full.txt urls
        for elem in a_elems[1:]:
            href = elem.get("href")
            if "llms-full.txt" in href:
                url_full = href
            else:
                url = href
    else:
        url = a_elems[1].get("href")
    return Page(name=name, url=url, url_full=url_full)

async def download_url(session: aiohttp.ClientSession, page: Page, url: str) -> Page:
    try:
        async with session.get(url) as response:
            if response.status == 200:
                txt = await response.text()
                try:
                    if "llms.txt" in url:
                        page.add_text(txt, parse_llms_txt(txt), is_full=False, url=url)
                    elif "llms-full.txt" in url:
                        # The full variant is kept as raw text, it isn't parsed.
                        page.add_text(txt, None, is_full=True, url=url)
                    else:
                        print(f"Unexpected url downloaded from the directory: {url}")
                except Exception as e:
                    # No need to do anything else, the fields will simply remain None.
                    print(f"Error parsing '{url}': {e}")
    except Exception as e:
        print(f"Error downloading url '{url}': {e}")
    return page

async def process_page(session: aiohttp.ClientSession, page: Page) -> Page:
    # Download the llms.txt and, when listed, the llms-full.txt for this page.
    if url := page.url:
        page = await download_url(session, page, url)
    if url := page.url_full:
        page = await download_url(session, page, url)
    return page
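

# All pages are fetched concurrently over a single shared session;
# tqdm_asyncio.gather behaves like asyncio.gather but shows a progress bar.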
async def download_pages(pages: list[Page]) -> list[Page]:
    async with aiohttp.ClientSession() as session:
        tasks = [process_page(session, page) for page in pages]
        return await tqdm_asyncio.gather(*tasks, total=len(pages))

async def main():
    print("Downloading directory...")
    pages = await download_directory_llmstxt()
    print("Downloading pages...")
    pages = await download_pages(pages)
    print("Preparing dataset...")
    ds = Dataset.from_list([page.record() for page in pages])
    ds.push_to_hub("plaguss/llms-txt", commit_message="Initial commit")


if __name__ == "__main__":
    asyncio.run(main())