# h_novel/examples/hnovel_spider/ltxsba_spider.py
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse
from glob import glob
from pathlib import Path
import re
from typing import Set
from tqdm import tqdm
import requests
from project_settings import project_path
from toolbox.text_preprocess.filename_process import FilenamePreprocess
def get_args():
    """Parse command-line arguments for the spider.

    Returns an ``argparse.Namespace`` with a single ``data_dir`` attribute:
    the directory where downloaded novels are written (defaults to
    ``<project>/data/ltxsba``).
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--data_dir",
        type=str,
        default=(project_path / "data/ltxsba").as_posix(),
    )
    return arg_parser.parse_args()
# Browser-like request headers so the site serves regular desktop pages.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36",
}

# One table row on an "all visit" ranking page; captures (book id, book title).
all_visit_page_pattern = r"""<td class="L"><a href="http://www.ltxsba.xyz/bookcv/(.*?).html">(.*?)</a></td>"""
# The <span> chunks that carry the novel body on a download page.
span_pattern = r"""<span>(.*?)</span>"""


def parse_all_visit_page_text(text):
    """Scrape a ranking page and return a list of ``(book_id, title)`` tuples."""
    return re.findall(all_visit_page_pattern, text, flags=re.IGNORECASE)
def _decode_entities(text: str) -> str:
    """Best-effort HTML-entity cleanup shared by text and filename handling.

    The substitution order is deliberately kept from the original script
    (e.g. ``&amp;`` is handled before the bare ``amp;`` fallback).
    """
    text = re.sub(r"&amp;", " ", text)
    text = re.sub(r"&nbsp;", " ", text)
    text = re.sub(r"quot;", "\"", text)
    text = re.sub(r"nbsp;", " ", text)
    text = re.sub(r"amp;", " ", text)
    text = re.sub(r"&#10008", "✘", text)
    text = re.sub(r"lt;", "<", text)
    text = re.sub(r"gt;", ">", text)
    return text


def _clean_text(text: str) -> str:
    """Strip layout whitespace and decode entities in a downloaded novel body."""
    text = re.sub(r"[\u0020]{4,}", "", text)
    text = re.sub(r"[\t]", "", text)
    # NOTE(review): pattern copied verbatim from the original; confirm whether
    # this is meant to be an ASCII or full-width (U+3000) space.
    text = re.sub(r" ", "", text)
    text = re.sub(r"\n\n", "\n", text)
    text = _decode_entities(text)
    # collapse long decorative runs the site uses as separators
    text = re.sub(r"⌒{4,}", "⌒⌒⌒", text)
    text = re.sub(r"#{4,}", "###", text)
    return text


def _sanitize_name(name: str) -> str:
    """Make a book title safe to use as a filename.

    Same character set as the original chain of ``str.replace`` calls (the
    duplicated ``~`` replacement has been deduplicated — it was a no-op).
    """
    for ch in "|\\/:~?":
        name = name.replace(ch, "_")
    for ch in "*\"<>":
        name = name.replace(ch, "")
    name = _decode_entities(name)
    name = name.replace(" ", "")
    name = name.replace("#", "")
    return name


def main():
    """Crawl ltxsba ranking pages 50-349 and download each unseen novel.

    Books already present in any sibling ``ltxsba*`` directory (identified by
    the trailing ``_<id>`` in the filename stem) are skipped. Each novel is
    written to ``data_dir`` as ``<clean title>_<book id>.txt`` in UTF-8.
    """
    args = get_args()

    data_dir = Path(args.data_dir)
    data_dir.mkdir(parents=True, exist_ok=True)

    fn_preprocess = FilenamePreprocess()

    # Indices of books fetched by earlier runs, so the crawl is resumable.
    finished_idx: Set[int] = set()
    for filename in data_dir.parent.glob("ltxsba*/*.txt"):
        finished_idx.add(int(filename.stem.split("_")[-1]))
    print("finished idx: {}".format(len(finished_idx)))

    # Renamed from `idx` to avoid being shadowed by the per-book id below.
    for page_idx in tqdm(range(50, 350)):
        url = "http://www.ltxsba.xyz/top/allvisit_{}.html".format(page_idx)
        try:
            resp = requests.get(url, headers=headers, timeout=2)
        except requests.RequestException:
            # One unreachable listing page should not abort the whole crawl.
            continue
        # The site serves GBK; decode directly instead of round-tripping
        # through a temp file. Normalize newlines to match the old text-mode
        # read (universal newlines), so the "\n\n" cleanup below still works.
        text = resp.content.decode("gbk", errors="ignore")
        text = text.replace("\r\n", "\n").replace("\r", "\n")

        for book_idx, name in parse_all_visit_page_text(text):
            if int(book_idx) in finished_idx:
                continue
            download_url = "http://www.ltxsba.xyz/down/{}".format(book_idx)
            try:
                # timeout added: the original call could hang indefinitely
                resp = requests.get(download_url, timeout=30)
            except Exception:
                continue
            text = resp.content.decode("gbk", errors="ignore")
            text = text.replace("\r\n", "\n").replace("\r", "\n")

            # The novel body lives in <span> blocks; fall back to the raw
            # page when none are found.
            spans = re.findall(span_pattern, text, flags=re.DOTALL)
            if spans:
                text = "".join(spans)
            if len(text) == 0:
                continue

            text = _clean_text(text)

            name = _sanitize_name(str(name))
            name = fn_preprocess.process(name)
            print(name)

            out_file = data_dir / "{}_{}.txt".format(name, book_idx)
            with open(out_file, "w", encoding="utf-8") as f:
                f.write(text)
    return
if __name__ == '__main__':
main()