"""Load html from files, clean up, split, ingest into Weaviate."""
import os
import pickle
from pathlib import Path

from bs4 import BeautifulSoup
from markdown import markdown

from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS

# Confirm the Hugging Face API key is set without echoing the secret.
assert "HUGGINGFACE_APIKEY" in os.environ, "HUGGINGFACE_APIKEY is not set"

def clean_data(data):
    """Render Markdown to HTML, strip the tags, and drop blank lines."""
    html = markdown(data)
    soup = BeautifulSoup(html, "html.parser")
    text = soup.get_text()
    return "\n".join(line for line in text.split("\n") if line)

# Walk the docs/ tree and collect cleaned text plus a source path for each
# Markdown file.
docs = []
metadatas = []
for p in Path("docs").rglob("*"):
    if p.is_dir():
        continue
    if p.suffix.lower() in (".md", ".mdx"):
        with open(p) as f:
            print(p)
            filename = os.path.splitext(p)[0]  # source path without extension
            docs.append(clean_data(f.read()))
            metadatas.append({"source": filename})

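# chunk_size and chunk_overlap are measured in characters (length_function=len);
# the 64-character overlap preserves context across adjacent chunks.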
text_splitter = CharacterTextSplitter(
    separator="\n",
    chunk_size=512,
    chunk_overlap=64,
    length_function=len,
)

documents = text_splitter.create_documents(docs, metadatas=metadatas)

print("making embedding")
embedding = HuggingFaceEmbeddings()

print("beginning construction of faiss")
search_index = FAISS.from_documents(documents, embedding)

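# Note: newer LangChain versions also offer FAISS.save_local/load_local,
# which avoids depending on the index object being picklable.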
print("beginning pickle")
with open("docs.pkl", 'wb') as f:
    pickle.dump(search_index, f)

print("Pickle complete")