import streamlit as st
import pickle
import os
import torch
from tqdm.auto import tqdm
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain import HuggingFacePipeline
from langchain.chains import RetrievalQA
st.set_page_config(
    page_title='aitGPT',
    page_icon='✅')
st.markdown("# Hello")
with open("ait-web-document", "rb") as fp: | |
ait_web_documents = pickle.load(fp) | |
text_splitter = RecursiveCharacterTextSplitter(
    # Split into 500-character chunks with 100 characters of overlap.
    chunk_size=500,
    chunk_overlap=100,
    length_function=len,
)
# tqdm wraps the iteration only to show progress while the input list is built.
chunked_text = text_splitter.create_documents([doc for doc in tqdm(ait_web_documents)])
st.markdown(f"Number of Documents: {len(ait_web_documents)}")
st.markdown(f"Number of chunked texts: {len(chunked_text)}")
embedding_model = HuggingFaceInstructEmbeddings(
    model_name='hkunlp/instructor-base',
    model_kwargs={'device': 'cuda' if torch.cuda.is_available() else 'cpu'},
)
persist_directory = 'db_chunk_500'
db_chunk_500 = Chroma.from_documents(
    documents=chunked_text,
    embedding=embedding_model,
    persist_directory=persist_directory,
)
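# NOTE (assumption about the installed chromadb version): with the older
# duckdb+parquet backend the index is only written to persist_directory after
# an explicit call such as
#     db_chunk_500.persist()
# while chromadb >= 0.4 persists automatically and no extra call is needed.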
print("load done") |