File size: 2,138 Bytes
3ab64ac
 
 
 
 
2d17ff2
22b7264
68cba5e
 
2d17ff2
68cba5e
3ab64ac
 
 
 
 
400e803
3ab64ac
 
 
 
 
 
8edd3eb
 
 
 
 
 
 
 
 
 
 
 
7ebdd15
8edd3eb
7ebdd15
 
 
 
 
 
 
 
24a7885
22b7264
68cba5e
22b7264
 
 
 
a6f29ed
24a7885
22b7264
 
 
 
 
e491ae1
 
4af9e36
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import glob
import os
from langchain.text_splitter import RecursiveCharacterTextSplitter, SentenceTransformersTokenTextSplitter
from transformers import AutoTokenizer
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.embeddings import HuggingFaceEmbeddings, HuggingFaceInferenceAPIEmbeddings
from langchain_community.vectorstores import Qdrant
#from dotenv import load_dotenv
#load_dotenv()

#HF_token = os.environ["HF_TOKEN"]
# Base directory for input PDFs.
# NOTE(review): unused by the visible code — process_pdf() hard-codes its
# own './data/...' paths; confirm whether this constant is read elsewhere.
path_to_data = "./data/"


def process_pdf(files=None, chunk_size=256):
    """Load PDFs, split them into token-sized chunks, and index each file
    into its own in-memory Qdrant collection.

    Args:
        files: Optional mapping of collection name -> PDF path. Defaults to
            the two bundled MWTS reports (backward compatible with the
            original hard-coded behavior).
        chunk_size: Target chunk size in tokens for the splitter; overlap is
            derived as chunk_size // 10.

    Returns:
        dict mapping each successfully loaded file key to its Qdrant
        vector-store instance.
    """
    if files is None:
        files = {'ABC': './data/MWTS2021.pdf',
                 'XYZ': './data/MWTS2022.pdf'}

    # Load each PDF; skip (but report) files that fail to load so one bad
    # path does not abort the whole batch.
    docs = {}
    for file, value in files.items():
        try:
            docs[file] = PyMuPDFLoader(value).load()
        except Exception as e:
            # Include the offending key/path so failures are diagnosable.
            print(f"Exception loading {file} ({value}): ", e)

    # Text splitter based on the tokenizer of a model of your choosing,
    # so chunks fit a transformer's context window measured in tokens.
    # langchain text splitters: https://python.langchain.com/docs/modules/data_connection/document_transformers/
    text_splitter = RecursiveCharacterTextSplitter.from_huggingface_tokenizer(
            AutoTokenizer.from_pretrained("BAAI/bge-small-en-v1.5"),
            chunk_size=chunk_size,
            chunk_overlap=int(chunk_size / 10),
            add_start_index=True,
            strip_whitespace=True,
            separators=["\n\n", "\n"],
    )

    # Split each document and tag every chunk with its source key so
    # retrieved chunks can be traced back to the originating file.
    all_documents = {}
    for file, value in docs.items():
        doc_processed = text_splitter.split_documents(value)
        for doc in doc_processed:
            doc.metadata["source"] = file
        all_documents[file] = doc_processed

    print(all_documents.keys())

    # Local embedding model (no API token required).
    embeddings = HuggingFaceEmbeddings(
      model_name="sentence-transformers/all-mpnet-base-v2"
    )

    # One in-memory Qdrant collection per input file.
    qdrant_collections = {}
    for file, value in all_documents.items():
        print("embeddings for:", file)
        qdrant_collections[file] = Qdrant.from_documents(
            value,
            embeddings,
            location=":memory:",
            collection_name=file,
        )
    print("done")
    return qdrant_collections