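"""Minimal retrieval-augmented chat demo built with Gradio and LangChain.

Uploaded .txt/.pdf files are split into chunks, embedded with a
sentence-transformers model, and stored in a local Chroma index; chat
queries are answered by a Hugging Face-hosted Mixtral endpoint, grounded
on the top-matching chunks.
"""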
import os
import random

import gradio as gr
from langchain_huggingface import HuggingFaceEndpoint, HuggingFaceEmbeddings
from langchain_chroma import Chroma
from langchain_text_splitters import CharacterTextSplitter
from pypdf import PdfReader
cwd = os.getcwd()
print(cwd)
# Read the Hugging Face API token from the environment rather than hardcoding it.
token = os.environ.get("HF_TOKEN", "")
#repo_id = "mistralai/Mistral-7B-Instruct-v0.3"
repo_id = "mistralai/Mixtral-8x7B-Instruct-v0.1"
emb = "sentence-transformers/all-mpnet-base-v2"
hf = HuggingFaceEmbeddings(model_name=emb)
# The Chroma vector store is created lazily, the first time a document is embedded.
db = None
def embed_fn(inp):
    # Split the raw text into overlapping chunks, embed them, and (re)build
    # the vector store that run_llm() queries.
    global db
    text_splitter = CharacterTextSplitter(chunk_size=200, chunk_overlap=10)
    documents = text_splitter.split_text(inp)
    db = Chroma.from_texts(
        documents,
        embedding=hf,
        persist_directory=f"{cwd}/chroma_langchain_db",
    )
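
# Example (hypothetical file name): embed a text file, then retrieval works:
#   embed_fn(read_txt("notes.txt"))
#   db.similarity_search_by_vector(hf.embed_query("..."), k=3)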
    
def proc_doc(doc_in):
    # Read each uploaded file, embed its text, and stream progress messages
    # to the HTML status component.
    for doc in doc_in:
        if doc.endswith(".txt"):
            yield f"Loading Document: {doc}"
            outp = read_txt(doc)
            embed_fn(outp)
            yield "Loaded"
        elif doc.endswith(".pdf"):
            yield f"Loading Document: {doc}"
            outp = read_pdf(doc)
            embed_fn(outp)
            yield "Loaded"


def read_txt(txt_path):
    # Return the full contents of a plain-text file.
    with open(txt_path, "r") as f:
        return f.read()

def read_pdf(pdf_path):
    # Concatenate the extracted text of every page in the PDF.
    reader = PdfReader(pdf_path)
    text = ""
    for page in reader.pages:
        text = f"{text}\n{page.extract_text()}"
    return text
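
# Note: pypdf's extract_text() returns an empty string for pages with no
# extractable text (e.g. scanned images), so such PDFs embed as blank text
# unless OCR is applied first.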
def run_llm(input_text, history):
    # Retrieve the chunks most similar to the query, if a document is loaded.
    docs = []
    try:
        qur = hf.embed_query(input_text)
        docs = db.similarity_search_by_vector(qur, k=3)
        print(docs)
    except Exception as e:
        print(e)
    llm = HuggingFaceEndpoint(
        repo_id=repo_id,
        max_new_tokens=2056,
        seed=random.randint(1, 99999999999),
        top_k=10,
        top_p=0.95,
        typical_p=0.95,
        temperature=0.01,
        repetition_penalty=1.03,
        streaming=True,
        huggingfacehub_api_token=token,
    )
    # Mixtral-Instruct expects the [INST] ... [/INST] prompt format.
    prompt = (
        f"[INST] Use this data to help answer the user's questions: {str(docs)} [/INST]"
        f"[INST] {input_text} [/INST]"
    )
    out = ""
    for chunk in llm.stream(prompt):
        out += chunk
        yield out
      
    
css="""
#component-0 {
    height:400px;
}
"""

with gr.Blocks(css=css) as app:
    data = gr.State()
    with gr.Column():
        chat = gr.ChatInterface(
            fn=run_llm,
            type="tuples",
            concurrency_limit=20,
        )
    with gr.Row():
        msg = gr.HTML()
        file_in = gr.Files(file_count="multiple")
    file_in.change(proc_doc, file_in, msg)
app.queue().launch()