from langchain.llms import LlamaCpp
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler

# Sample English query used for testing.
en_Pr = """
who is salma
"""
# Arabic context examples in the format "in [pdf name]: [answer found in that pdf]".
# Translation: "in salma.pdf: Salma is an engineer", "in salma.pdf: Salma is a student
# in the Faculty of Computers and Information", "in arkleap.pdf: Salma is an employee
# in the AI department".
context = ["ููŠ ุณู„ู…ูŠ.pdf:ุณู„ู…ูŠ ู‡ูŠ ู…ู‡ู†ุฏุณุฉ", "ููŠ ุณู„ู…ูŠ.pdf:ุณู„ู…ูŠ ู‡ูŠ ุทุงู„ุจุฉ ููŠ ูƒู„ูŠุฉ ุญุงุณุจุงุช ูˆ ู…ุนู„ูˆู…ุงุช", "ููŠ ุงุฑูƒู„ูŠุงุจ.pdf:ุณู„ู…ูŠ ู‡ูŠ ู…ูˆุถูุฉ ููŠ ู‚ุณู… ุงู„ุฐูƒุงุก ุงู„ุงุตุทู†ุงุนูŠ"]
# English context examples in the same "in [pdf name]: [answer]" format.
en_Cont = [
    'in salma_ahmed.pdf: salma is a computer developer',
    'in salmaaaaa3333.pdf: salma is an employee in arkleap',
    'in salmaaaaa3333.pdf: salma works from 9 to 5',
    'in italy_emploee.pdf: salma is an agent who works as a spy',
    'in zolompa7.pdf:',
]
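# The lists above are hand-written for testing. In a full pipeline these strings would
# presumably come from a retriever; the helper below is only a sketch of how such
# "in [pdf name]: [answer]" strings could be assembled from retrieved LangChain
# Documents. The name `format_context` and the "source" metadata key are assumptions,
# not part of the original code.
def format_context(docs):
    """Turn retrieved Documents into 'in [pdf name]: [text]' strings."""
    formatted = []
    for doc in docs:
        pdf_name = doc.metadata.get("source", "unknown.pdf")
        formatted.append(f"in {pdf_name}: {doc.page_content}")
    return formatted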
# template = """you are given contest of answers of question on multiple pdfs with format "in [pdfname]:[answer of the query in the pdf]"
# Answer the following question with related reasoning answers from the following contexts that is given in list format for each pdf name with all possible answers for it , don't mix the answers of different pdfs together , only give answers for each pdf individually"
# ..Don't generate answer from your data generate only from the provided contexts
# answer only as from the provided data ..if it's the answer make reasoning
# ..If the contexts doesn't provide an answer or isn't related to the question, respond only with "there is no answer for the provided question"
# if question in a language and the context in another language but there is an answer ..translate and generate answer with the two different languages
# question:{question}
# context:{context}
# answer:
# """
def llama_local(query, context):
    """Answer `query` from the provided `context` using a local LlamaCpp model."""
    system_prompt = """You are a helpful assistant, you will use the provided context to answer user questions.
    Read the given context before answering questions and think step by step. If you can not answer a user question based on 
    the provided context, inform the user. Do not use any other information to answer the user. Provide a detailed answer to the question."""
    B_INST, E_INST = "[INST]", "[/INST]"
    B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
    SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
    instruction = """
    Context: {context}
    User: {question}"""
    prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
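    # For reference, the string built above follows the Llama-2 chat layout implied by
    # the markers, roughly:
    #   [INST]<<SYS>>
    #   ...system prompt...
    #   <</SYS>>
    #
    #   Context: {context}
    #   User: {question}[/INST]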
    prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])
    callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
    llm = LlamaCpp(
        # Raw string avoids the invalid "\z", "\D", "\l" escape sequences in the Windows path.
        model_path=r"C:\Users\zeyad\Desktop\pythonProject3\trainmodel\llama-13B-Q4_K_M.gguf",
        callback_manager=callback_manager,
        verbose=True,
        temperature=0,
        top_p=1
    )
    llm_chain = LLMChain(prompt=prompt, llm=llm)
    return llm_chain.run(question=query, context=context)

# Run the English test query against the English contexts when the file is executed directly.
if __name__ == "__main__":
    print(llama_local(en_Pr, en_Cont))