zeyadahmedd committed
Commit 058640f
Parent: bc6ebf4

Upload 3 files

Files changed (3)
  1. test/llama.py +67 -0
  2. test/new.py +43 -0
  3. test/test.py +82 -0
test/llama.py ADDED
@@ -0,0 +1,67 @@
+ from langchain.llms import LlamaCpp
+ from langchain import PromptTemplate, LLMChain
+ from langchain.callbacks.manager import CallbackManager
+ from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+
+ # Sample English question
+ en_Pr = """
+ who is salma
+ """
+ # Arabic test contexts; roughly: "in salma.pdf: Salma is an engineer",
+ # "in salma.pdf: Salma is a student at the Faculty of Computers and Information",
+ # "in arkleap.pdf: Salma is an employee in the AI department"
+ context = ["في سلمي.pdf:سلمي هي مهندسة", "في سلمي.pdf:سلمي هي طالبة في كلية حاسبات و معلومات", "في اركلياب.pdf:سلمي هي موضفة في قسم الذكاء الاصطناعي"]
+ en_Cont = ['in salma_ahmed.pdf: salma is a computer developer', 'in salmaaaaa3333.pdf: salma is an employee in arkleap ', 'in salmaaaaa3333.pdf: salma works from 9 to 5 ', 'in italy_emploee.pdf: salma is a agent who works as a spy ', 'in zolompa7.pdf:']
+
+ # Earlier prompt draft, kept for reference:
+ # template = """You are given a context of answers to a question over multiple pdfs, in the format "in [pdf name]: [answer of the query in the pdf]".
+ # Answer the following question with related, reasoned answers from the given contexts. The context is a list with all possible answers per pdf name; don't mix the answers of different pdfs together, only give answers for each pdf individually.
+ # Don't generate answers from your own data; generate only from the provided contexts.
+ # If the contexts don't provide an answer or aren't related to the question, respond only with "there is no answer for the provided question".
+ # If the question is in one language and the context is in another but there is an answer, translate and generate the answer in both languages.
+ # question:{question}
+ # context:{context}
+ # answer:
+ # """
+
+ def llama_local(query, context):
+     system_prompt = """You are a helpful assistant; you will use the provided context to answer user questions.
+ Read the given context before answering questions and think step by step. If you cannot answer a user question based on
+ the provided context, inform the user. Do not use any other information to answer the user. Provide a detailed answer to the question."""
+     B_INST, E_INST = "[INST]", "[/INST]"
+     B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+     SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
+     instruction = """
+ Context: {context}
+ User: {question}"""
+     prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
+     prompt = PromptTemplate(template=prompt_template, input_variables=["question", "context"])
+     callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
+     llm = LlamaCpp(
+         # Raw string avoids accidental backslash escapes in the Windows path
+         model_path=r"C:\Users\zeyad\Desktop\pythonProject3\trainmodel\llama-13B-Q4_K_M.gguf",
+         callback_manager=callback_manager,
+         verbose=True,
+         temperature=0,
+         top_p=1,
+     )
+     llm_chain = LLMChain(prompt=prompt, llm=llm)
+     return llm_chain.run(question=query, context=context)
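+
+ # Example usage (a minimal sketch): run the chain once over the sample English
+ # question and contexts defined above. Assumes the GGUF model file exists at
+ # the path configured in llama_local.
+ if __name__ == "__main__":
+     print(llama_local(en_Pr, en_Cont))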
test/new.py ADDED
@@ -0,0 +1,43 @@
+ from gradio_client import Client
+ import time
+
+ # Sample Arabic question ("who is salma?"):
+ # prompt = """
+ # من هي سلمي؟
+ # """
+ # Sample English question:
+ # en_Pr = """
+ # who is salma
+ # """
+ # Arabic test contexts (translated in test/llama.py):
+ # context = ["في سلمي.pdf:سلمي هي مهندسة", "في سلمي.pdf:سلمي هي طالبة في كلية حاسبات و معلومات", "في اركلياب.pdf:سلمي هي موضفة في قسم الذكاء الاصطناعي"]
+ # en_Cont = ['in salma_ahmed.pdf: salma is a computer developer', 'in salmaaaaa3333.pdf: salma is an employee in arkleap ', 'in salmaaaaa3333.pdf: salma works from 9 to 5 ', 'in italy_emploee.pdf: salma is a agent who works as a spy ', 'in zolompa7.pdf:']
+
+ old = time.time()
+ system_prompt = """You are a helpful assistant; you will use the provided context to answer user questions.
+ Read the given context before answering questions and think step by step. If you cannot answer a user question based on
+ the provided context, inform the user. Do not use any other information to answer the user. Provide a detailed answer to the question."""
+ B_INST, E_INST = "[INST]", "[/INST]"
+ B_SYS, E_SYS = "<<SYS>>\n", "\n<</SYS>>\n\n"
+ SYSTEM_PROMPT = B_SYS + system_prompt + E_SYS
+ instruction = """
+ Context: {context}
+ User: {question}"""
+ prompt_template = B_INST + SYSTEM_PROMPT + instruction + E_INST
+
+ def connect_to_llama(query, context):
+     client = Client("https://huggingface-projects-llama-2-13b-chat.hf.space/--replicas/5c42d8wx6/")
+     # Positional arguments follow the Space's /chat endpoint; the two unlabeled
+     # values below are additional sampling sliders (their exact roles are not
+     # documented here, so the original bare values are kept as-is).
+     result = client.predict(
+         """
+ question:"{}"
+ context:"{}"
+ answer:
+ """.format(query, context),  # str in 'parameter_7' Textbox component
+         prompt_template,  # str in 'Optional system prompt' Textbox component
+         2048,  # int | float (numeric value between 0 and 4096) in 'Max new tokens' Slider component
+         0.1,
+         0.05,
+         1,  # int | float (numeric value between 0.0 and 1) in 'Top-p (nucleus sampling)' Slider component
+         1,  # int | float (numeric value between 1.0 and 2.0) in 'Repetition penalty' Slider component
+         api_name="/chat"
+     )
+     # print(time.time() - old, "sec")
+     return result
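+
+ # Example usage (a minimal sketch): assumes the hosted Space replica above is
+ # still live; the sample context mirrors the fixtures in test/llama.py.
+ if __name__ == "__main__":
+     sample_context = ['in salma_ahmed.pdf: salma is a computer developer', 'in salmaaaaa3333.pdf: salma is an employee in arkleap ']
+     print(connect_to_llama("who is salma", sample_context))
+     print(time.time() - old, "sec")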
test/test.py ADDED
@@ -0,0 +1,82 @@
+ from transformers import pipeline
+
+ # Local LaMini-T5 checkpoint; raw string avoids accidental backslash escapes on Windows
+ checkpoint = r"C:\Users\zeyad\Desktop\pythonProject9\LaMini-T5-738M"
+ model = pipeline('text2text-generation', model=checkpoint)
+
+ # query = " who is salma"
+ # context = ["in salma_ahmed.pdf: salma is a computer developer", "in salmaaaaa3333.pdf: salma is an employee in arkleap ", "in salmaaaaa3333.pdf: salma works from 9 to 5 ", "in italy_emploee.pdf: salma is a agent who works as a spy ", "in zolompa7.pdf:"]
+
+ # Earlier prompt drafts, kept for reference:
+ # input_prompt = """
+ # Given a question and a context, generate reasoned answers from the context in a numbered list, one per pdf, with the format "in [pdf name]: (answer you generate)\n"...
+ # The context is an array that contains multiple answers to the query from multiple pdfs, in the format "in [pdf name]: [answer of the query in the pdf]"...
+ # Don't generate answers from your own data; generate only from the provided contexts...
+ # Question:"{}",
+ # Contexts:"{}"
+ # Answers:
+ # (number)-in (pdf name): (answer)
+ # """.format(query, context)
+
+ # input_prompt = """
+ # Given a question and a context, generate related, reasoned answers from the given contexts...
+ # The context is an array that contains multiple answers to the query from multiple pdfs, in the format "in [pdf name]: [answer of the query in the pdf]"...
+ # Do not generate answers from your own data; only generate answers from the provided contexts...
+ # If the contexts don't provide an answer or aren't related to the question, respond with "there is no answer for the provided question"
+ # Question:"{}"..
+ # Contexts:"{}"..
+ # Answers [CREATE LIST with the answers for each pdf] :list
+ # """.format(query, context)
+
+ # input_prompt = """
+ # question:{}
+ # contexts that hold multiple answers across multiple pdfs, in the format "in [pdf name]: [answer of the query in the pdf]":{}
+ # Answer only from the contexts, in a numbered list; don't generate the answer from your own data; answer only if the answers relate to the question, listing all of them; don't mix answers: give each pdf name its own list item:
+ # """.format(query, context)
+
+ # input_prompt = """
+ # Given a question and contexts that you're only allowed to answer from, and only if the answers relate to the question. Don't generate answers from your own data; generate only from the provided contexts.
+ # The context is an array containing answers to the question from multiple pdfs; each answer has the format "in [pdf name]: [answer of the query in the pdf]"...
+ # If the contexts don't provide an answer or aren't related to the question, respond with "there is no answer for the provided question"...
+ # The question may have multiple answers within the same pdf; generate the answer for each pdf from its own data only, and don't consider the other pdfs' data.
+ # List the answers in a bullet list; each item has the format "in [pdf name]: [answer reasoned from this pdf's data only]"..
+ # Question:{}
+ # Contexts:{}
+ # """.format(query, context)
+
+ # input_prompt = f"""
+ # Answer the following question with related, reasoned answers from the given contexts, listing all possible answers, but don't reason over all the data together; only reason over data sharing the same pdf name.
+ # Don't generate answers from your own data; generate only from the provided contexts.
+ # If the contexts don't provide an answer or aren't related to the question, respond with "there is no answer for the provided question"
+ # question:"{query}"+"list what you know"
+ # context:"{context}"
+ # answer:
+ # """
+
+ # query = "what does salma study?"
+ # context = ["salma is a computer developer( from salma_ahmed.pdf)", " salma is an employee in arkleap (from salmaaaaa3333.pdf) ", " salma works from 9 to 5 ( from salmaaaaa3333.pdf)", "salma is a agent who works as a spy (from italy_emploee.pdf )", "(from salmaaaaa3333.pdf)"]
+
+ # input_prompt = """
+ # Given a question and a context, generate reasoned answers from the context in a bullet list, mentioning the source of each answer in the format "in [pdf name]: [answer you generate]\n"...
+ # Provide the answer as described below; don't repeat answers; generate answers only from the contexts, and only answer if there is a context related to the question.
+ # For every generated answer, provide its metadata as given in the context: (from **.pdf)
+ # Question: "{}",
+ # Contexts: {}
+ # Answers:
+ # -[generated answer] (from **.pdf)
+ # """.format(query, ', '.join(context))
+
+ query = "who is zozbra?"
+ context = ["salma is a computer developer( from salma_ahmed.pdf)", " salma is an employee in arkleap (from salmaaaaa3333.pdf) ", " salma works from 9 to 5 ( from salmaaaaa3333.pdf)", "salma is a agent who works as a spy (from italy_emploee.pdf )", "(from salmaaaaa3333.pdf)"]
+
+ input_prompt = """
+ Question: "{}",
+ Contexts: {}
+ You're a context-reasoning AI assistant that gives individual answers, not one summarized answer, and mentions the pdf name for each answer it generates.
+ You're given a question and contexts whose metadata appears inside brackets in the format (from example.pdf).
+ Generate an answer for each pdf individually, in a bullet list, and mention its metadata. Generate answers only from the contexts, and only answer if there is a context related to the question.
+ You're not allowed to repeat answers.
+ You're not allowed to give only one answer that summarizes all the pdfs.
+ You're not allowed to guess; if the question isn't related to the contexts, respond with "no answer for this question".
+ """.format(query, ', '.join(context))
+ # ', '.join(context) flattens the context list into one comma-separated string for the prompt
+ generated_text = model(input_prompt, max_length=2048, do_sample=False)[0]['generated_text']
+ print(generated_text)