# my_app/model_manager.py
import google.generativeai as genai
import chat.arxiv_bot.arxiv_bot_utils as utils
import json

# Module-level model handles, lazily initialised by get_model().
model = None
model_retrieval = None
model_answer = None

RETRIEVAL_INSTRUCT = """You are an automated chatbot that responds with exactly one of the two actions below, based on the user's question.
   1. If the guest is asking about a science topic, respond with the information in the JSON schema below:
        {
            "keywords": [a list of string keywords about the topic],
            "description": "a paragraph describing the topic in about 50 to 100 words"
        }
   2. If the guest is not asking for any information or documents, respond in the JSON schema below:
        {
            "answer": "your answer to the user question"
        }"""

ANSWER_INSTRUCT = """You are a library assistant that helps answer customer questions based on the information given.
        You always answer in a conversational form, naturally and politely.
        You must introduce all the records given; each must contain the title, authors and the link to the PDF file."""
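# Illustrative examples (assumed, not produced by the model) of the two reply
# shapes RETRIEVAL_INSTRUCT asks for:
#   {"keywords": ["diffusion models", "image generation"],
#    "description": "Diffusion models are generative models that ..."}
#   {"answer": "Hello! How can I help you today?"}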

def create_model():
    """Configure the Gemini API and build the three shared model handles."""
    with open("apikey.txt", "r") as apikey:
        # strip the trailing newline, which would otherwise corrupt the key
        key = apikey.readline().strip()
        genai.configure(api_key=key)
    # log the models that support content generation
    for m in genai.list_models():
        if 'generateContent' in m.supported_generation_methods:
            print(m.name)
    config = genai.GenerationConfig(max_output_tokens=2048,
                                    temperature=1.0)
    safety_settings = [
        {
            "category": "HARM_CATEGORY_DANGEROUS",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_HARASSMENT",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_HATE_SPEECH",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_SEXUALLY_EXPLICIT",
            "threshold": "BLOCK_NONE",
        },
        {
            "category": "HARM_CATEGORY_DANGEROUS_CONTENT",
            "threshold": "BLOCK_NONE",
        },
    ]
    global model, model_retrieval, model_answer
    model = genai.GenerativeModel("gemini-1.5-pro-latest",
                                  generation_config=config,
                                  safety_settings=safety_settings)
    model_retrieval = genai.GenerativeModel("gemini-1.5-pro-latest",
                                            generation_config=config,
                                            safety_settings=safety_settings,
                                            system_instruction=RETRIEVAL_INSTRUCT)
    model_answer = genai.GenerativeModel("gemini-1.5-pro-latest",
                                         generation_config=config,
                                         safety_settings=safety_settings,
                                         system_instruction=ANSWER_INSTRUCT)
    return model, model_answer, model_retrieval

def get_model():
    """Return the shared model handles, initialising them on first use."""
    global model, model_answer, model_retrieval
    if model is None:
        # initialise the models here, on the first call
        model, model_answer, model_retrieval = create_model()
    return model, model_answer, model_retrieval
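# Typical usage (a sketch): call get_model() wherever a handle is needed; the
# first call builds all three models, later calls reuse them.
#   model, model_answer, model_retrieval = get_model()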

def extract_keyword_prompt(query):
    """Build a prompt whose reply is a JSON block of arguments for querying the database."""

    prompt = """[INST] SYSTEM: You are an automated chatbot that responds with exactly one of the two actions below, based on the user's question.
   1. If the guest is asking about a science topic, respond with the information in the JSON schema below:
        {
            "keywords": [a list of string keywords about the topic],
            "description": "a paragraph describing the topic in about 50 to 100 words"
        }
   2. If the guest is not asking for any information or documents, respond in the JSON schema below:
        {
            "answer": "your answer to the user question"
        }
   QUESTION: """ + query + """[/INST]
   ANSWER: """
    return prompt

def make_answer_prompt(input, contexts):
    """Build the final-answer prompt from the user input and the queried context."""

    prompt = (
        """[INST] You are a library assistant that helps answer the customer QUESTION based on the INFORMATION given.
        You always answer in a conversational form, naturally and politely.
        You must introduce all the records given; each must contain the title, authors and the link to the PDF file.
  QUESTION: {input}
  INFORMATION: '{contexts}'
  [/INST]
  ANSWER:
  """
    ).format(input=input, contexts=contexts)
    return prompt

def retrieval_chat_template(question):
    """Wrap a question as a Gemini chat message for the retrieval model."""
    return {
        "role": "user",
        "parts": [f"QUESTION: {question} \n ANSWER:"]
    }

def answer_chat_template(question, contexts):
    """Wrap a question plus retrieved context as a Gemini chat message for the answer model."""
    return {
        "role": "user",
        "parts": [f"QUESTION: {question} \n INFORMATION: {contexts} \n ANSWER:"]
    }
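# For illustration (hypothetical values), answer_chat_template("What is arXiv?",
# "arXiv is an open-access archive ...") returns a dict in Gemini's chat format:
#   {"role": "user",
#    "parts": ["QUESTION: What is arXiv? \n INFORMATION: arXiv is an open-access archive ... \n ANSWER:"]}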

def response(args, db_instance):
    """Create the response context, based on the parsed input arguments."""
    if "answer" in args:
        return args['answer'], None  # answer directly, no retrieval needed

    if "keywords" in args:
        # perform query
        query_texts = args["description"]
        keywords = args["keywords"]
        results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts)
        # print(results)
        ids = results['metadatas'][0]
        if len(ids) == 0:
            # nothing cached locally, so go crawl some fresh records
            new_records = utils.crawl_arxiv(keyword_list=keywords, max_results=10)
            print("Got new records: ", len(new_records))
            if isinstance(new_records, str):
                return "Error occurred, information not found", new_records
            utils.db.add(new_records)
            db_instance.add(new_records)
            results = utils.db.query_relevant(keywords=keywords, query_texts=query_texts)
            ids = results['metadatas'][0]
            print("Re-queried on chromadb, results: ", ids)
        paper_id = [meta['paper_id'] for meta in ids]
        paper_info = db_instance.query_id(paper_id)
        print(paper_info)
        records = []  # each record holds title (2), author (3), link (6)
        result_string = ""
        if paper_info:
            for i in range(len(paper_info)):
                result_string += "Record no.{} - Title: {}, Author: {}, Link: {}, ".format(
                    i + 1, paper_info[i][2], paper_info[i][3], paper_info[i][6])
                record_id = paper_info[i][0]
                selected_document = utils.db.query_exact(record_id)["documents"]
                doc_str = "Summary:"
                for doc in selected_document:
                    doc_str += doc + " "
                result_string += doc_str
                records.append([paper_info[i][2], paper_info[i][3], paper_info[i][6]])
            return result_string, records
        else:
            return "Information not found", "Information not found"
        # invoke llm and return result

    # Disabled title-based lookup branch, kept for reference:
    # if "title" in args:
    #     title = args['title']
    #     authors = utils.authors_str_to_list(args['author'])
    #     paper_info = db_instance.query(title=title, author=authors)
    #     # if the query finds nothing, go crawl for the exact paper
    #     # print(paper_info)

    #     if len(paper_info) == 0:
    #         new_records = utils.crawl_exact_paper(title=title, author=authors)
    #         print("Got new records: ", len(new_records))
    #         if isinstance(new_records, str):
    #             # print(new_records)
    #             return "Error occurred, information not found", "Information not found"
    #         utils.db.add(new_records)
    #         db_instance.add(new_records)
    #         paper_info = db_instance.query(title=title, author=authors)
    #         print("Re-queried on chromadb, results: ", paper_info)
    #     # -------------------------------------
    #     records = []  # get title (2), author (3), link (6)
    #     result_string = ""
    #     for i in range(len(paper_info)):
    #         result_string += "Title: {}, Author: {}, Link: {}".format(paper_info[i][2], paper_info[i][3], paper_info[i][6])
    #         records.append([paper_info[i][2], paper_info[i][3], paper_info[i][6]])
    #     # process results:
    #     if len(result_string) == 0:
    #         return "Information not found", "Information not found"
    #     return result_string, records
    #     # invoke llm and return result

def full_chain_single_question(input_prompt, db_instance):
    temp_answer = None  # initialised so the except branch can never hit an unbound name
    try:
        first_prompt = extract_keyword_prompt(input_prompt)
        temp_answer = model.generate_content(first_prompt).text

        args = json.loads(utils.trimming(temp_answer))
        contexts, results = response(args, db_instance)
        if not results:
            # print(contexts)
            return "Random question, direct return", contexts
        else:
            output_prompt = make_answer_prompt(input_prompt, contexts)
            answer = model.generate_content(output_prompt).text
            return temp_answer, answer
    except Exception as e:
        # print(e)
        return temp_answer, "Error occurred: " + str(e)
    

def format_chat_history_from_web(chat_history: list):
    """Convert web-style messages into Gemini's chat format."""
    temp_chat = []
    for message in chat_history:
        temp_chat.append(
            {
                "role": message["role"],
                "parts": [message["content"]]
            }
        )
    return temp_chat
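# For example, [{"role": "user", "content": "hi"}] becomes
# [{"role": "user", "parts": ["hi"]}]; the {"role", "content"} shape is
# assumed to be what the web front end sends.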

# Earlier history-based variant, kept disabled; superseded by the version below.
# def full_chain_history_question(chat_history: list, db_instance):
#     try:
#         temp_chat = format_chat_history_from_web(chat_history)
#         print('Extracted temp chat: ', temp_chat)
#         first_prompt = extract_keyword_prompt(temp_chat[-1]["parts"][0])
#         temp_answer = model.generate_content(first_prompt).text

#         args = json.loads(utils.trimming(temp_answer))
#         contexts, results = response(args, db_instance)
#         print('Context extracted: ', contexts)
#         if not results:
#             return "Random question, direct return", contexts
#         else:
#             QA_Prompt = make_answer_prompt(temp_chat[-1]["parts"][0], contexts)
#             temp_chat[-1]["parts"] = QA_Prompt
#             print(temp_chat)
#             answer = model.generate_content(temp_chat).text
#             return temp_answer, answer
#     except Exception as e:
#         # print(e)
#         return temp_answer, "Error occurred: " + str(e)

def full_chain_history_question(chat_history: list, db_instance):
    first_answer = None  # initialised so the except branch can never hit an unbound name
    try:
        temp_chat = format_chat_history_from_web(chat_history)
        question = temp_chat[-1]['parts'][0]
        first_answer = model_retrieval.generate_content(temp_chat).text

        print(first_answer)
        args = json.loads(utils.trimming(first_answer))

        contexts, results = response(args, db_instance)
        if not results:
            return "Random question, direct return", contexts
        else:
            print('Context to answers: ', contexts)
            answer_chat = answer_chat_template(question, contexts)
            temp_chat[-1] = answer_chat
            answer = model_answer.generate_content(temp_chat).text
            return first_answer, answer
    except Exception as e:
        if first_answer:
            return first_answer, "Error occurred: " + str(e)
        else:
            return "No answer", "Error occurred: " + str(e)
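# A minimal smoke-test sketch (an assumption, not part of the original app):
# a greeting exercises the direct-answer branch of response(), so no real
# db_instance is needed; retrieval questions would require the project's
# actual database wrapper, which must provide add() and query_id().
if __name__ == "__main__":
    get_model()
    chat_history = [{"role": "user", "content": "Hello there!"}]
    raw, answer = full_chain_history_question(chat_history, db_instance=None)
    print("Raw retrieval reply:", raw)
    print("Final answer:", answer)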