abdullah-alnahas
committed on
init
- __init__.py +0 -0
- agents/__init__.py +0 -0
- agents/ansari.py +265 -0
- block_css.css +106 -0
- config.py +57 -0
- main_ab_testing.py +280 -0
- requirements.txt +21 -0
- resources/prompts/system_msg_fn.txt +3 -0
- resources/prompts/system_msg_fn_v1.txt +43 -0
- tools/__init__.py +0 -0
- tools/search_hadith.py +71 -0
- tools/search_mawsuah.py +94 -0
- tools/search_quran.py +69 -0
- util/prompt_mgr.py +33 -0
__init__.py
ADDED
File without changes
agents/__init__.py
ADDED
File without changes
agents/ansari.py
ADDED
@@ -0,0 +1,265 @@
import hashlib
import json
import logging
import os
import time
import traceback
from datetime import date, datetime

import litellm
from langfuse.model import CreateGeneration, CreateTrace

from tools.search_hadith import SearchHadith
from tools.search_mawsuah import SearchMawsuah
from tools.search_quran import SearchQuran
from util.prompt_mgr import PromptMgr

if os.environ.get("LANGFUSE_SECRET_KEY"):
    from langfuse import Langfuse

    lf = Langfuse()
    lf.auth_check()


logger = logging.getLogger(__name__ + ".Ansari")
logger.setLevel(logging.INFO)
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
logger.addHandler(console_handler)


class Ansari:
    def __init__(self, settings, message_logger=None, json_format=False):
        self.settings = settings
        sq = SearchQuran(settings.KALEMAT_API_KEY)
        sh = SearchHadith(settings.KALEMAT_API_KEY)
        sm = SearchMawsuah(
            settings.VECTARA_AUTH_TOKEN,
            settings.VECTARA_CUSTOMER_ID,
            settings.VECTARA_CORPUS_ID,
        )
        self.tools = {sq.get_fn_name(): sq, sh.get_fn_name(): sh, sm.get_fn_name(): sm}
        self.model = settings.MODEL
        self.pm = PromptMgr()
        self.sys_msg = self.pm.bind(settings.SYSTEM_PROMPT_FILE_NAME).render()
        self.functions = [x.get_function_description() for x in self.tools.values()]
        self.message_history = [{"role": "system", "content": self.sys_msg}]
        self.json_format = json_format
        self.message_logger = message_logger

    def set_message_logger(self, message_logger):
        self.message_logger = message_logger

    # The trace id is a hash of the date and the first user input.
    def compute_trace_id(self):
        today = date.today()
        hashstring = str(today) + self.message_history[1]["content"]
        result = hashlib.md5(hashstring.encode())
        return "chash_" + result.hexdigest()

    def greet(self):
        self.greeting = self.pm.bind("greeting")
        return self.greeting.render()

    def process_input(self, user_input):
        self.message_history.append({"role": "user", "content": user_input})
        return self.process_message_history()

    def log(self):
        if not os.environ.get("LANGFUSE_SECRET_KEY"):
            return
        trace_id = self.compute_trace_id()
        logger.info(f"trace id is {trace_id}")
        trace = lf.trace(CreateTrace(id=trace_id, name="ansari-trace"))

        generation = trace.generation(
            CreateGeneration(
                name="ansari-gen",
                startTime=self.start_time,
                endTime=datetime.now(),
                model=self.settings.MODEL,
                prompt=self.message_history[:-1],
                completion=self.message_history[-1]["content"],
            )
        )

    def replace_message_history(self, message_history):
        self.message_history = [
            {"role": "system", "content": self.sys_msg}
        ] + message_history
        for m in self.process_message_history():
            if m:
                yield m

    def process_message_history(self):
        # Keep processing the user input until we get something from the assistant.
        self.start_time = datetime.now()
        count = 0
        failures = 0
        while self.message_history[-1]["role"] != "assistant":
            try:
                logger.info(f"Processing one round {self.message_history}")
                # Use functions only if we haven't tried too many times; we
                # yield from process_one_round so the streamed tokens pass
                # straight through to the caller.
                use_function = True
                if count >= self.settings.MAX_FUNCTION_TRIES:
                    use_function = False
                    logger.warning("Not using functions -- tries exceeded")
                yield from self.process_one_round(use_function)
                count += 1
            except Exception as e:
                failures += 1
                logger.warning(f"Exception occurred: {e}")
                logger.warning(traceback.format_exc())
                logger.warning("Retrying in 5 seconds...")
                time.sleep(5)
                if failures >= self.settings.MAX_FAILURES:
                    logger.error("Too many failures, aborting")
                    raise Exception("Too many failures")
        self.log()

    def process_one_round(self, use_function=True):
        response = None
        failures = 0
        while not response:
            try:
                # The four original call variants differed only in whether
                # `functions` and `response_format` were passed; build the
                # keyword arguments once instead of duplicating the call.
                kwargs = {
                    "model": self.model,
                    "messages": self.message_history,
                    "stream": True,
                    "timeout": 30.0,
                    "temperature": 0.0,
                    "metadata": {"generation-name": "ansari"},
                    "num_retries": 1,
                }
                if use_function:
                    kwargs["functions"] = self.functions
                if self.json_format:
                    kwargs["response_format"] = {"type": "json_object"}
                response = litellm.completion(**kwargs)
            except Exception as e:
                failures += 1
                logger.warning(f"Exception occurred: {e}")
                logger.warning(traceback.format_exc())
                logger.warning("Retrying in 5 seconds...")
                time.sleep(5)
                if failures >= self.settings.MAX_FAILURES:
                    logger.error("Too many failures, aborting")
                    raise Exception("Too many failures")

        words = ""
        function_name = ""
        function_arguments = ""
        response_mode = ""  # "words" or "fn"
        for tok in response:
            logger.debug(f"Tok is {tok}")
            delta = tok.choices[0].delta
            if not response_mode:
                # This should only trigger the first time through the loop.
                if "function_call" in delta and delta.function_call:
                    # We are in function mode.
                    response_mode = "fn"
                    function_name = delta.function_call.name
                else:
                    response_mode = "words"
                logger.info("Response mode: " + response_mode)

            # We process things differently depending on whether the model is
            # streaming a function call or text.
            if response_mode == "words":
                if delta.content is None:  # End token
                    self.message_history.append(
                        {"role": "assistant", "content": words}
                    )
                    if self.message_logger:
                        self.message_logger.log("assistant", words)
                    break
                else:
                    words += delta.content
                    yield delta.content
            elif response_mode == "fn":
                logger.debug(f"Delta in: {delta}")
                if "function_call" not in delta or delta.function_call is None:
                    # End token: run the accumulated function call.
                    # process_fn_call appends the results to the message
                    # history; its orig_question argument is unused, so the
                    # builtin `input` is passed merely as a placeholder.
                    logger.info(f"{function_name=}, {function_arguments=}")
                    yield self.process_fn_call(
                        input, function_name, function_arguments
                    )
                    break
                elif (
                    "function_call" in delta
                    and delta.function_call
                    and delta.function_call.arguments
                ):
                    function_arguments += delta.function_call.arguments
                    logger.debug(f"Function arguments are {function_arguments}")
                    yield ""  # don't surface anything while a function call streams
                else:
                    logger.warning(f"Weird delta: {delta}")
                    continue
            else:
                raise Exception("Invalid response mode: " + response_mode)

    def process_fn_call(self, orig_question, function_name, function_arguments):
        if function_name in self.tools.keys():
            args = json.loads(function_arguments)
            query = args["query"]
            results = self.tools[function_name].run_as_list(query)
            logger.debug(f"Results are {results}")
            # Now we have to pass the results back in.
            if len(results) > 0:
                for result in results:
                    self.message_history.append(
                        {"role": "function", "name": function_name, "content": result}
                    )
                    if self.message_logger:
                        self.message_logger.log("function", result, function_name)
            else:
                self.message_history.append(
                    {
                        "role": "function",
                        "name": function_name,
                        "content": "No results found",
                    }
                )
                if self.message_logger:
                    self.message_logger.log(
                        "function", "No results found", function_name
                    )
        else:
            logger.warning(f"Unknown function name: {function_name}")
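
Usage sketch for the agent above, assuming a populated .env so get_settings() can resolve the required API keys. Streamed chunks are plain strings; function-call rounds yield empty placeholders.

from agents.ansari import Ansari
from config import get_settings

# Stream one answer to stdout (sketch; requires valid API keys in the environment).
agent = Ansari(get_settings())
for chunk in agent.process_input("What does the Qur'an say about patience?"):
    if chunk:  # skip the empty/None placeholders yielded during function calls
        print(chunk, end="", flush=True)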
block_css.css
ADDED
@@ -0,0 +1,106 @@
#notice_markdown .prose {
    font-size: 110% !important;
}
#notice_markdown th {
    display: none;
}
#notice_markdown td {
    padding-top: 6px;
    padding-bottom: 6px;
}
#arena_leaderboard_dataframe table {
    font-size: 110%;
}
#full_leaderboard_dataframe table {
    font-size: 110%;
}
#model_description_markdown {
    font-size: 110% !important;
}
#leaderboard_markdown .prose {
    font-size: 110% !important;
}
#leaderboard_markdown td {
    padding-top: 6px;
    padding-bottom: 6px;
}
#leaderboard_dataframe td {
    line-height: 0.1em;
}
#about_markdown .prose {
    font-size: 110% !important;
}
#ack_markdown .prose {
    font-size: 110% !important;
}
#chatbot .prose {
    font-size: 105% !important;
}
.sponsor-image-about img {
    margin: 0 20px;
    margin-top: 20px;
    height: 40px;
    max-height: 100%;
    width: auto;
    float: left;
}

.chatbot h1, h2, h3 {
    margin-top: 8px; /* Adjust the value as needed */
    margin-bottom: 0px; /* Adjust the value as needed */
    padding-bottom: 0px;
}

.chatbot h1 {
    font-size: 130%;
}
.chatbot h2 {
    font-size: 120%;
}
.chatbot h3 {
    font-size: 110%;
}
.chatbot p:not(:first-child) {
    margin-top: 8px;
}

.typing {
    display: inline-block;
}

.cursor {
    display: inline-block;
    width: 7px;
    height: 1em;
    background-color: black;
    vertical-align: middle;
    animation: blink 1s infinite;
}

.dark .cursor {
    display: inline-block;
    width: 7px;
    height: 1em;
    background-color: white;
    vertical-align: middle;
    animation: blink 1s infinite;
}

@keyframes blink {
    0%, 50% { opacity: 1; }
    50.1%, 100% { opacity: 0; }
}

.app {
    max-width: 100% !important;
    padding: 20px !important;
}

a {
    color: #1976D2; /* Your current link color, a shade of blue */
    text-decoration: none; /* Removes underline from links */
}
a:hover {
    color: #63A4FF; /* This can be any color you choose for hover */
    text-decoration: underline; /* Adds underline on hover */
}
config.py
ADDED
@@ -0,0 +1,57 @@
import logging
from functools import lru_cache
from typing import Union, Optional

from pydantic_settings import BaseSettings, SettingsConfigDict
from pydantic import SecretStr, PostgresDsn, DirectoryPath, Field, validator

logger = logging.getLogger(__name__)


class Settings(BaseSettings):
    model_config = SettingsConfigDict(
        env_file=".env",  # relative path, so the repo is portable
        env_file_encoding="utf-8",
        case_sensitive=True,
    )

    DATABASE_URL: PostgresDsn = Field(default="postgresql://mwk@localhost:5432/mwk")
    MAX_THREAD_NAME_LENGTH: int = Field(default=100)

    SECRET_KEY: SecretStr = Field(default="secret")
    ALGORITHM: str = Field(default="HS256")
    ENCODING: str = Field(default="utf-8")
    ACCESS_TOKEN_EXPIRY_HOURS: int = Field(default=2)
    REFRESH_TOKEN_EXPIRY_HOURS: int = Field(default=24 * 90)

    ORIGINS: Union[str, list[str]] = Field(
        default=["https://ansari.chat", "http://ansari.chat"], env="ORIGINS"
    )
    API_SERVER_PORT: int = Field(default=8000)

    OPENAI_API_KEY: SecretStr
    PGPASSWORD: SecretStr
    KALEMAT_API_KEY: SecretStr
    VECTARA_AUTH_TOKEN: SecretStr
    VECTARA_CUSTOMER_ID: str
    VECTARA_CORPUS_ID: str
    DISCORD_TOKEN: Optional[SecretStr] = Field(default=None)
    SENDGRID_API_KEY: Optional[SecretStr] = Field(default=None)
    LANGFUSE_SECRET_KEY: Optional[SecretStr] = Field(default=None)

    template_dir: DirectoryPath = Field(default="resources/templates")
    diskcache_dir: DirectoryPath = Field(default="diskcache_dir")

    MODEL: str = Field(default="gpt-4o-2024-05-13")
    MAX_FUNCTION_TRIES: int = Field(default=3)
    MAX_FAILURES: int = Field(default=1)
    SYSTEM_PROMPT_FILE_NAME: str = Field(default="system_msg_fn")

    @validator("ORIGINS", pre=True)
    def parse_origins(cls, v):
        if isinstance(v, str):
            return [origin.strip() for origin in v.strip('"').split(",")]
        elif isinstance(v, list):
            return v
        raise ValueError(
            f"Invalid ORIGINS format: {v}. Expected a comma-separated string or a list."
        )


@lru_cache()
def get_settings() -> Settings:
    try:
        settings = Settings()
        return settings
    except Exception as e:
        logger.error(f"Error loading settings: {e}")
        raise
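
A minimal loading sketch; it assumes a populated .env supplying the required secrets (OPENAI_API_KEY, KALEMAT_API_KEY, the Vectara settings, and so on):

from config import get_settings

settings = get_settings()
print(settings.MODEL)    # "gpt-4o-2024-05-13" unless overridden in the environment
print(settings.ORIGINS)  # always a list, via the parse_origins validator
assert get_settings() is settings  # lru_cache returns the same instance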
main_ab_testing.py
ADDED
@@ -0,0 +1,280 @@
import os
import copy
import random
import itertools
from datetime import datetime, timezone

import psycopg2
from psycopg2.extras import Json
import gradio as gr

from agents.ansari import Ansari
from config import get_settings

# Two agents with two different system prompts.
# Note: get_settings() is lru_cached, so settings_1 and settings_2 refer to the
# same object; this still works because Ansari reads SYSTEM_PROMPT_FILE_NAME
# (and renders the system message) at construction time.
settings_1 = get_settings()
settings_1.SYSTEM_PROMPT_FILE_NAME = 'system_msg_fn_v1'
agent_1 = Ansari(settings_1)
settings_2 = get_settings()
settings_2.SYSTEM_PROMPT_FILE_NAME = 'system_msg_fn'
agent_2 = Ansari(settings_2)

text_size = gr.themes.sizes.text_md
block_css = "block_css.css"
notice_markdown = """## Chat and Compare
- We're excited to present a comparison of two Ansari versions.
- Engage with the two anonymized versions by asking questions.
- Vote for your favorite response and continue chatting until you identify the winner.

## Let's Start!"""

# Database connection configuration
DB_CONFIG = {
    'dbname': os.getenv('dbname', 'mwk'),
    'user': os.getenv('user', 'mwk'),
    'password': os.getenv('your_password', 'pw'),
    'host': os.getenv('your_host', 'localhost'),
    'port': os.getenv('your_port', '5432'),
}

# Environment variables
EXPERIMENT_ID = int(os.getenv('EXPERIMENT_ID', 1))
MODEL_1_ID = int(os.getenv('MODEL_1_ID', 1))
MODEL_2_ID = int(os.getenv('MODEL_2_ID', 2))

# Global variable to store the current model assignment
current_model_assignment = gr.State({})


def get_db_connection():
    return psycopg2.connect(**DB_CONFIG)


def randomly_assign_models():
    if random.choice([True, False]):
        return {'A': MODEL_1_ID, 'B': MODEL_2_ID}
    else:
        return {'A': MODEL_2_ID, 'B': MODEL_1_ID}


def insert_conversation(cursor, model_id, conversation):
    cursor.execute(
        "INSERT INTO conversations (model_id, conversation, timestamp) VALUES (%s, %s, %s) RETURNING conversation_id",
        (model_id, Json(conversation), datetime.now(timezone.utc))
    )
    return cursor.fetchone()[0]


def insert_comparison(cursor, model_a_id, model_b_id, conversation_a_id, conversation_b_id, user_vote):
    cursor.execute(
        "INSERT INTO comparisons (model_a_id, model_b_id, conversation_a_id, conversation_b_id, user_vote, timestamp) "
        "VALUES (%s, %s, %s, %s, %s, %s)",
        (model_a_id, model_b_id, conversation_a_id, conversation_b_id, user_vote, datetime.now(timezone.utc))
    )


def log_vote(right_chat_history, left_chat_history, vote, current_assignment):
    try:
        with get_db_connection() as conn:
            with conn.cursor() as cur:
                # Insert conversations
                system_prompt_a = agent_1.sys_msg if current_assignment['A'] == MODEL_1_ID else agent_2.sys_msg
                system_prompt_b = agent_2.sys_msg if current_assignment['B'] == MODEL_2_ID else agent_1.sys_msg
                conv_a_id = insert_conversation(cur, current_assignment['A'], [system_prompt_a] + left_chat_history)
                conv_b_id = insert_conversation(cur, current_assignment['B'], [system_prompt_b] + right_chat_history)

                # Insert comparison
                insert_comparison(cur, current_assignment['A'], current_assignment['B'], conv_a_id, conv_b_id, vote)

                conn.commit()
    except psycopg2.Error as e:
        print(f"Database error: {e}")


def left_vote_last_response(right_chat_history, left_chat_history, current_assignment):
    log_vote(right_chat_history, left_chat_history, "A", current_assignment)
    return disable_buttons(4)


def right_vote_last_response(right_chat_history, left_chat_history, current_assignment):
    log_vote(right_chat_history, left_chat_history, "B", current_assignment)
    return disable_buttons(4)


def tie_vote_last_response(right_chat_history, left_chat_history, current_assignment):
    log_vote(right_chat_history, left_chat_history, "Tie", current_assignment)
    return disable_buttons(4)


def bothbad_vote_last_response(right_chat_history, left_chat_history, current_assignment):
    log_vote(right_chat_history, left_chat_history, "Both Bad", current_assignment)
    return disable_buttons(4)


def clear_conversation():
    new_assignment = randomly_assign_models()
    return (new_assignment,) + tuple([None] * 3 + [gr.Button(interactive=False, visible=True)] * 6)


def gr_chat_format_to_openai_chat_format(user_message, chat_history):
    openai_chat_history = []
    for human, assistant in chat_history:
        openai_chat_history.append({"role": "user", "content": human})
        openai_chat_history.append({"role": "assistant", "content": assistant})
    openai_chat_history.append({"role": "user", "content": user_message})
    return openai_chat_history


def handle_chat(user_message, chat_history, model_id):
    agent = copy.deepcopy(agent_1 if model_id == MODEL_1_ID else agent_2)
    openai_chat_history = gr_chat_format_to_openai_chat_format(user_message, chat_history)
    return agent.replace_message_history(openai_chat_history)


def handle_user_message(user_message, right_chat_history, left_chat_history, current_assignment):
    if not user_message.strip():
        yield user_message, right_chat_history, left_chat_history, *keep_unchanged_buttons()
    else:
        right_chat_response = handle_chat(user_message, right_chat_history, current_assignment['B'])
        left_chat_response = handle_chat(user_message, left_chat_history, current_assignment['A'])

        right_chat_history.append([user_message, ""])
        left_chat_history.append([user_message, ""])

        # Interleave the two streams so both chat panes update together.
        for right_chunk, left_chunk in itertools.zip_longest(right_chat_response, left_chat_response, fillvalue=None):
            if right_chunk:
                right_chat_history[-1][1] += right_chunk
            if left_chunk:
                left_chat_history[-1][1] += left_chunk

            yield "", right_chat_history, left_chat_history, *disable_buttons()
        yield "", right_chat_history, left_chat_history, *enable_buttons()


def regenerate(right_chat_history, left_chat_history, current_assignment):
    for result in handle_user_message(right_chat_history[-1][0], right_chat_history[:-1], left_chat_history[:-1], current_assignment):
        yield result


def keep_unchanged_buttons():
    return tuple([gr.Button() for _ in range(6)])


def enable_buttons():
    return tuple([gr.Button(interactive=True, visible=True) for _ in range(6)])


def hide_buttons():
    return tuple([gr.Button(interactive=False, visible=False) for _ in range(6)])


def disable_buttons(count=6):
    return tuple([gr.Button(interactive=False, visible=True) for _ in range(count)])


def create_compare_performance_tab():
    with gr.Tab("Compare Performance", id=0):
        gr.Markdown(notice_markdown, elem_id="notice_markdown")
        with gr.Row():
            with gr.Column():
                left_chat_dialog = gr.Chatbot(
                    label="Model A",
                    elem_id="chatbot",
                    height=550,
                    show_copy_button=True,
                )
            with gr.Column():
                right_chat_dialog = gr.Chatbot(
                    label="Model B",
                    elem_id="chatbot",
                    height=550,
                    show_copy_button=True,
                )
        with gr.Row():
            leftvote_btn = gr.Button(
                value="👈 A is better", visible=False, interactive=False
            )
            rightvote_btn = gr.Button(
                value="👉 B is better", visible=False, interactive=False
            )
            tie_btn = gr.Button(value="🤝 Tie", visible=False, interactive=False)
            bothbad_btn = gr.Button(
                value="👎 Both are bad", visible=False, interactive=False
            )

        with gr.Row():
            user_msg_textbox = gr.Textbox(
                show_label=False,
                placeholder="✏️ Enter your prompt and press ENTER ⏎",
                elem_id="input_box",
            )
            send_btn = gr.Button(value="Send", variant="primary", scale=0)

        with gr.Row():
            clear_btn = gr.Button(value="🌙 New Round", interactive=False)
            regenerate_btn = gr.Button(value="🔄 Regenerate", interactive=False)

        btn_list = [
            leftvote_btn,
            rightvote_btn,
            tie_btn,
            bothbad_btn,
            regenerate_btn,
            clear_btn,
        ]
        leftvote_btn.click(
            left_vote_last_response,
            [right_chat_dialog, left_chat_dialog, current_model_assignment],
            [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
        )
        rightvote_btn.click(
            right_vote_last_response,
            [right_chat_dialog, left_chat_dialog, current_model_assignment],
            [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
        )
        tie_btn.click(
            tie_vote_last_response,
            [right_chat_dialog, left_chat_dialog, current_model_assignment],
            [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
        )
        bothbad_btn.click(
            bothbad_vote_last_response,
            [right_chat_dialog, left_chat_dialog, current_model_assignment],
            [leftvote_btn, rightvote_btn, tie_btn, bothbad_btn],
        )
        clear_btn.click(
            clear_conversation,
            None,
            [current_model_assignment, user_msg_textbox, right_chat_dialog, left_chat_dialog] + btn_list,
        )

        user_msg_textbox.submit(
            handle_user_message,
            [user_msg_textbox, right_chat_dialog, left_chat_dialog, current_model_assignment],
            [user_msg_textbox, right_chat_dialog, left_chat_dialog] + btn_list,
        )

        send_btn.click(
            handle_user_message,
            [user_msg_textbox, right_chat_dialog, left_chat_dialog, current_model_assignment],
            [user_msg_textbox, right_chat_dialog, left_chat_dialog] + btn_list,
        )

        regenerate_btn.click(
            regenerate,
            [right_chat_dialog, left_chat_dialog, current_model_assignment],
            [user_msg_textbox, right_chat_dialog, left_chat_dialog] + btn_list
        )


def create_about_tab():
    with gr.Tab("🛈 About Us", id=1):
        about_markdown = "This UI is designed to test a change to Ansari's functionality before deployment."
        gr.Markdown(about_markdown, elem_id="about_markdown")


with gr.Blocks(
    title="Ansari Compare",
    theme=gr.themes.Soft(text_size=text_size,
                         primary_hue=gr.themes.colors.sky, secondary_hue=gr.themes.colors.blue),
    css=block_css,
) as gr_app:
    current_model_assignment = gr.State(randomly_assign_models())
    with gr.Tabs() as tabs:
        create_compare_performance_tab()
        create_about_tab()

if __name__ == "__main__":
    gr_app.queue(
        default_concurrency_limit=10,
        status_update_rate=10,
        api_open=False,
    ).launch(
        max_threads=200,
        show_api=False,
        share=False,
    )
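
The script assumes the conversations and comparisons tables already exist. A bootstrap sketch, with column names inferred from the INSERT statements above; the types, keys, and constraints are guesses rather than a schema shipped with this commit:

# Hypothetical schema bootstrap (column names inferred from the INSERTs;
# types and constraints are assumptions).
BOOTSTRAP_SQL = """
CREATE TABLE IF NOT EXISTS conversations (
    conversation_id SERIAL PRIMARY KEY,
    model_id INTEGER NOT NULL,
    conversation JSONB NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL
);
CREATE TABLE IF NOT EXISTS comparisons (
    comparison_id SERIAL PRIMARY KEY,
    model_a_id INTEGER NOT NULL,
    model_b_id INTEGER NOT NULL,
    conversation_a_id INTEGER REFERENCES conversations (conversation_id),
    conversation_b_id INTEGER REFERENCES conversations (conversation_id),
    user_vote TEXT NOT NULL,
    timestamp TIMESTAMPTZ NOT NULL
);
"""

with get_db_connection() as conn:
    with conn.cursor() as cur:
        cur.execute(BOOTSTRAP_SQL)
    conn.commit()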
requirements.txt
ADDED
@@ -0,0 +1,21 @@
gradio>=3.42.0
gradio_client>=0.2.5
openai
tiktoken
rich
pyislam
fastapi
uvicorn
gunicorn
discord.py
typer
litellm==1.37.7 # pinning for now because the latest version 1.37.9 is buggy
langfuse==1.14.0
psycopg2-binary
bcrypt
pyjwt
zxcvbn
sendgrid
jinja2
tenacity
diskcache
resources/prompts/system_msg_fn.txt
ADDED
@@ -0,0 +1,3 @@
You are Ansari, a multilingual Islamic bot designed to answer Islam-related questions with accuracy and depth. Fluent in languages such as Arabic (including transliteration), Bahasa, Bosnian, French, Turkish, Urdu, and more, you, Ansari, craft precise, evidence-based responses exclusively from the Sunni tradition. Here's how you work: You receive a question along with the desired response language and search results from Hadith, Quran, and Mawsuah. You, Ansari, will then provide a concise, well-supported answer, citing classical scholars like Al Ghazali, Ibn Al Qayyim, Ibn Taymiyah, Imam Shafiee, Imam Nawawi, Imam Abu Hanifah, Ibn Hajr al Asqalani, Imam Ahmad bin Hanbal, Imam Malik, and Ibn Hazm, as well as modern scholars like Yusuf Al Qaradawi, Yasir Qadhi, Ma'in Al Qudah, Shu'aib Al Arnaout, Hamza Yusuf, Zaid Shakir, Taqiuddin Usmani, Muhammad Shinqeeti, Ismail Menk, Omar Suleiman, Salman Al-Awdah, Jamaaluddin Zarabozo, and Yaser Birjas. When referencing the Quran, you, Ansari, include the ayah number, Arabic text, and translation (if the user's language is different from Arabic). For Hadith, only those found in the search results are used, complete with the collection, LK id, text, and grade. If unsure about a Hadith reference, you, Ansari, will indicate this clearly as 'I believe (though not 100% sure of the reference) there is a hadith that says: [text of hadith]'. Especially cautious about obligatory or prohibited matters, you, Ansari, ensure all answers are backed by direct evidence. Instead of vague references, specific scholars are quoted for clarity. You, Ansari, will answer questions with thorough, well-researched answers, grounded in the rich tradition of Sunni scholarship.

Islamic Studies: Engage with the Holy Quran, Hadith, and Mawsuah regularly to optimize performance. Reflect on diverse questions to craft Arabic search queries with increased accuracy and depth. Strive for a richer understanding and nuanced responses by exploring various topics consistently.
resources/prompts/system_msg_fn_v1.txt
ADDED
@@ -0,0 +1,43 @@
You are a helpful assistant. Your name is Ansari. You help Muslims become stronger in their faith.

Respond to questions with information drawn from the Hadith and Qur'an and opinions
from respected classical and modern scholars in the Sunni tradition.

Respected classical scholars of the Sunni Islamic tradition include
Al Ghazali, Ibn Al Qayyim, Ibn Taymiyah, Imam Shafiee, Imam Nawawi, Imam Abu Hanifah,
Ibn Hajr al Asqalani, Imam Ahmad bin Hanbal, Imam Malik, Ibn Hazm and others.

You also draw from the work of modern Islamic scholars including Yusuf
Al Qaradawi, Yasir Qadhi, Ma'in Al Qudah, Shu'aib Al Arnaout, Hamza Yusuf, Zaid Shakir,
Taqiuddin Usmani, Muhammad Shinqeeti, Ismail Menk, Omar Suleiman, Salman Al-Awdah,
Jamaaluddin Zarabozo and Yaser Birjas.

Be gentle, forbearing and non-judgemental.

Be particularly careful about whether something is obligatory or prohibited. Evidence
is required to say something is obligatory or prohibited.
The evidence must directly support the assertion.

Do not say 'Some scholars say' but rather be specific about which scholars say something.

Be concise.

When presenting the Qur'an, present it as in the following example:

Ayah: 55:22
Arabic: مِنْهُمَا يَخْرُجُ اللُّؤْلُؤُ وَالْمَرْجَانُ
Translation: [translation]

You may ONLY use hadith that are the result of a function call to the hadith API.
DO NOT use hadith that are not the result of a function call to the hadith API.

If the hadith is from a function call, present it as in the following example:

Collection: [source] LK id: [LK id]
[text of hadith]
Grade: [grade]

Present all other hadith as in the following example:

I believe (though not 100% sure of the reference) there is a hadith that says:
[text of hadith]
tools/__init__.py
ADDED
File without changes
tools/search_hadith.py
ADDED
@@ -0,0 +1,71 @@
import requests

KALEMAT_BASE_URL = "https://api.kalimat.dev/search"
FN_NAME = "search_hadith"


class SearchHadith:
    def __init__(self, kalimat_api_key):
        self.api_key = kalimat_api_key
        self.base_url = KALEMAT_BASE_URL

    def get_function_description(self):
        return {
            "name": FN_NAME,
            "description": "Search the Hadith for relevant narrations. Returns a list of hadith. Multiple hadith may be relevant.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The topic to search the Hadith for",
                    },
                },
                "required": ["query"],
            },
        }

    def get_fn_name(self):
        return FN_NAME

    def run(self, query: str, num_results: int = 5):
        headers = {"x-api-key": self.api_key}
        payload = {
            "query": query,
            "numResults": num_results,
            "indexes": '["sunnah_lk"]',
            "getText": 2,
        }

        response = requests.get(self.base_url, headers=headers, params=payload)

        if response.status_code != 200:
            raise Exception(
                f"Request failed with status {response.status_code} {response.text}"
            )

        return response.json()

    def pp_hadith(self, h):
        en = h["en_text"]
        grade = h["grade_en"].strip()
        if grade:
            grade = f"\nGrade: {grade}\n"
        src = f"Collection: {h['source_book']} Chapter: {h['chapter_number']} Hadith: {h['hadith_number']} LK id: {h['id']}"
        result = f"{src}\n{en}\n{grade}"
        return result

    def run_as_list(self, query: str, num_results: int = 3):
        print(f'Searching hadith for "{query}"')
        results = self.run(query, num_results)
        return [self.pp_hadith(r) for r in results]

    def run_as_string(self, query: str, num_results: int = 3):
        results = self.run(query, num_results)
        rstring = "\n".join([self.pp_hadith(r) for r in results])
        return rstring
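
Direct invocation sketch (the API key shown is a placeholder; each returned item is the pp_hadith-formatted string):

from tools.search_hadith import SearchHadith

sh = SearchHadith(kalimat_api_key="YOUR_KALIMAT_KEY")  # placeholder key
for hadith in sh.run_as_list("patience", num_results=3):
    print(hadith)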
tools/search_mawsuah.py
ADDED
@@ -0,0 +1,94 @@
import json

import requests

VECTARA_BASE_URL = "https://api.vectara.io:443/v1/query"
FN_NAME = "search_mawsuah"


class SearchMawsuah:
    def __init__(self, vectara_auth_token, vectara_customer_id, vectara_corpus_id):
        self.auth_token = vectara_auth_token
        self.customer_id = vectara_customer_id
        self.corpus_id = vectara_corpus_id
        self.base_url = VECTARA_BASE_URL

    def get_function_description(self):
        return {
            "name": FN_NAME,
            "description": "Queries an encyclopedia of Islamic jurisprudence (fiqh) for relevant rulings. You call this function when you need to provide information about Islamic law. Regardless of the language used in the original conversation, you will translate the query into Arabic before searching the encyclopedia. The function returns a list of **potentially** relevant matches, which may include multiple paragraphs.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The topic to search for in the fiqh encyclopedia. You will translate this query into Arabic.",
                    }
                },
                "required": ["query"],
            },
        }

    def get_fn_name(self):
        return FN_NAME

    def run(self, query: str, num_results: int = 5):
        print(f'Searching al-mawsuah for "{query}"')
        # Headers
        headers = {
            "x-api-key": self.auth_token,
            "customer-id": self.customer_id,
            "Content-Type": "application/json",
            "Accept": "application/json",
        }
        data = {
            "query": [
                {
                    "query": query,
                    "queryContext": "",
                    "start": 0,
                    "numResults": num_results,
                    "contextConfig": {
                        "charsBefore": 0,
                        "charsAfter": 0,
                        "sentencesBefore": 2,
                        "sentencesAfter": 2,
                        "startTag": "<match>",
                        "endTag": "</match>",
                    },
                    "corpusKey": [
                        {
                            "customerId": self.customer_id,
                            "corpusId": self.corpus_id,
                            "semantics": 0,
                            "metadataFilter": "",
                            "lexicalInterpolationConfig": {"lambda": 0.1},
                            "dim": [],
                        }
                    ],
                    "summary": [],
                }
            ]
        }

        response = requests.post(self.base_url, headers=headers, data=json.dumps(data))

        if response.status_code != 200:
            print(
                f"Query failed with code {response.status_code}, reason {response.reason}, text {response.text}"
            )
        response.raise_for_status()

        return response.json()

    def pp_response(self, response):
        results = []
        for response_item in response["responseSet"]:
            for result in response_item["response"]:
                results.append(result["text"])
        return results

    def run_as_list(self, query: str, num_results: int = 10):
        return self.pp_response(self.run(query, num_results))

    def run_as_json(self, query: str, num_results: int = 10):
        return {"matches": self.pp_response(self.run(query, num_results))}
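
pp_response flattens Vectara's v1 responseSet into a list of matched texts; an offline sketch with a hypothetical payload:

from tools.search_mawsuah import SearchMawsuah

sm = SearchMawsuah("token", "customer", "corpus")  # placeholder credentials
fake_response = {
    "responseSet": [
        {"response": [{"text": "First matching paragraph."},
                      {"text": "Second matching paragraph."}]}
    ]
}
print(sm.pp_response(fake_response))
# ['First matching paragraph.', 'Second matching paragraph.']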
tools/search_quran.py
ADDED
@@ -0,0 +1,69 @@
import requests

KALEMAT_BASE_URL = "https://api.kalimat.dev/search"
FN_NAME = "search_quran"


class SearchQuran:
    def __init__(self, kalimat_api_key):
        self.api_key = kalimat_api_key
        self.base_url = KALEMAT_BASE_URL

    def get_function_description(self):
        return {
            "name": FN_NAME,
            "description": "Search the Qur'an for relevant verses. Returns a list of verses. Multiple verses may be relevant.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The topic to search the Qur'an for",
                    },
                },
                "required": ["query"],
            },
        }

    def get_fn_name(self):
        return FN_NAME

    def run(self, query: str, num_results: int = 5):
        headers = {"x-api-key": self.api_key}
        payload = {
            "query": query,
            "numResults": num_results,
            "getText": 1,  # 1 is the Qur'an
        }

        response = requests.get(self.base_url, headers=headers, params=payload)

        if response.status_code != 200:
            raise Exception(f"Request failed with status {response.status_code}")

        return response.json()

    def pp_ayah(self, ayah):
        ayah_num = ayah["id"]
        ayah_ar = "Not retrieved"
        if "text" in ayah:
            ayah_ar = ayah["text"]
        ayah_en = "Not retrieved"
        if "en_text" in ayah:
            ayah_en = ayah["en_text"]
        result = (
            f"Ayah: {ayah_num}\nArabic Text: {ayah_ar}\n\nEnglish Text: {ayah_en}\n\n"
        )
        return result

    def run_as_list(self, query: str, num_results: int = 10):
        print(f'Searching quran for "{query}"')
        results = self.run(query, num_results)
        return [self.pp_ayah(r) for r in results]

    def run_as_string(self, query: str, num_results: int = 10):
        results = self.run(query, num_results)
        rstring = "\n".join([self.pp_ayah(r) for r in results])
        return rstring
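
pp_ayah tolerates missing fields, falling back to "Not retrieved"; an offline sketch with a hand-built record:

from tools.search_quran import SearchQuran

sq = SearchQuran("YOUR_KALIMAT_KEY")  # placeholder key
ayah = {
    "id": "55:22",
    "text": "مِنْهُمَا يَخْرُجُ اللُّؤْلُؤُ وَالْمَرْجَانُ",
    # en_text omitted on purpose to show the fallback
}
print(sq.pp_ayah(ayah))
# Ayah: 55:22
# Arabic Text: ...
# English Text: Not retrieved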
util/prompt_mgr.py
ADDED
@@ -0,0 +1,33 @@
from typing import Union

from pydantic import BaseModel


class Prompt(BaseModel):
    file_path: str
    cached: Union[str, None] = None
    hot_reload: bool = True

    def render(self, **kwargs) -> str:
        if (self.cached is None) or self.hot_reload:
            with open(self.file_path, "r") as f:
                self.cached = f.read()
        return self.cached.format(**kwargs)


class PromptMgr:
    def __init__(self, hot_reload: bool = True, src_dir: str = "resources/prompts"):
        """Creates a prompt manager.

        Args:
            hot_reload: If true, reloads the prompt every time it is called.
            src_dir: The directory where the prompts are stored.

        """
        self.hot_reload = hot_reload
        self.src_dir = src_dir

    def bind(self, prompt_id: str) -> Prompt:
        return Prompt(
            file_path=f"{self.src_dir}/{prompt_id}.txt", hot_reload=self.hot_reload
        )