Spaces:
Running
on
Zero
Running
on
Zero
Fixed a silly mistake in Web search
Browse files- chatbot.py +2 -12
chatbot.py
CHANGED
@@ -192,18 +192,9 @@ def format_prompt(user_prompt, chat_history):
|
|
192 |
prompt += f"[INST] {user_prompt} [/INST]"
|
193 |
return prompt
|
194 |
|
195 |
-
chat_history = []
|
196 |
-
history = ""
|
197 |
-
|
198 |
-
def update_history(answer="", question=""):
|
199 |
-
global chat_history
|
200 |
-
global history
|
201 |
-
history += f"([ USER: {question}, OpenGPT 4o: {answer} ]),"
|
202 |
-
chat_history.append((question, answer))
|
203 |
-
return history
|
204 |
|
205 |
client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
|
206 |
-
client_mistral = InferenceClient("
|
207 |
generate_kwargs = dict( max_new_tokens=4000, do_sample=True, stream=True, details=True, return_full_text=False )
|
208 |
|
209 |
system_llava = "<|im_start|>system\nYou are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Your task is to fulfill users query in best possible way. You are provided with image, videos and 3d structures as input with question your task is to give best possible result and explaination to user.<|im_end|>"
|
@@ -236,8 +227,7 @@ def model_inference(
|
|
236 |
messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
|
237 |
|
238 |
messages+=f"\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>web_result\n{web2}<|im_end|>\n<|im_start|>assistant\n"
|
239 |
-
stream =
|
240 |
-
stream = client_mixtral.text_generation(messages, **generate_kwargs)
|
241 |
output = ""
|
242 |
# Construct the output from the stream of tokens
|
243 |
for response in stream:
|
|
|
192 |
prompt += f"[INST] {user_prompt} [/INST]"
|
193 |
return prompt
|
194 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
195 |
|
196 |
client_mixtral = InferenceClient("NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO")
|
197 |
+
client_mistral = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
|
198 |
generate_kwargs = dict( max_new_tokens=4000, do_sample=True, stream=True, details=True, return_full_text=False )
|
199 |
|
200 |
system_llava = "<|im_start|>system\nYou are OpenGPT 4o, an exceptionally capable and versatile AI assistant meticulously crafted by KingNish. Your task is to fulfill users query in best possible way. You are provided with image, videos and 3d structures as input with question your task is to give best possible result and explaination to user.<|im_end|>"
|
|
|
227 |
messages += f"\n<|im_start|>assistant\n{str(msg[1])}<|im_end|>"
|
228 |
|
229 |
messages+=f"\n<|im_start|>user\n{user_prompt}<|im_end|>\n<|im_start|>web_result\n{web2}<|im_end|>\n<|im_start|>assistant\n"
|
230 |
+
stream = client_mistral.text_generation(messages, **generate_kwargs)
|
|
|
231 |
output = ""
|
232 |
# Construct the output from the stream of tokens
|
233 |
for response in stream:
|