Spaces: Runtime error
Update app.py
app.py CHANGED
@@ -10,7 +10,6 @@ from llama_cpp import Llama
 from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
 from llama_cpp_agent.providers import LlamaCppPythonProvider
 from llama_cpp_agent.chat_history import BasicChatHistory
-from llama_cpp_agent.chat_history.messages import Roles, ChatMessage
 import gradio as gr
 from huggingface_hub import hf_hub_download
 
@@ -34,7 +33,7 @@ def chat_fn(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     response_generator = respond(message, history_list, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty)
 
     for chat_history in response_generator:
-        yield chat_history.get_messages(), history
+        yield chat_history.get_messages(), history
 
 
 def respond(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
@@ -74,10 +73,10 @@ def respond(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     messages = BasicChatHistory()
 
     for user_msg, bot_msg in history:
-        messages.add_message(
-        messages.add_message(
+        messages.add_message({"role": "user", "content": user_msg})
+        messages.add_message({"role": "assistant", "content": bot_msg})
 
-    messages.add_message(
+    messages.add_message({"role": "user", "content": message})
 
     start_time = time.time()
     token_count = 0
@@ -95,7 +94,7 @@ def respond(message, history, model, system_message, max_tokens, temperature, top_p, top_k, repeat_penalty):
     for output in stream:
         outputs += output
         token_count += len(output.split())
-        messages.add_message(
+        messages.add_message({"role": "assistant", "content": output})
         yield messages
 
     end_time = time.time()
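The fix replaces the dangling multi-line add_message( calls, which depended on the removed Roles and ChatMessage imports, with plain role/content dicts. A minimal sketch of that rebuilt-history pattern, using a hypothetical StubChatHistory stand-in so it runs without llama-cpp-agent installed (only add_message and get_messages, the two methods the commit actually calls, are mimicked):

class StubChatHistory:
    # Illustrative stand-in for llama_cpp_agent.chat_history.BasicChatHistory.
    def __init__(self):
        self._messages = []

    def add_message(self, message):
        # The patched code passes plain dicts: {"role": ..., "content": ...}
        self._messages.append(message)

    def get_messages(self):
        return list(self._messages)

# Rebuild the history the way the patched respond() does: one user/assistant
# pair per (user_msg, bot_msg) tuple, then the new user message at the end.
history = [("Hi", "Hello!"), ("2+2?", "4")]  # sample values, not from the Space
message = "And 3+3?"

messages = StubChatHistory()
for user_msg, bot_msg in history:
    messages.add_message({"role": "user", "content": user_msg})
    messages.add_message({"role": "assistant", "content": bot_msg})
messages.add_message({"role": "user", "content": message})

assert len(messages.get_messages()) == 2 * len(history) + 1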