Spaces:
Runtime error
Runtime error
JohnSmith9982
committed on
Commit
•
632a25e
1
Parent(s):
7822da1
Upload 33 files
Browse files
modules/index_func.py
CHANGED
@@ -16,7 +16,7 @@ def get_index_name(file_src):
|
|
16 |
|
17 |
md5_hash = hashlib.md5()
|
18 |
for file_path in file_paths:
|
19 |
-
with open(file_path, "rb"
|
20 |
while chunk := f.read(8192):
|
21 |
md5_hash.update(chunk)
|
22 |
|
|
|
16 |
|
17 |
md5_hash = hashlib.md5()
|
18 |
for file_path in file_paths:
|
19 |
+
with open(file_path, "rb") as f:
|
20 |
while chunk := f.read(8192):
|
21 |
md5_hash.update(chunk)
|
22 |
|
modules/models/ChuanhuAgent.py
CHANGED
@@ -63,7 +63,7 @@ class ChuanhuAgent_Client(BaseLLMModel):
|
|
63 |
self.index_summary = None
|
64 |
self.index = None
|
65 |
if "Pro" in self.model_name:
|
66 |
-
self.tools = load_tools(["google-search-results-json", "llm-math", "arxiv", "wikipedia", "wolfram-alpha"], llm=self.llm)
|
67 |
else:
|
68 |
self.tools = load_tools(["ddg-search", "llm-math", "arxiv", "wikipedia"], llm=self.llm)
|
69 |
self.tools.append(
|
|
|
63 |
self.index_summary = None
|
64 |
self.index = None
|
65 |
if "Pro" in self.model_name:
|
66 |
+
self.tools = load_tools(["serpapi", "google-search-results-json", "llm-math", "arxiv", "wikipedia", "wolfram-alpha"], llm=self.llm)
|
67 |
else:
|
68 |
self.tools = load_tools(["ddg-search", "llm-math", "arxiv", "wikipedia"], llm=self.llm)
|
69 |
self.tools.append(
|
modules/models/__pycache__/ChuanhuAgent.cpython-311.pyc
CHANGED
Binary files a/modules/models/__pycache__/ChuanhuAgent.cpython-311.pyc and b/modules/models/__pycache__/ChuanhuAgent.cpython-311.pyc differ
|
|
modules/models/__pycache__/base_model.cpython-311.pyc
CHANGED
Binary files a/modules/models/__pycache__/base_model.cpython-311.pyc and b/modules/models/__pycache__/base_model.cpython-311.pyc differ
|
|
modules/models/__pycache__/base_model.cpython-39.pyc
CHANGED
Binary files a/modules/models/__pycache__/base_model.cpython-39.pyc and b/modules/models/__pycache__/base_model.cpython-39.pyc differ
|
|
modules/models/base_model.py
CHANGED
@@ -375,7 +375,7 @@ class BaseLLMModel:
|
|
375 |
|
376 |
status_text = "开始生成回答……"
|
377 |
logging.info(
|
378 |
-
|
379 |
)
|
380 |
if should_check_token_count:
|
381 |
yield chatbot + [(inputs, "")], status_text
|
@@ -481,6 +481,7 @@ class BaseLLMModel:
|
|
481 |
if len(self.history) > 0:
|
482 |
inputs = self.history[-2]["content"]
|
483 |
del self.history[-2:]
|
|
|
484 |
self.all_token_counts.pop()
|
485 |
elif len(chatbot) > 0:
|
486 |
inputs = chatbot[-1][0]
|
|
|
375 |
|
376 |
status_text = "开始生成回答……"
|
377 |
logging.info(
|
378 |
+
"用户" + f"{self.user_identifier}" + "的输入为:" + colorama.Fore.BLUE + f"{inputs}" + colorama.Style.RESET_ALL
|
379 |
)
|
380 |
if should_check_token_count:
|
381 |
yield chatbot + [(inputs, "")], status_text
|
|
|
481 |
if len(self.history) > 0:
|
482 |
inputs = self.history[-2]["content"]
|
483 |
del self.history[-2:]
|
484 |
+
if len(self.all_token_counts) > 0:
|
485 |
self.all_token_counts.pop()
|
486 |
elif len(chatbot) > 0:
|
487 |
inputs = chatbot[-1][0]
|